From 5d5a595ae4db6c066dcf24a442bf6a66f1ee9ce1 Mon Sep 17 00:00:00 2001 From: Markus Gierlinger <2192234+Magier@users.noreply.github.com> Date: Wed, 18 Sep 2024 15:16:51 +0200 Subject: [PATCH] update version of used cis rulesets (#345) * update version of used cis rulesets * fix accidental indentation in ruleset yaml --- .gitignore | 1 + cmd/controller/state/kubebench/spec/aks.go | 2 +- cmd/controller/state/kubebench/spec/eks.go | 2 +- cmd/controller/state/kubebench/spec/gke.go | 2 +- cmd/linter/kubebench/common.go | 3 +- .../kubebench-rules/aks-1.4.0/config.yaml | 2 + .../aks-1.4.0/controlplane.yaml | 31 + .../aks-1.4.0/managedservices.yaml | 144 ++++ .../kubebench-rules/aks-1.4.0/master.yaml | 6 + .../kubebench-rules/aks-1.4.0/node.yaml | 298 ++++++++ .../kubebench-rules/aks-1.4.0/policies.yaml | 206 ++++++ .../kubebench-rules/aks-1.5.0/config.yaml | 2 + .../aks-1.5.0/controlplane.yaml | 23 + .../aks-1.5.0/managedservices.yaml | 112 +++ .../kubebench-rules/aks-1.5.0/master.yaml | 5 + .../kubebench-rules/aks-1.5.0/node.yaml | 403 ++++++++++ .../kubebench-rules/aks-1.5.0/policies.yaml | 153 ++++ .../kubebench/kubebench-rules/config.yaml | 37 + .../kubebench-rules/eks-1.4.0/config.yaml | 2 + .../eks-1.4.0/controlplane.yaml | 14 + .../eks-1.4.0/managedservices.yaml | 294 ++++++++ .../kubebench-rules/eks-1.4.0/master.yaml | 5 + .../kubebench-rules/eks-1.4.0/node.yaml | 384 ++++++++++ .../kubebench-rules/eks-1.4.0/policies.yaml | 203 ++++++ .../kubebench-rules/eks-1.5.0/config.yaml | 2 + .../eks-1.5.0/controlplane.yaml | 14 + .../eks-1.5.0/managedservices.yaml | 255 +++++++ .../kubebench-rules/eks-1.5.0/master.yaml | 5 + .../kubebench-rules/eks-1.5.0/node.yaml | 369 ++++++++++ .../kubebench-rules/eks-1.5.0/policies.yaml | 188 +++++ .../kubebench-rules/gke-1.5.0/config.yaml | 2 + .../gke-1.5.0/controlplane.yaml | 17 + .../gke-1.5.0/managedservices.yaml | 687 ++++++++++++++++++ .../kubebench-rules/gke-1.5.0/master.yaml | 5 + .../kubebench-rules/gke-1.5.0/node.yaml | 71 ++ .../kubebench-rules/gke-1.5.0/policies.yaml | 158 ++++ .../kubebench-rules/gke-1.6.0/config.yaml | 2 + .../gke-1.6.0/controlplane.yaml | 17 + .../gke-1.6.0/managedservices.yaml | 653 +++++++++++++++++ .../kubebench-rules/gke-1.6.0/master.yaml | 5 + .../kubebench-rules/gke-1.6.0/node.yaml | 380 ++++++++++ .../kubebench-rules/gke-1.6.0/policies.yaml | 143 ++++ 42 files changed, 5302 insertions(+), 5 deletions(-) create mode 100644 cmd/linter/kubebench/kubebench-rules/aks-1.4.0/config.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/aks-1.4.0/controlplane.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/aks-1.4.0/managedservices.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/aks-1.4.0/master.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/aks-1.4.0/node.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/aks-1.4.0/policies.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/aks-1.5.0/config.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/aks-1.5.0/controlplane.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/aks-1.5.0/managedservices.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/aks-1.5.0/master.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/aks-1.5.0/node.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/aks-1.5.0/policies.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.4.0/config.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.4.0/controlplane.yaml create mode 
100644 cmd/linter/kubebench/kubebench-rules/eks-1.4.0/managedservices.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.4.0/master.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.4.0/node.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.4.0/policies.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.5.0/config.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.5.0/controlplane.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.5.0/managedservices.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.5.0/master.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.5.0/node.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/eks-1.5.0/policies.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/gke-1.5.0/config.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/gke-1.5.0/controlplane.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/gke-1.5.0/managedservices.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/gke-1.5.0/master.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/gke-1.5.0/node.yaml create mode 100644 cmd/linter/kubebench/kubebench-rules/gke-1.5.0/policies.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/gke-1.6.0/config.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/gke-1.6.0/controlplane.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/gke-1.6.0/managedservices.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/gke-1.6.0/master.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/gke-1.6.0/node.yaml create mode 100755 cmd/linter/kubebench/kubebench-rules/gke-1.6.0/policies.yaml diff --git a/.gitignore b/.gitignore index 36f318a9..f1244bfa 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ node_modules .cache .venv .devbox +.DS_Store \ No newline at end of file diff --git a/cmd/controller/state/kubebench/spec/aks.go b/cmd/controller/state/kubebench/spec/aks.go index 3fdfe2d5..05e0a434 100644 --- a/cmd/controller/state/kubebench/spec/aks.go +++ b/cmd/controller/state/kubebench/spec/aks.go @@ -56,7 +56,7 @@ func AKS(nodeName, jobName string) *batchv1.Job { "--config-dir", "/etc/kubebench-rules/", "run", "--targets", "node", - "--benchmark", "aks-1.3", + "--benchmark", "aks-1.4", "--json", }, VolumeMounts: []corev1.VolumeMount{ diff --git a/cmd/controller/state/kubebench/spec/eks.go b/cmd/controller/state/kubebench/spec/eks.go index 86f41892..54073666 100644 --- a/cmd/controller/state/kubebench/spec/eks.go +++ b/cmd/controller/state/kubebench/spec/eks.go @@ -56,7 +56,7 @@ func EKS(nodeName, jobName string) *batchv1.Job { "--config-dir", "/etc/kubebench-rules/", "run", "--targets", "node", - "--benchmark", "eks-1.3.0", + "--benchmark", "eks-1.4.0", "--json", }, VolumeMounts: []corev1.VolumeMount{ diff --git a/cmd/controller/state/kubebench/spec/gke.go b/cmd/controller/state/kubebench/spec/gke.go index 6c4a0ca0..1b751704 100644 --- a/cmd/controller/state/kubebench/spec/gke.go +++ b/cmd/controller/state/kubebench/spec/gke.go @@ -57,7 +57,7 @@ func GKE(nodeName, jobName string) *batchv1.Job { "run", "--targets", "node,policies,managedservices", - "--benchmark", "gke-1.4.0", + "--benchmark", "gke-1.5.0", "--json", }, VolumeMounts: []corev1.VolumeMount{ diff --git a/cmd/linter/kubebench/common.go b/cmd/linter/kubebench/common.go index dd45c26e..f0882b3b 100644 --- a/cmd/linter/kubebench/common.go +++ b/cmd/linter/kubebench/common.go @@ -18,7 
+18,6 @@ import ( "bufio" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -71,7 +70,7 @@ func runChecks(nodetype check2.NodeType, testYamlFile, detectedVersion string) { os.Exit(1) } - in, err := ioutil.ReadFile(testYamlFile) + in, err := os.ReadFile(testYamlFile) if err != nil { exitWithError(fmt.Errorf("error opening %s test file: %v", testYamlFile, err)) } diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/config.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/config.yaml new file mode 100644 index 00000000..b7839455 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/controlplane.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/controlplane.yaml new file mode 100644 index 00000000..02a2bce8 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/controlplane.yaml @@ -0,0 +1,31 @@ +--- +controls: +version: "aks-1.4" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit Logs" + type: "manual" + remediation: | + Azure audit logs are enabled and managed in the Azure portal. To enable log collection for + the Kubernetes master components in your AKS cluster, open the Azure portal in a web + browser and complete the following steps: + 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't + select the resource group that contains your individual AKS cluster resources, such + as MC_myResourceGroup_myAKSCluster_eastus. + 2. On the left-hand side, choose Diagnostic settings. + 3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting. + 4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics. + 5. Select an existing workspace or create a new one. If you create a workspace, provide + a workspace name, a resource group, and a location. + 6. In the list of available logs, select the logs you wish to enable. For this example, + enable the kube-audit and kube-audit-admin logs. Common logs include the kube- + apiserver, kube-controller-manager, and kube-scheduler. You can return and change + the collected logs once Log Analytics workspaces are enabled. + 7. When ready, select Save to enable collection of the selected logs. + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/managedservices.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/managedservices.yaml new file mode 100644 index 00000000..207fe3ae --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/managedservices.yaml @@ -0,0 +1,144 @@ +--- +controls: +version: "aks-1.4" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.1.2 + text: "Minimize user access to Azure Container Registry (ACR) (Manual)" + type: "manual" + remediation: | + Azure Container Registry + If you use Azure Container Registry (ACR) as your container image store, you need to grant + permissions to the service principal for your AKS cluster to read and pull images. 
Currently, + the recommended configuration is to use the az aks create or az aks update command to + integrate with a registry and assign the appropriate role for the service principal. For + detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes + Service. + To avoid needing an Owner or Azure account administrator role, you can configure a + service principal manually or use an existing service principal to authenticate ACR from + AKS. For more information, see ACR authentication with service principals or Authenticate + from Kubernetes with a pull secret. + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Azure Container Registry (ACR) (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.2 + text: "Access and identity options for Azure Kubernetes Service (AKS)" + checks: + - id: 5.2.1 + text: "Prefer using dedicated AKS Service Accounts (Manual)" + type: "manual" + remediation: | + Azure Active Directory integration + The security of AKS clusters can be enhanced with the integration of Azure Active Directory + (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, + cloud-based directory, and identity management service that combines core directory + services, application access management, and identity protection. With Azure AD, you can + integrate on-premises identities into AKS clusters to provide a single source for account + management and security. + Azure Active Directory integration with AKS clusters + With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes + resources within a namespace or across the cluster. To obtain a kubectl configuration + context, a user can run the az aks get-credentials command. When a user then interacts + with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD + credentials. This approach provides a single source for user account management and + password credentials. The user can only access the resources as defined by the cluster + administrator. + Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect + is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID + Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, + Webhook Token Authentication is used to verify authentication tokens. Webhook token + authentication is configured and managed as part of the AKS cluster. 
+ scored: false + + - id: 5.3 + text: "Key Management Service (KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4 + text: "Cluster Networking" + checks: + - id: 5.4.1 + text: "Restrict Access to the Control Plane Endpoint (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.2 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.3 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.4 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.5 + text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + + - id: 5.5 + text: "Authentication and Authorization" + checks: + - id: 5.5.1 + text: "Manage Kubernetes RBAC users with Azure AD (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + - id: 5.5.2 + text: "Use Azure RBAC for Kubernetes Authorization (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.6 + text: "Other Cluster Configurations" + checks: + - id: 5.6.1 + text: "Restrict untrusted workloads (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + - id: 5.6.2 + text: "Hostile multi-tenant workloads (Manual)" + type: "manual" + remediation: "No remediation" + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/master.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/master.yaml new file mode 100644 index 00000000..d496d05d --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "aks-1.4" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/node.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/node.yaml new file mode 100644 index 00000000..7234d197 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/node.yaml @@ -0,0 +1,298 @@ +--- +controls: +version: "aks-1.4" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: false + + - id: 3.1.2 + text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chown root:root $kubeletkubeconfig + scored: false + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: false + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.8 + text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --eventRecordQPS + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.9 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: true + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.10 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/policies.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/policies.yaml new file mode 100644 index 00000000..7b5b77d2 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.4.0/policies.yaml @@ -0,0 +1,206 @@ +--- +controls: +version: "aks-1.4" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. 
+ scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.2 + text: "Pod Security Standards" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of privileged containers. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostPID containers. + scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostIPC containers. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostNetwork containers. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of containers with .spec.allowPrivilegeEscalation set to true. + scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, + ensuring that either MustRunAsNonRoot or MustRunAs + with the range of UIDs not including 0, is set. + scored: false + + - id: 4.2.7 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 4.2.8 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. 
+ scored: false + + - id: 4.3 + text: "Azure Policy / OPA" + checks: [] + + - id: 4.4 + text: "CNI Plugin" + checks: + - id: 4.4.1 + text: "Ensure that the latest CNI version is used (Manual)" + type: "manual" + remediation: | + Review the documentation of AWS CNI plugin, and ensure latest CNI version is used. + scored: false + + - id: 4.4.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 4.5 + text: "Secrets Management" + checks: + - id: 4.5.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.5.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.6 + text: "Extensible Admission Control" + checks: + - id: 4.6.1 + text: "Verify that admission controllers are working as expected (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 4.7 + text: "General Policies" + checks: + - id: 4.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.7.2 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.7.3 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/config.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/config.yaml new file mode 100755 index 00000000..b7839455 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/controlplane.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/controlplane.yaml new file mode 100755 index 00000000..c3a05dc9 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/controlplane.yaml @@ -0,0 +1,23 @@ +controls: "" +version: aks-1.5.0 +id: 2 +text: Master (Control Plane) Configuration +type: controlplane +groups: + - id: "2.1" + text: Logging + checks: + - id: 2.1.1 + text: Enable audit Logs + type: manual + remediation: |- + Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps: + + 1. Select the resource group for your AKS cluster, such as myResourceGroup. 
Don't select the resource group that contains your individual AKS cluster resources, such as MC_myResourceGroup_myAKSCluster_eastus. + 2. On the left-hand side, choose Diagnostic settings. + 3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting. + 4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics. + 5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location. + 6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled. + 7. When ready, select Save to enable collection of the selected logs. + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/managedservices.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/managedservices.yaml new file mode 100755 index 00000000..a48d9a14 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/managedservices.yaml @@ -0,0 +1,112 @@ +controls: "" +version: aks-1.5.0 +id: 5 +text: Managed services +type: managedservices +groups: + - id: "5.1" + text: Image Registry and Image Scanning + checks: + - id: 5.1.1 + text: Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider + type: automated + remediation: |- + Enable Azure Defender for Container Registries: If you find that Azure Defender for container registries is not enabled and you wish to enable it, you can do so using the following command: + + `az security pricing create --name ContainerRegistry --tier Standard` + + Please note, enabling Azure Defender for container registries incurs additional costs, so be sure to review the pricing details on the official Azure documentation before enabling it. + scored: false + - id: 5.1.2 + text: Minimize user access to Azure Container Registry (ACR) + type: manual + remediation: |- + Azure Container Registry + If you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service. + + To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret. + scored: false + - id: 5.1.3 + text: Minimize cluster access to read-only for Azure Container Registry (ACR) + type: manual + scored: false + - id: 5.1.4 + text: Minimize Container Registries to only those approved + type: manual + remediation: |- + If you are using Azure Container Registry you have this option: + https://docs.microsoft.com/en-us/azure/container-registry/container-registry-firewall-access-rules + + For other non-AKS repos using admission controllers or Azure Policy will also work. 
+ + Limiting or locking down egress traffic is also recommended: + https://docs.microsoft.com/en-us/azure/aks/limit-egress-traffic + scored: false + - id: "5.2" + text: Access and identity options for Azure Kubernetes Service (AKS) + checks: + - id: 5.2.1 + text: Prefer using dedicated AKS Service Accounts + type: manual + remediation: |- + Azure Active Directory integration + The security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security. + + Azure Active Directory integration with AKS clusters + + With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator. + + Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. Webhook token authentication is configured and managed as part of the AKS cluster. + scored: false + - id: "5.3" + text: Key Management Service (KMS) + checks: + - id: 5.3.1 + text: Ensure Kubernetes Secrets are encrypted + type: manual + scored: false + - id: "5.4" + text: Cluster Networking + checks: + - id: 5.4.1 + text: Restrict Access to the Control Plane Endpoint + type: automated + remediation: |- + By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server. + + With this in mind, you can update your cluster accordingly using the AKS CLI to ensure that Private Endpoint Access is enabled. + + If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0']. 
+ + For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix): + scored: false + - id: 5.4.2 + text: Ensure clusters are Private Cluster enabled and Public Access Disabled + type: automated + remediation: To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone + scored: false + - id: 5.4.3 + text: Ensure clusters are created with Private Nodes + type: automated + remediation: "```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n```\n\nWhere `--enable-private-cluster` is a mandatory flag for a private cluster." + scored: false + - id: 5.4.4 + text: Ensure Network Policy is Enabled and set as appropriate + type: automated + remediation: Utilize Calico or other network policy engine to segment and isolate your traffic. + scored: false + - id: 5.4.5 + text: Encrypt traffic to HTTPS load balancers with TLS certificates + type: manual + scored: false + - id: "5.5" + text: Authentication and Authorization + checks: + - id: 5.5.1 + text: Manage Kubernetes RBAC users with Azure AD + type: manual + scored: false + - id: 5.5.2 + text: Use Azure RBAC for Kubernetes Authorization + type: manual + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/master.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/master.yaml new file mode 100755 index 00000000..16b481d0 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/master.yaml @@ -0,0 +1,5 @@ +controls: "" +version: aks-1.5.0 +id: 1 +text: Master (Control Plane) Components +type: master diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/node.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/node.yaml new file mode 100755 index 00000000..595c93d3 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/node.yaml @@ -0,0 +1,403 @@ +controls: "" +version: aks-1.5.0 +id: 3 +text: Worker Nodes +type: node +groups: + - id: "3.1" + text: Worker Node Configuration Files + checks: + - id: 3.1.1 + text: Ensure that the kubeconfig file permissions are set to 644 or more restrictive + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + type: automated + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the below command (based on the file location on your system) on the each worker + node. For example, + ``` + chmod 644 + ``` + scored: false + - id: 3.1.2 + text: Ensure that the kubelet kubeconfig file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + type: automated + tests: + test_items: + - flag: root:root + remediation: |- + Run the below command (based on the file location on your system) on each worker node. 
For example, + + ``` + chown root:root + ``` + scored: false + - id: 3.1.3 + text: Ensure that the azure.json file has permissions set to 644 or more restrictive + audit: '/bin/sh -c ''if test -e $kubernetesazurejson; then stat -c permissions=%a $kubernetesazurejson; fi'' ' + type: automated + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the following command (using the config file location identified in the Audit step) + + ``` + chmod 644 /etc/kubernetes/azure.json + ``` + scored: false + - id: 3.1.4 + text: Ensure that the azure.json file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $kubernetesazurejson; then stat -c %U:%G $kubernetesazurejson; fi'' ' + type: automated + tests: + test_items: + - flag: root:root + remediation: |- + Run the following command (using the config file location identified in the Audit step) + + ``` + chown root:root /etc/kubernetes/azure.json + ``` + scored: false + - id: "3.2" + text: Kubelet + checks: + - id: 3.2.1 + text: Ensure that the --anonymous-auth argument is set to false + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --anonymous-auth + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: "false" + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + + ``` + "anonymous": "enabled": false + ``` + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --anonymous-auth=false + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"authentication.*anonymous":{"enabled":false}"` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.aks.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: true + - id: 3.2.2 + text: Ensure that the --authorization-mode argument is not set to AlwaysAllow + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: "**Remediation Method 1:**\n\nIf modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n ```\n\"authentication\"... 
\"webhook\":{\"enabled\":true\n``` \n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n```\n--authorization-mode=Webhook\n```\n\n**Remediation Method 3:**\n\nIf using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n**See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n```\n\n**For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.3 + text: Ensure that the --client-ca-file argument is set as appropriate + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: "**Remediation Method 1:**\n\nIf modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n``` \n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n```\n--client-ca-file=\n```\n\n**Remediation Method 3:**\n\nIf using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n**See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n```\n\n**For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```\n```" + scored: true + - id: 3.2.4 + text: Ensure that the --read-only-port is secured + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --read-only-port + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: "0" + remediation: |- + If modifying the 
Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + + ``` + readOnlyPort to 0 + ``` + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --read-only-port=0 + ``` + + For all remediations: + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: false + - id: 3.2.5 + text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: "0" + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s + + ``` + "streamingConnectionIdleTimeout": "4h0m0s" + ``` + + You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --streaming-connection-idle-timeout=4h0m0s + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"streamingConnectionIdleTimeout":` by extracting the live configuration from the nodes running kubelet. 
+ + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.aks.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: true + - id: 3.2.6 + text: Ensure that the --make-iptables-util-chains argument is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: "true" + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + + ``` + "makeIPTablesUtilChains": true + ``` + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --make-iptables-util-chains:true + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"makeIPTablesUtilChains": true` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.aks.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + - id: 3.2.7 + text: Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0 + + ``` + "eventRecordQPS": 5 + ``` + + Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config. 
+ + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --eventRecordQPS=5 + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"eventRecordQPS"` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.aks.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: false + - id: 3.2.8 + text: Ensure that the --rotate-certificates argument is not set to false + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: true + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + + ``` + "RotateCertificate":true + ``` + + Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --RotateCertificate=true + ``` + scored: true + - id: 3.2.9 + text: Ensure that the RotateKubeletServerCertificate argument is set to true + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: true + remediation: "**Remediation Method 1:**\n\nIf modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n```\n\"RotateKubeletServerCertificate\":true\n```\n\n**Remediation Method 2:**\n\nIf using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`. 
\n\nIf using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n```\n--rotate-kubelet-server-certificate=true\n```\n\n**Remediation Method 3:**\n\nIf using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n**See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n```\n\n**For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true diff --git a/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/policies.yaml b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/policies.yaml new file mode 100755 index 00000000..bcbdb8d4 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/aks-1.5.0/policies.yaml @@ -0,0 +1,153 @@ +controls: "" +version: aks-1.5.0 +id: 4 +text: Policies +type: policies +groups: + - id: "4.1" + text: RBAC and Service Accounts + checks: + - id: 4.1.1 + text: Ensure that the cluster-admin role is only used where required + type: automated + remediation: "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges. \n\nWhere possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n```\nkubectl delete clusterrolebinding [name]\n```" + scored: false + - id: 4.1.2 + text: Minimize access to secrets + type: automated + remediation: Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster. + scored: false + - id: 4.1.3 + text: Minimize wildcard use in Roles and ClusterRoles + type: automated + remediation: Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions. + scored: false + - id: 4.1.4 + text: Minimize access to create pods + type: automated + remediation: Where possible, remove `create` access to `pod` objects in the cluster. + scored: false + - id: 4.1.5 + text: Ensure that default service accounts are not actively used + type: automated + remediation: "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\nModify the configuration of each default service account to include this value \n\n```\nautomountServiceAccountToken: false\n```\n\nAutomatic remediation for the default account:\n\n`kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`" + scored: false + - id: 4.1.6 + text: Ensure that Service Account Tokens are only mounted where necessary + type: automated + remediation: Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it. 
+ scored: false + - id: "4.2" + text: Pod Security Standards + checks: + - id: 4.2.1 + text: Minimize the admission of privileged containers + type: automated + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers. + + To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce label with the policy value you want to enforce. + + `kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted` + + The above command enforces the restricted policy for the NAMESPACE namespace. + + You can also enable Pod Security Admission for all your namespaces. For example: + + `kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline` + + Pod Security Policies and Assignments can be found by searching for Policies in the Azure Portal. A detailed step-by-step guide can be found here: + + https://learn.microsoft.com/en-us/azure/governance/policy/concepts/policy-for-kubernetes + scored: false + - id: 4.2.2 + text: Minimize the admission of containers wishing to share the host process ID namespace + type: automated + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers. + + Pod Security Policies and Assignments can be found by searching for Policies in the Azure Portal. A detailed step-by-step guide can be found here: + + https://learn.microsoft.com/en-us/azure/governance/policy/concepts/policy-for-kubernetes + scored: false + - id: 4.2.3 + text: Minimize the admission of containers wishing to share the host IPC namespace + type: automated + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers. + + Pod Security Policies and Assignments can be found by searching for Policies in the Azure Portal. A detailed step-by-step guide can be found here: + + https://learn.microsoft.com/en-us/azure/governance/policy/concepts/policy-for-kubernetes + scored: false + - id: 4.2.4 + text: Minimize the admission of containers wishing to share the host network namespace + type: automated + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers. + + Pod Security Policies and Assignments can be found by searching for Policies in the Azure Portal. A detailed step-by-step guide can be found here: + + https://learn.microsoft.com/en-us/azure/governance/policy/concepts/policy-for-kubernetes + scored: false + - id: 4.2.5 + text: Minimize the admission of containers with allowPrivilegeEscalation + type: automated + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + + Pod Security Policies and Assignments can be found by searching for Policies in the Azure Portal. A detailed step-by-step guide can be found here: + + https://learn.microsoft.com/en-us/azure/governance/policy/concepts/policy-for-kubernetes + scored: false + - id: "4.3" + text: Azure Policy / OPA + checks: [] + - id: "4.4" + text: CNI Plugin + checks: + - id: 4.4.1 + text: Ensure latest CNI version is used + type: automated + remediation: As with RBAC policies, network policies should adhere to the policy of least privileged access. 
Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico. + scored: false + - id: 4.4.2 + text: Ensure that all Namespaces have Network Policies defined + type: automated + remediation: Follow the documentation and create `NetworkPolicy` objects as you need them. + scored: false + - id: "4.5" + text: Secrets Management + checks: + - id: 4.5.1 + text: Prefer using secrets as files over secrets as environment variables + type: automated + remediation: If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables. + scored: false + - id: 4.5.2 + text: Consider external secret storage + type: manual + remediation: Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution. + scored: false + - id: "4.6" + text: General Policies + checks: + - id: 4.6.1 + text: Create administrative boundaries between resources using namespaces + type: manual + remediation: Follow the documentation and create namespaces for objects in your deployment as you need them. + scored: false + - id: 4.6.2 + text: Apply Security Context to Your Pods and Containers + type: manual + remediation: |- + As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy that prevents pods from running as privileged or escalating privileges. + It should also restrict the types of volumes that can be mounted and the root supplemental groups that can be added. + + Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path. + scored: false + - id: 4.6.3 + text: The default namespace should not be used + type: automated + remediation: Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. 
+ scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/config.yaml b/cmd/linter/kubebench/kubebench-rules/config.yaml index f4eb3b4f..a5f87f3b 100644 --- a/cmd/linter/kubebench/kubebench-rules/config.yaml +++ b/cmd/linter/kubebench/kubebench-rules/config.yaml @@ -125,6 +125,7 @@ node: kubernetes: defaultconf: "/etc/kubernetes/config" + azurejson: "/etc/kubernetes/azure.json" kubelet: cafile: @@ -337,6 +338,18 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "gke-1.5.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "gke-1.6.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "eks-1.0.1": - "master" - "node" @@ -361,6 +374,18 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "eks-1.4.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "eks-1.5.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "rh-0.7": - "master" - "node" @@ -376,6 +401,18 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "aks-1.4.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "aks-1.5.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "ack-1.0": - "master" - "node" diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/config.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/config.yaml new file mode 100644 index 00000000..b7839455 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/controlplane.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/controlplane.yaml new file mode 100644 index 00000000..e737df40 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/controlplane.yaml @@ -0,0 +1,14 @@ +controls: "" +version: eks-1.4.0 +id: 2 +text: Control Plane Configuration +type: controlplane +groups: + - id: "2.1" + text: Logging + checks: + - id: 2.1.1 + text: Enable audit Logs + type: automated + remediation: "**From Console:**\n1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n```\nAPI server: Enabled\nAudit: Enabled \nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n```\n5. 
Click 'Save Changes'.\n\n**From CLI:**\n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n```" + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/managedservices.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/managedservices.yaml new file mode 100644 index 00000000..13c30e01 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/managedservices.yaml @@ -0,0 +1,294 @@ +controls: "" +version: eks-1.4.0 +id: 5 +text: Managed services +type: managedservices +groups: + - id: "5.1" + text: Image Registry and Image Scanning + checks: + - id: 5.1.1 + text: Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider + type: automated + remediation: |- + To utilize AWS ECR for Image scanning please follow the steps below: + + To create a repository configured for scan on push (AWS CLI) + ``` + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + ``` + + To edit the settings of an existing repository (AWS CLI) + ``` + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + ``` + + Use the following steps to start a manual image scan using the AWS Management Console. + + 1. Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. + 2. From the navigation bar, choose the Region to create your repository in. + 3. In the navigation pane, choose Repositories. + 4. On the Repositories page, choose the repository that contains the image to scan. + 5. On the Images page, select the image to scan and then choose Scan. + scored: false + - id: 5.1.2 + text: Minimize user access to Amazon ECR + type: manual + remediation: |- + Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide. + + **Topics** + + - Amazon ECR Identity-Based Policies + - Amazon ECR Resource-Based Policies + - Authorization Based on Amazon ECR Tags + - Amazon ECR IAM Roles + + **Amazon ECR Identity-Based Policies** + + With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide. + + **Actions** + The Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation. + + Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. 
Amazon ECR defines its own set of actions that describe tasks that you can perform with this service. + + To specify multiple actions in a single statement, separate them with commas as follows: + + `"Action": [ + "ecr:action1", + "ecr:action2"` + + You can specify multiple actions using wildcards (*). For example, to specify all actions that begin with the word Describe, include the following action: + + `"Action": "ecr:Describe*"` + + To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide. + + **Resources** + The Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (*) to indicate that the statement applies to all resources. + + An Amazon ECR repository resource has the following ARN: + + `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}` + + For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces. + + For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN: + + `"Resource": "arn:aws:ecr:us-east-1:123456789012:repository/my-repo"` + + To specify all repositories that belong to a specific account, use the wildcard (*): + + `"Resource": "arn:aws:ecr:us-east-1:123456789012:repository/*"` + + To specify multiple resources in a single statement, separate the ARNs with commas. + + `"Resource": [ + "resource1", + "resource2"` + + To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry. + + **Condition Keys** + The Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request. + + If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted. + + You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide. + + Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide. + + Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control. + + To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry. 
+ scored: false + - id: 5.1.3 + text: Minimize cluster access to read-only for Amazon ECR + type: manual + remediation: |- + You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites. + + The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR. + + ``` + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken" + ], + "Resource": "*" + } + ] + } + ``` + scored: false + - id: 5.1.4 + text: Minimize Container Registries to only those approved + type: manual + remediation: |- + To minimize AWS ECR container registries to only those approved, you can follow these steps: + + 1. Define your approval criteria: Determine the criteria that containers must meet to be considered approved. This can include factors such as security, compliance, compatibility, and other requirements. + + 2. Identify all existing ECR registries: Identify all ECR registries that are currently being used in your organization. + + 3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry against your approval criteria to determine whether it should be approved or not. This can be done by reviewing the registry settings and configuration, as well as conducting security assessments and vulnerability scans. + + 4. Establish policies and procedures: Establish policies and procedures that outline how ECR registries will be approved, maintained, and monitored. This should include guidelines for developers to follow when selecting a registry for their container images. + + 5. Implement access controls: Implement access controls to ensure that only approved ECR registries are used to store and distribute container images. This can be done by setting up IAM policies and roles that restrict access to unapproved registries or create a whitelist of approved registries. + + 6. Monitor and review: Continuously monitor and review the use of ECR registries to ensure that they continue to meet your approval criteria. This can include regularly reviewing access logs, scanning for vulnerabilities, and conducting periodic audits. + + By following these steps, you can minimize AWS ECR container registries to only those approved, which can help to improve security, reduce complexity, and streamline container management in your organization. Additionally, AWS provides several tools and services that can help you manage your ECR registries, such as AWS Config, AWS CloudFormation, and AWS Identity and Access Management (IAM). + scored: false + - id: "5.2" + text: Identity and Access Management (IAM) + checks: + - id: 5.2.1 + text: Prefer using dedicated EKS Service Accounts + type: manual + remediation: |- + With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. + + Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. 
Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance’s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod’s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services. + + The IAM roles for service accounts feature provides the following benefits: + + - Least privilege — By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam. + - Credential isolation — A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. A container never has access to credentials that are intended for another container that belongs to another pod. + - Audit-ability — Access and event logging is available through CloudTrail to help ensure retrospective auditing. + + To get started, see Enabling IAM roles for service accounts on your cluster. + + For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts. + scored: false + - id: "5.3" + text: AWS EKS Key Management Service + checks: + - id: 5.3.1 + text: Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS + type: manual + remediation: "This process can only be performed during Cluster Creation. \n\nEnable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section." + scored: false + - id: "5.4" + text: Cluster Networking + checks: + - id: 5.4.1 + text: Restrict Access to the Control Plane Endpoint + type: automated + remediation: |- + By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server. + + With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled. + + If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0']. + + For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix): + + ``aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32"`` + + Note: + + The CIDR blocks specified cannot include reserved addresses. + There is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section. + For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.
+ scored: false + - id: 5.4.2 + text: Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled + type: automated + remediation: |- + By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. + + With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled. + + For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted: + + ``aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`` + + Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section. + scored: false + - id: 5.4.3 + text: Ensure clusters are created with Private Nodes + type: automated + remediation: |- + ``` + aws eks update-cluster-config \ + --region region-code \ + --name my-cluster \ + --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32",endpointPrivateAccess=true + ``` + scored: false + - id: 5.4.4 + text: Ensure Network Policy is Enabled and set as appropriate + type: automated + remediation: Utilize Calico or other network policy engine to segment and isolate your traffic. + scored: false + - id: 5.4.5 + text: Encrypt traffic to HTTPS load balancers with TLS certificates + type: manual + remediation: Your load balancer vendor can provide details on configuring HTTPS with TLS. + scored: false + - id: "5.5" + text: Authentication and Authorization + checks: + - id: 5.5.1 + text: Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater + type: manual + remediation: |- + Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation. + + Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore. + + The relevant AWS CLI commands, depending on the use case, are: + ``` + aws eks update-kubeconfig + aws eks get-token + ``` + scored: false + - id: "5.6" + text: Other Cluster Configurations + checks: + - id: 5.6.1 + text: Consider Fargate for running untrusted workloads + type: manual + remediation: |- + **Create a Fargate profile for your cluster** + Before you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile. + + **Note** + If you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate. + + **via eksctl CLI** + Create your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required. + + ``` + eksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value + ``` + + **via AWS Management Console** + + To create a Fargate profile for a cluster with the AWS Management Console + 1. 
Open the Amazon EKS console at https://console.aws.amazon.com/eks/home#/clusters. + 1. Choose the cluster to create a Fargate profile for. + 1. Under Fargate profiles, choose Add Fargate profile. + 1. On the Configure Fargate profile page, enter the following information and choose Next. + + - For Name, enter a unique name for your Fargate profile. + - For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role. + - For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets. + - For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods. + + 5. On the Configure pods selection page, enter the following information and choose Next. + - For Namespace, enter a namespace to match for pods, such as kube-system or default. + - Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector. + + 6. On the Review and create page, review the information for your Fargate profile and choose Create. + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/master.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/master.yaml new file mode 100644 index 00000000..93572d38 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/master.yaml @@ -0,0 +1,5 @@ +controls: "" +version: eks-1.4.0 +id: 1 +text: Control Plane Components +type: master diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/node.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/node.yaml new file mode 100644 index 00000000..a0657045 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/node.yaml @@ -0,0 +1,384 @@ +controls: "" +version: eks-1.4.0 +id: 3 +text: Worker Nodes +type: node +groups: + - id: "3.1" + text: Worker Node Configuration Files + checks: + - id: 3.1.1 + text: Ensure that the kubeconfig file permissions are set to 644 or more restrictive + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + type: manual + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the below command (based on the file location on your system) on the each worker + node. For example, + ``` + chmod 644 + ``` + scored: false + - id: 3.1.2 + text: Ensure that the kubelet kubeconfig file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + type: manual + tests: + test_items: + - flag: root:root + remediation: "Run the below command (based on the file location on your system) on each worker node. 
\n\nFor example,\n\n```\nchown root:root \n```" + scored: false + - id: 3.1.3 + text: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + type: manual + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the following command (using the config file location identified in the Audit step) + + ``` + chmod 644 /etc/kubernetes/kubelet/kubelet-config.json + ``` + scored: false + - id: 3.1.4 + text: Ensure that the kubelet configuration file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: manual + tests: + test_items: + - flag: root:root + remediation: |- + Run the following command (using the config file location identified in the Audit step) + + ``` + chown root:root /etc/kubernetes/kubelet/kubelet-config.json + ``` + scored: false + - id: "3.2" + text: Kubelet + checks: + - id: 3.2.1 + text: Ensure that the Anonymous Auth is Not Enabled + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --anonymous-auth + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: "false" + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nDisable Anonymous Authentication by setting the following parameter:\n\n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n```\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--anonymous-auth=false\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.2 + text: Ensure that the --authorization-mode argument is not set to AlwaysAllow + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + set: true + compare: + op: nothave + value: AlwaysAllow + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nEnable Webhook Authentication by setting the following parameter:\n\n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n```\n\nNext, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n```\n\"authorization\": { \"mode\": \"Webhook }\n```\n\nFiner detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.3 + text: Ensure that a Client CA File is Configured + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nConfigure the client certificate authority file by setting the following parameter appropriately:\n\n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n```\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--client-ca-file=\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.4 + text: Ensure that the --read-only-port is disabled + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: manual + tests: + test_items: + - flag: --read-only-port + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: "0" + remediation: |- + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0 + + ``` + "readOnlyPort": 0 + ``` + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --read-only-port=0 + ``` + + For each remediation: + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: false + - id: 3.2.5 + text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: "0" + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s + + ``` + "streamingConnectionIdleTimeout": "4h0m0s" + ``` + + You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file. 
+ + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --streaming-connection-idle-timeout=4h0m0s + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"streamingConnectionIdleTimeout":` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: true + - id: 3.2.6 + text: Ensure that the --make-iptables-util-chains argument is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: "true" + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + + ``` + "makeIPTablesUtilChains": true + ``` + + Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --make-iptables-util-chains=true + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"makeIPTablesUtilChains": true` by extracting the live configuration from the nodes running kubelet.
+ + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: true + - id: 3.2.7 + text: Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture + audit: '/bin/ps -fC $kubeletbin ' + type: automated + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: gte + value: "0" + remediation: |- + If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level. + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + + Based on your system, restart the `kubelet` service. For example: + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + ``` + scored: false + - id: 3.2.8 + text: Ensure that the --rotate-certificates argument is not present or is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: manual + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: true + compare: + op: eq + value: "true" + - flag: --rotate-certificates + path: '{.rotateCertificates}' + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + + ``` + "RotateCertificate":true + ``` + + Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.
+ + ``` + --RotateCertificate=true + ``` + scored: true + - id: 3.2.9 + text: Ensure that the RotateKubeletServerCertificate argument is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --rotate-kubelet-server-certificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: "true" + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + + ``` + "featureGates": { + "RotateKubeletServerCertificate":true + }, + ``` + + Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --rotate-kubelet-server-certificate=true + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"RotateKubeletServerCertificate":` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediation methods:** + Restart the `kubelet` service and check status. The example below is for when using systemctl to manage services: + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: false + - id: "3.3" + text: Container Optimized OS + checks: + - id: 3.3.1 + text: Prefer using a container-optimized OS when possible + type: manual + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/policies.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/policies.yaml new file mode 100644 index 00000000..2985d901 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.4.0/policies.yaml @@ -0,0 +1,203 @@ +controls: "" +version: eks-1.4.0 +id: 4 +text: Policies +type: policies +groups: + - id: "4.1" + text: RBAC and Service Accounts + checks: + - id: 4.1.1 + text: Ensure that the cluster-admin role is only used where required + type: manual + remediation: "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges. \n\nWhere possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n```\nkubectl delete clusterrolebinding [name]\n```" + scored: false + - id: 4.1.2 + text: Minimize access to secrets + type: manual + remediation: Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster. 
+ scored: false + - id: 4.1.3 + text: Minimize wildcard use in Roles and ClusterRoles + type: automated + remediation: Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions. + scored: false + - id: 4.1.4 + text: Minimize access to create pods + type: manual + remediation: Where possible, remove `create` access to `pod` objects in the cluster. + scored: false + - id: 4.1.5 + text: Ensure that default service accounts are not actively used. + type: manual + remediation: "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\nModify the configuration of each default service account to include this value \n\n```\nautomountServiceAccountToken: false\n```\n\nAutomatic remediation for the default account:\n\n`kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`" + scored: false + - id: 4.1.6 + text: Ensure that Service Account Tokens are only mounted where necessary + type: manual + remediation: Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it. + scored: false + - id: 4.1.7 + text: Avoid use of system:masters group + type: manual + remediation: Remove the `system:masters` group from all users in the cluster. + scored: false + - id: 4.1.8 + text: Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster + type: manual + remediation: Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + - id: "4.2" + text: Pod Security Standards + checks: + - id: 4.2.1 + text: Minimize the admission of privileged containers + type: manual + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers. + + To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce label with the policy value you want to enforce. + + `kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted` + + The above command enforces the restricted policy for the NAMESPACE namespace. + + You can also enable Pod Security Admission for all your namespaces. For example: + + `kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline` + scored: false + - id: 4.2.2 + text: Minimize the admission of containers wishing to share the host process ID namespace + type: manual + remediation: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers. + scored: false + - id: 4.2.3 + text: Minimize the admission of containers wishing to share the host IPC namespace + type: manual + remediation: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers. + scored: false + - id: 4.2.4 + text: Minimize the admission of containers wishing to share the host network namespace + type: manual + remediation: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers. + scored: false + - id: 4.2.5 + text: Minimize the admission of containers with allowPrivilegeEscalation + type: manual + remediation: Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. 
+ scored: false + - id: 4.2.6 + text: Minimize the admission of root containers + type: manual + remediation: Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + - id: 4.2.7 + text: Minimize the admission of containers with added capabilities + type: manual + remediation: Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array. + scored: false + - id: 4.2.8 + text: Minimize the admission of containers with capabilities assigned + type: manual + remediation: Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a policy which forbids the admission of containers which do not drop all capabilities. + scored: false + - id: "4.3" + text: CNI Plugin + checks: + - id: 4.3.1 + text: Ensure CNI plugin supports network policies. + type: manual + remediation: As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico. + scored: false + - id: 4.3.2 + text: Ensure that all Namespaces have Network Policies defined + type: manual + remediation: Follow the documentation and create `NetworkPolicy` objects as you need them. + scored: false + - id: "4.4" + text: Secrets Management + checks: + - id: 4.4.1 + text: Prefer using secrets as files over secrets as environment variables + type: manual + remediation: If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables. + scored: false + - id: 4.4.2 + text: Consider external secret storage + type: manual + remediation: Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution. + scored: false + - id: "4.5" + text: General Policies + checks: + - id: 4.5.1 + text: Create administrative boundaries between resources using namespaces + type: manual + remediation: Follow the documentation and create namespaces for objects in your deployment as you need them. + scored: false + - id: 4.5.2 + text: Apply Security Context to Your Pods and Containers + type: manual + remediation: |- + As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this: + + ``` + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + ``` + + This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added. + + Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path. + scored: false + - id: 4.5.3 + text: The default namespace should not be used + type: manual + remediation: Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/config.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/config.yaml new file mode 100644 index 00000000..b7839455 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/controlplane.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/controlplane.yaml new file mode 100644 index 00000000..22b6539f --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/controlplane.yaml @@ -0,0 +1,14 @@ +controls: "" +version: eks-1.5.0 +id: 2 +text: Control Plane Configuration +type: controlplane +groups: + - id: "2.1" + text: Logging + checks: + - id: 2.1.1 + text: Enable audit Logs + type: automated + remediation: "**From Console:**\n1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n```\nAPI server: Enabled\nAudit: Enabled \nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n```\n5. 
Click 'Save Changes'.\n\n**From CLI:**\n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n```" + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/managedservices.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/managedservices.yaml new file mode 100644 index 00000000..aebec77d --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/managedservices.yaml @@ -0,0 +1,255 @@ +controls: "" +version: eks-1.5.0 +id: 5 +text: Managed services +type: managedservices +groups: + - id: "5.1" + text: Image Registry and Image Scanning + checks: + - id: 5.1.1 + text: Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider + type: automated + remediation: |- + To utilize AWS ECR for Image scanning please follow the steps below: + + To create a repository configured for scan on push (AWS CLI) + ``` + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + ``` + + To edit the settings of an existing repository (AWS CLI) + ``` + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + ``` + + Use the following steps to start a manual image scan using the AWS Management Console. + + 1. Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. + 2. From the navigation bar, choose the Region to create your repository in. + 3. In the navigation pane, choose Repositories. + 4. On the Repositories page, choose the repository that contains the image to scan. + 5. On the Images page, select the image to scan and then choose Scan. + scored: false + - id: 5.1.2 + text: Minimize user access to Amazon ECR + type: manual + remediation: |- + Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide. + + **Topics** + + - Amazon ECR Identity-Based Policies + - Amazon ECR Resource-Based Policies + - Authorization Based on Amazon ECR Tags + - Amazon ECR IAM Roles + + **Amazon ECR Identity-Based Policies** + + With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide. + + **Actions** + The Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation. + + Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. 
Amazon ECR defines its own set of actions that describe tasks that you can perform with this service. + + To specify multiple actions in a single statement, separate them with commas as follows: + + `"Action": [ + "ecr:action1", + "ecr:action2"` + + You can specify multiple actions using wildcards (*). For example, to specify all actions that begin with the word Describe, include the following action: + + `"Action": "ecr:Describe*"` + + To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide. + + **Resources** + The Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (*) to indicate that the statement applies to all resources. + + An Amazon ECR repository resource has the following ARN: + + `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}` + + For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces. + + For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN: + + `"Resource": "arn:aws:ecr:us-east-1:123456789012:repository/my-repo"` + + To specify all repositories that belong to a specific account, use the wildcard (*): + + `"Resource": "arn:aws:ecr:us-east-1:123456789012:repository/*"` + + To specify multiple resources in a single statement, separate the ARNs with commas. + + `"Resource": [ + "resource1", + "resource2"` + + To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry. + + **Condition Keys** + The Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request. + + If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted. + + You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide. + + Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide. + + Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control. + + To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry. 
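+ As one illustrative option (not part of the benchmark text itself), read-only pull access can be granted by attaching the AWS-managed `AmazonEC2ContainerRegistryReadOnly` policy rather than broader `ecr:*` permissions; the role name below is a placeholder:
+
+ ```
+ aws iam attach-role-policy \
+   --role-name EKSDeveloperRole \
+   --policy-arn arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
+ ```
+
+ The same policy ARN can be attached to an individual IAM user with `aws iam attach-user-policy`.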
+ scored: false + - id: 5.1.3 + text: Minimize cluster access to read-only for Amazon ECR + type: manual + remediation: |- + You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites. + + The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR. + + ``` + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken" + ], + "Resource": "*" + } + ] + } + ``` + scored: false + - id: 5.1.4 + text: Minimize Container Registries to only those approved + type: manual + remediation: |- + To minimize AWS ECR container registries to only those approved, you can follow these steps: + + 1. Define your approval criteria: Determine the criteria that containers must meet to be considered approved. This can include factors such as security, compliance, compatibility, and other requirements. + + 2. Identify all existing ECR registries: Identify all ECR registries that are currently being used in your organization. + + 3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry against your approval criteria to determine whether it should be approved or not. This can be done by reviewing the registry settings and configuration, as well as conducting security assessments and vulnerability scans. + + 4. Establish policies and procedures: Establish policies and procedures that outline how ECR registries will be approved, maintained, and monitored. This should include guidelines for developers to follow when selecting a registry for their container images. + + 5. Implement access controls: Implement access controls to ensure that only approved ECR registries are used to store and distribute container images. This can be done by setting up IAM policies and roles that restrict access to unapproved registries or create a whitelist of approved registries. + + 6. Monitor and review: Continuously monitor and review the use of ECR registries to ensure that they continue to meet your approval criteria. This can include regularly reviewing access logs, scanning for vulnerabilities, and conducting periodic audits. + + By following these steps, you can minimize AWS ECR container registries to only those approved, which can help to improve security, reduce complexity, and streamline container management in your organization. Additionally, AWS provides several tools and services that can help you manage your ECR registries, such as AWS Config, AWS CloudFormation, and AWS Identity and Access Management (IAM). + scored: false + - id: "5.2" + text: Identity and Access Management (IAM) + checks: + - id: 5.2.1 + text: Prefer using dedicated EKS Service Accounts + type: automated + remediation: |- + With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. + + Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. 
Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance’s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod’s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services. + + The IAM roles for service accounts feature provides the following benefits: + + - Least privilege — By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam. + - Credential isolation — A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. A container never has access to credentials that are intended for another container that belongs to another pod. + - Audit-ability — Access and event logging is available through CloudTrail to help ensure retrospective auditing. + + To get started, see list text hereEnabling IAM roles for service accounts on your cluster. + + For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts. + scored: false + - id: "5.3" + text: AWS EKS Key Management Service + checks: + - id: 5.3.1 + text: Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS + type: manual + remediation: "This process can only be performed during Cluster Creation. \n\nEnable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section." + scored: false + - id: "5.4" + text: Cluster Networking + checks: + - id: 5.4.1 + text: Restrict Access to the Control Plane Endpoint + type: automated + remediation: |- + By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server. + + With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled. + + If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0']. + + For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix): + + ``aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPrivateAccess=true, publicAccessCidrs="203.0.113.5/32"`` + + Note: + + The CIDR blocks specified cannot include reserved addresses. + There is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section. + For more detailed information, see the EKS Cluster Endpoint documentation link in the references section. 
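+ Spelled out with both endpoint settings (public access is controlled by `endpointPublicAccess`, not by repeating `endpointPrivateAccess`), the command takes the following form:
+
+ ```
+ aws eks update-cluster-config \
+   --region "$AWS_REGION" \
+   --name "$CLUSTER_NAME" \
+   --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32"
+ ```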
+ scored: false + - id: 5.4.2 + text: Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled + type: automated + remediation: |- + By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. + + With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled. + + For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted: + + ``aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`` + + Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section. + scored: false + - id: 5.4.3 + text: Ensure clusters are created with Private Nodes + type: automated + remediation: |- + ``` + aws eks update-cluster-config \ + --region region-code \ + --name my-cluster \ + --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32",endpointPrivateAccess=true + ``` + scored: false + - id: 5.4.4 + text: Ensure Network Policy is Enabled and set as appropriate + type: automated + remediation: Utilize Calico or other network policy engine to segment and isolate your traffic. + scored: false + - id: 5.4.5 + text: Encrypt traffic to HTTPS load balancers with TLS certificates + type: manual + remediation: Your load balancer vendor can provide details on configuring HTTPS with TLS. + scored: false + - id: "5.5" + text: Authentication and Authorization + checks: + - id: 5.5.1 + text: Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater + type: manual + remediation: |- + Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation. + + Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore. + + The relevant AWS CLI commands, depending on the use case, are: + ``` + aws eks update-kubeconfig + aws eks get-token + ``` + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/master.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/master.yaml new file mode 100644 index 00000000..42e18441 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/master.yaml @@ -0,0 +1,5 @@ +controls: "" +version: eks-1.5.0 +id: 1 +text: Control Plane Components +type: master diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/node.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/node.yaml new file mode 100644 index 00000000..c1b6f371 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/node.yaml @@ -0,0 +1,369 @@ +controls: "" +version: eks-1.5.0 +id: 3 +text: Worker Nodes +type: node +groups: + - id: "3.1" + text: Worker Node Configuration Files + checks: + - id: 3.1.1 + text: Ensure that the kubeconfig file permissions are set to 644 or more restrictive + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + type: automated + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the below command (based on the file location on your system) on the each worker + node. 
For example, + ``` + chmod 644 + ``` + scored: false + - id: 3.1.2 + text: Ensure that the kubelet kubeconfig file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + type: automated + tests: + test_items: + - flag: root:root + remediation: "Run the below command (based on the file location on your system) on each worker node. \n\nFor example,\n\n```\nchown root:root \n```" + scored: false + - id: 3.1.3 + text: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + type: automated + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the following command (using the config file location identified in the Audit step) + + ``` + chmod 644 /etc/kubernetes/kubelet/kubelet-config.json + ``` + scored: false + - id: 3.1.4 + text: Ensure that the kubelet configuration file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: automated + tests: + test_items: + - flag: root:root + remediation: |- + Run the following command (using the config file location identified in the Audit step) + + ``` + chown root:root /etc/kubernetes/kubelet/kubelet-config.json + ``` + scored: false + - id: "3.2" + text: Kubelet + checks: + - id: 3.2.1 + text: Ensure that the Anonymous Auth is Not Enabled + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --anonymous-auth + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: "false" + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nDisable Anonymous Authentication by setting the following parameter:\n\n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n```\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--anonymous-auth=false\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.2 + text: Ensure that the --authorization-mode argument is not set to AlwaysAllow + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + set: true + compare: + op: nothave + value: AlwaysAllow + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nEnable Webhook Authentication by setting the following parameter:\n\n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n```\n\nNext, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n```\n\"authorization\": { \"mode\": \"Webhook }\n```\n\nFiner detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.3 + text: Ensure that a Client CA File is Configured + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nConfigure the client certificate authority file by setting the following parameter appropriately:\n\n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n```\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--client-ca-file=\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.4 + text: Ensure that the --read-only-port is disabled + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --read-only-port + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: "0" + remediation: |- + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0 + + ``` + "readOnlyPort": 0 + ``` + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --read-only-port=0 + ``` + + For each remediation: + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: false + - id: 3.2.5 + text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: "0" + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s + + ``` + "streamingConnectionIdleTimeout": "4h0m0s" + ``` + + You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file. 
+ + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --streaming-connection-idle-timeout=4h0m0s + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"streamingConnectionIdleTimeout":` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: true + - id: 3.2.6 + text: Ensure that the --make-iptables-util-chains argument is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: "true" + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + + ``` + "makeIPTablesUtilChains": true + ``` + + Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --make-iptables-util-chains:true + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"makeIPTablesUtilChains.: true` by extracting the live configuration from the nodes running kubelet. 
+ + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + - id: 3.2.7 + text: Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture + audit: '/bin/ps -fC $kubeletbin ' + type: automated + tests: + test_items: + - flag: --hostname-override + remediation: |- + If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level. + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + + Based on your system, restart the `kubelet` service. For example: + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + ``` + scored: false + - id: 3.2.8 + text: Ensure that the --rotate-certificates argument is not present or is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: gte + value: "0" + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + + ``` + "RotateCertificate":true + ``` + + Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --RotateCertificate=true + ``` + scored: true + - id: 3.2.9 + text: Ensure that the RotateKubeletServerCertificate argument is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --rotate-kubelet-server-certificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: "true" + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + + ``` + "featureGates": { + "RotateKubeletServerCertificate":true + }, + ``` + + Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file. 
+ + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --rotate-kubelet-server-certificate=true + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"RotateKubeletServerCertificate":` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediation methods:** + Restart the `kubelet` service and check status. The example below is for when using systemctl to manage services: + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/policies.yaml b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/policies.yaml new file mode 100644 index 00000000..5257150a --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/eks-1.5.0/policies.yaml @@ -0,0 +1,188 @@ +controls: "" +version: eks-1.5.0 +id: 4 +text: Policies +type: policies +groups: + - id: "4.1" + text: RBAC and Service Accounts + checks: + - id: 4.1.1 + text: Ensure that the cluster-admin role is only used where required + type: automated + remediation: "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges. \n\nWhere possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n```\nkubectl delete clusterrolebinding [name]\n```" + scored: false + - id: 4.1.2 + text: Minimize access to secrets + type: automated + remediation: Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster. + scored: false + - id: 4.1.3 + text: Minimize wildcard use in Roles and ClusterRoles + type: automated + remediation: Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions. + scored: false + - id: 4.1.4 + text: Minimize access to create pods + type: automated + remediation: Where possible, remove `create` access to `pod` objects in the cluster. + scored: false + - id: 4.1.5 + text: Ensure that default service accounts are not actively used. 
+ type: automated + remediation: "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\nModify the configuration of each default service account to include this value \n\n```\nautomountServiceAccountToken: false\n```\n\nAutomatic remediation for the default account:\n\n`kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`" + scored: false + - id: 4.1.6 + text: Ensure that Service Account Tokens are only mounted where necessary + type: automated + remediation: Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it. + scored: false + - id: 4.1.7 + text: Avoid use of system:masters group + type: automated + remediation: Remove the `system:masters` group from all users in the cluster. + scored: false + - id: 4.1.8 + text: Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster + type: manual + remediation: Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + - id: "4.2" + text: Pod Security Standards + checks: + - id: 4.2.1 + text: Minimize the admission of privileged containers + type: automated + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers. + + To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce label with the policy value you want to enforce. + + `kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted` + + The above command enforces the restricted policy for the NAMESPACE namespace. + + You can also enable Pod Security Admission for all your namespaces. For example: + + `kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline` + scored: false + - id: 4.2.2 + text: Minimize the admission of containers wishing to share the host process ID namespace + type: automated + remediation: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers. + scored: false + - id: 4.2.3 + text: Minimize the admission of containers wishing to share the host IPC namespace + type: automated + remediation: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers. + scored: false + - id: 4.2.4 + text: Minimize the admission of containers wishing to share the host network namespace + type: automated + remediation: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers. + scored: false + - id: 4.2.5 + text: Minimize the admission of containers with allowPrivilegeEscalation + type: automated + remediation: Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + - id: "4.3" + text: CNI Plugin + checks: + - id: 4.3.1 + text: Ensure CNI plugin supports network policies. + type: manual + remediation: As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico. 
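+ A minimal sketch of such a deny-all policy, applied to a single placeholder namespace (`team-a`), is shown below; it selects every pod in the namespace and allows no ingress or egress traffic:
+
+ ```
+ kubectl apply -f - <<'EOF'
+ apiVersion: networking.k8s.io/v1
+ kind: NetworkPolicy
+ metadata:
+   name: default-deny-all
+   namespace: team-a        # placeholder namespace
+ spec:
+   podSelector: {}
+   policyTypes:
+     - Ingress
+     - Egress
+ EOF
+ ```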
+ scored: false + - id: 4.3.2 + text: Ensure that all Namespaces have Network Policies defined + type: automated + remediation: Follow the documentation and create `NetworkPolicy` objects as you need them. + scored: false + - id: "4.4" + text: Secrets Management + checks: + - id: 4.4.1 + text: Prefer using secrets as files over secrets as environment variables + type: automated + remediation: If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables. + scored: false + - id: 4.4.2 + text: Consider external secret storage + type: manual + remediation: Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution. + scored: false + - id: "4.5" + text: General Policies + checks: + - id: 4.5.1 + text: Create administrative boundaries between resources using namespaces + type: manual + remediation: Follow the documentation and create namespaces for objects in your deployment as you need them. + scored: false + - id: 4.5.2 + text: Apply Security Context to Your Pods and Containers + type: manual + remediation: |- + As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this: + + ``` + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + ``` + + This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added. + + Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path. 
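+ Because PodSecurityPolicy was removed in Kubernetes v1.25, the same restrictions are now usually expressed directly in the pod specification and enforced with Pod Security Admission (see section 4.2). A minimal sketch, with placeholder names and a placeholder image that is built to run as a non-root user:
+
+ ```
+ kubectl apply -f - <<'EOF'
+ apiVersion: v1
+ kind: Pod
+ metadata:
+   name: restricted-example           # placeholder name
+ spec:
+   securityContext:
+     runAsNonRoot: true
+     seccompProfile:
+       type: RuntimeDefault
+   containers:
+     - name: app
+       image: nginxinc/nginx-unprivileged:stable   # placeholder image that runs as non-root
+       securityContext:
+         allowPrivilegeEscalation: false
+         capabilities:
+           drop: ["ALL"]
+ EOF
+ ```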
+ scored: false + - id: 4.5.3 + text: The default namespace should not be used + type: automated + remediation: Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/config.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/config.yaml new file mode 100644 index 00000000..b7839455 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/controlplane.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/controlplane.yaml new file mode 100644 index 00000000..c39d0a0a --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/controlplane.yaml @@ -0,0 +1,17 @@ +controls: "" +version: gke-1.5.0 +id: 2 +text: Control Plane Configuration +type: controlplane +groups: + - id: "2.1" + text: Authentication and Authorization + checks: + - id: 2.1.1 + text: Client certificate authentication should not be used for users + type: manual + remediation: |- + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of client certificates. + + You can remediate the availability of client certificates in your GKE cluster. See Recommendation 5.8.2. + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/managedservices.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/managedservices.yaml new file mode 100644 index 00000000..5a213435 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/managedservices.yaml @@ -0,0 +1,687 @@ +controls: "" +version: gke-1.5.0 +id: 5 +text: Managed services +type: managedservices +groups: + - id: "5.1" + text: Image Registry and Image Scanning + checks: + - id: 5.1.1 + text: Ensure Image Vulnerability Scanning is enabled + type: automated + remediation: |- + #### For Images Hosted in GCR: + + ##### Using Google Cloud Console + + 1. Go to GCR by visiting: [https://console.cloud.google.com/gcr](https://console.cloud.google.com/gcr) + 2. Select Settings and, under the Vulnerability Scanning heading, click the TURN ON button. + + ##### Using Command Line + ``` + gcloud services enable containeranalysis.googleapis.com + ``` + + #### For Images Hosted in AR: + + ##### Using Google Cloud Console + + 1. Go to GCR by visiting: [https://console.cloud.google.com/artifacts](https://console.cloud.google.com/artifacts) + 2. Select Settings and, under the Vulnerability Scanning heading, click the ENABLE button. + + ##### Using Command Line + ``` + gcloud services enable containerscanning.googleapis.com + ``` + scored: false + - id: 5.1.2 + text: Minimize user access to Container Image repositories + type: manual + remediation: "#### For Images Hosted in AR:\n\nUsing Google Cloud Console: \n\n1. Go to Artifacts Browser by visiting [https://console.cloud.google.com/artifacts](https://console.cloud.google.com/artifacts)\n2. From the list of artifacts select each repository with format `Docker`\n3. 
Under the Permissions tab, modify the roles for each member and ensure only authorized users have the Artifact Registry Administrator, Artifact Registry Reader, Artifact Registry Repository Administrator and Artifact Registry Writer roles.\n\nUsing Command Line: \n```\ngcloud artifacts repositories set-iam-policy --location \n```\n\nTo learn how to configure policy files see: https://cloud.google.com/artifact-registry/docs/access-control#grant\n\n#### For Images Hosted in GCR:\nUsing Google Cloud Console: \n\nTo modify roles granted at the GCR bucket level:\n1. Go to Storage Browser by visiting: [https://console.cloud.google.com/storage/browser](https://console.cloud.google.com/storage/browser).\n2. From the list of storage buckets, select `artifacts..appspot.com` for the GCR bucket\n3. Under the Permissions tab, modify permissions of the identified member via the drop-down role menu and change the Role to `Storage Object Viewer` for read-only access.\n\nFor a User or Service account with Project level permissions inherited by the GCR bucket, or the `Service Account User Role`:\n1. Go to IAM by visiting: [https://console.cloud.google.com/iam-admin/iam](https://console.cloud.google.com/iam-admin/iam)\n2. Find the User or Service account to be modified and click on the corresponding pencil icon.\n3. Remove the `create`/`modify` role (`Storage Admin` / `Storage Object Admin` / `Storage Object Creator` / `Service Account User`) on the user or service account.\n4. If required add the `Storage Object Viewer` role - note with caution that this permits the account to view all objects stored in GCS for the project.\n\nUsing Command Line:\n\nTo change roles at the GCR bucket level:\nFirstly, run the following if read permissions are required:\n```\ngsutil iam ch ::objectViewer gs://artifacts..appspot.com\n```\nThen remove the excessively privileged role (`Storage Admin` / `Storage Object Admin` / `Storage Object Creator`) using:\n```\ngsutil iam ch -d :: gs://artifacts..appspot.com\n```\nwhere:\n- `` can be one of the following:\n - `user`, if the `` is a Google account.\n - `serviceAccount`, if `` specifies a Service account.\n - `` can be one of the following:\n - a Google account (for example, `someone@example.com`).\n - a Cloud IAM service account.\n\nTo modify roles defined at the project level and subsequently inherited within the GCR bucket, or the Service Account User role, extract the IAM policy file, modify it accordingly and apply it using:\n```\ngcloud projects set-iam-policy \n```" + scored: false + - id: 5.1.3 + text: Minimize cluster access to read-only for Container Image repositories + type: manual + remediation: "#### For Images Hosted in AR:\n\nUsing Google Cloud Console:\n\n1. Go to Artifacts Browser by visiting [https://console.cloud.google.com/artifacts](https://console.cloud.google.com/artifacts) \n2. From the list of repositories, for each repository with Format Docker\n3. 
Under the Permissions tab, modify the permissions for GKE Service account and ensure that only the Artifact Registry Viewer role is set.\n\nUsing Command Line:\nAdd artifactregistry.reader role\n```\ngcloud artifacts repositories add-iam-policy-binding \\\n--location= \\\n--member='serviceAccount:' \\\n--role='roles/artifactregistry.reader'\n```\n\nRemove any roles other than `artifactregistry.reader`\n\n```\ngcloud artifacts repositories remove-iam-policy-binding \\\n--location \\\n--member='serviceAccount:' \\\n--role=''\n```\n\n#### For Images Hosted in GCR:\n\nUsing Google Cloud Console:\n\nFor an account explicitly granted access to the bucket:\n1. Go to Storage Browser by visiting: [https://console.cloud.google.com/storage/browser](https://console.cloud.google.com/storage/browser).\n 2. From the list of storage buckets, select `artifacts..appspot.com` for the GCR bucket.\n 3. Under the Permissions tab, modify permissions of the identified GKE Service Account via the drop-down role menu and change to the Role to `Storage Object Viewer` for read-only access.\n\nFor an account that inherits access to the bucket through Project level permissions:\n1. Go to IAM console by visiting: [https://console.cloud.google.com/iam-admin](https://console.cloud.google.com/iam-admin).\n2. From the list of accounts, identify the required service account and select the corresponding pencil icon.\n3. Remove the `Storage Admin` / `Storage Object Admin` / `Storage Object Creator` roles.\n4. Add the `Storage Object Viewer` role - note with caution that this permits the account to view all objects stored in GCS for the project.\n5. Click `SAVE`.\n\nUsing Command Line:\n\nFor an account explicitly granted to the bucket:\nFirstly add read access to the Kubernetes Service Account:\n```\ngsutil iam ch ::objectViewer gs://artifacts..appspot.com\n```\nwhere:\n- `` can be one of the following:\n - `user`, if the `` is a Google account.\n - `serviceAccount`, if `` specifies a Service account.\n - `` can be one of the following:\n - a Google account (for example, `someone@example.com`).\n - a Cloud IAM service account.\n\nThen remove the excessively privileged role (`Storage Admin` / `Storage Object Admin` / `Storage Object Creator`) using:\n```\ngsutil iam ch -d :: gs://artifacts..appspot.com\n```\nFor an account that inherits access to the GCR Bucket through Project level permissions, modify the Projects IAM policy file accordingly, then upload it using:\n```\ngcloud projects set-iam-policy \n```" + scored: false + - id: 5.1.4 + text: Minimize Container Registries to only those approved + type: manual + remediation: |- + Using Google Cloud Console: + + 1. Go to Binary Authorization by visiting: [https://console.cloud.google.com/security/binary-authorization](https://console.cloud.google.com/security/binary-authorization) + 2. Enable Binary Authorization API (if disabled). + 3. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 4. Select Kubernetes cluster for which Binary Authorization is disabled. + 5. Within the `Details` pane, under the `Security` heading, click on the pencil icon called `Edit binary authorization`. + 6. Ensure that `Enable Binary Authorization` is checked. + 7. Click `SAVE CHANGES`. + 8. Return to the Binary Authorization by visiting: [https://console.cloud.google.com/security/binary-authorization](https://console.cloud.google.com/security/binary-authorization). + 9. 
Set an appropriate policy for the cluster and enter the approved container registries under Image paths. + + Using Command Line: + + Update the cluster to enable Binary Authorization: + ``` + gcloud container cluster update --enable-binauthz + ``` + Create a Binary Authorization Policy using the Binary Authorization Policy Reference: [https://cloud.google.com/binary-authorization/docs/policy-yaml-reference](https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. + + Import the policy file into Binary Authorization: + ``` + gcloud container binauthz policy import + ``` + scored: false + - id: "5.2" + text: Identity and Access Management (IAM) + checks: + - id: 5.2.1 + text: Ensure GKE clusters are not running using the Compute Engine default service account + type: automated + remediation: |- + Using Google Cloud Console: + + To create a minimally privileged service account: + 1. Go to Service Accounts by visiting: [https://console.cloud.google.com/iam-admin/serviceaccounts](https://console.cloud.google.com/iam-admin/serviceaccounts). + 2. Click on `CREATE SERVICE ACCOUNT`. + 3. Enter Service Account Details. + 4. Click `CREATE AND CONTINUE`. + 5. Within Service Account permissions add the following roles: + - `Logs Writer`. + - `Monitoring Metric Writer`. + - `Monitoring Viewer. + 6. Click `CONTINUE`. + 7. Grant users access to this service account and create keys as required. + 8. Click `DONE`. + + To create a Node pool to use the Service account: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Click on the cluster name within which the Node pool will be launched. + 3. Click on `ADD NODE POOL`. + 4. Within the Node Pool details, select the `Security` subheading, and under `Identity defaults, select the minimally privileged service account from the Service Account drop-down. + 5. Click `CREATE to launch the Node pool. + + Note: The workloads will need to be migrated to the new Node pool, and the old node pools that use the default service account should be deleted to complete the remediation. + + Using Command Line: + + To create a minimally privileged service account: + ``` + gcloud iam service-accounts create --display-name "GKE Node Service Account" + export NODE_SA_EMAIL=gcloud iam service-accounts list --format='value(email)' --filter='displayName:GKE Node Service Account' + ``` + Grant the following roles to the service account: + ``` + export PROJECT_ID=gcloud config get-value project + gcloud projects add-iam-policy-binding --member serviceAccount: --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding --member serviceAccount: --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding --member serviceAccount: --role roles/logging.logWriter + ``` + To create a new Node pool using the Service account, run the following command: + ``` + gcloud container node-pools create --service-account=@.iam.gserviceaccount.com--cluster= --zone + ``` + Note: The workloads will need to be migrated to the new Node pool, and the old node pools that use the default service account should be deleted to complete the remediation. + scored: false + - id: 5.2.2 + text: Prefer using dedicated GCP Service Accounts and Workload Identity + type: manual + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). 
+ 2. From the list of clusters, select the cluster for which Workload Identity is disabled. + 3. Within the `Details` pane, under the `Security` section, click on the pencil icon named `Edit workload identity`. + 4. Enable Workload Identity and set the workload pool to the namespace of the Cloud project containing the cluster, for example: `.svc.id.goog`. + 5. Click `SAVE CHANGES` and wait for the cluster to update. + 6. Once the cluster has updated, select each Node pool within the cluster Details page. + 7. For each Node pool, select `EDIT` within the Node pool Details page + 8. Within the Edit node pool pane, check the 'Enable GKE Metadata Server' checkbox and click `SAVE`. + + Using Command Line: + ``` + gcloud container clusters update --zone --workload-pool .svc.id.goog + ``` + Note that existing Node pools are unaffected. New Node pools default to `--workload-metadata-from-node=GKE_METADATA_SERVER`. + Then, modify existing Node pools to enable `GKE_METADATA_SERVER`: + ``` + gcloud container node-pools update --cluster --zone --workload-metadata=GKE_METADATA + ``` + Workloads may need to be modified in order for them to use Workload Identity as described within: [https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). Also consider the effects on the availability of hosted workloads as Node pools are updated. It may be more appropriate to create new Node Pools. + scored: false + - id: "5.3" + text: Cloud Key Management Service (Cloud KMS) + checks: + - id: 5.3.1 + text: Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS + type: automated + remediation: "To enable Application-layer Secrets Encryption, several configuration items are required. These include: \n- A key ring \n- A key \n- A GKE service account with `Cloud KMS CryptoKey Encrypter/Decrypter` role\n\nOnce these are created, Application-layer Secrets Encryption can be enabled on an existing or new cluster. \n\nUsing Google Cloud Console:\n\nTo create a key\n1. Go to Cloud KMS by visiting [https://console.cloud.google.com/security/kms](https://console.cloud.google.com/security/kms).\n2. Select `CREATE KEY RING`.\n3. Enter a Key ring name and the region where the keys will be stored.\n4. Click `CREATE`.\n5. Enter a Key name and appropriate rotation period within the Create key pane.\n6. Click `CREATE`.\n\nTo enable on a new cluster\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list).\n2. Click `CREATE CLUSTER`, and choose the required cluster mode.\n3. Within the `Security` heading, under `CLUSTER`, check `Encrypt secrets at the application layer` checkbox.\n4. Select the kms key as the customer-managed key and, if prompted, grant permissions to the GKE Service account.\n5. Click `CREATE`.\n\nTo enable on an existing cluster\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list).\n2. Select the cluster to be updated.\n3. Under the Details pane, within the Security heading, click on the pencil named Application-layer secrets encryption.\n4. Enable `Encrypt secrets at the application layer` and choose a kms key.\n5. 
Click `SAVE CHANGES`.\n\nUsing Command Line: \n\nTo create a key:\nCreate a key ring:\n```\ngcloud kms keyrings create --location --project \n```\nCreate a key:\n```\ngcloud kms keys create --location --keyring --purpose encryption --project \n```\nGrant the Kubernetes Engine Service Agent service account the `Cloud KMS CryptoKey Encrypter/Decrypter` role: \n```\ngcloud kms keys add-iam-policy-binding --location --keyring --member serviceAccount: --role roles/cloudkms.cryptoKeyEncrypterDecrypter --project \n```\nTo create a new cluster with Application-layer Secrets Encryption: \n```\ngcloud container clusters create --cluster-version=latest --zone --database-encryption-key projects//locations//keyRings//cryptoKeys/ --project \n```\nTo enable on an existing cluster:\n```\ngcloud container clusters update --zone --database-encryption-key projects//locations//keyRings//cryptoKeys/ --project \n```" + scored: false + - id: "5.4" + text: Node Metadata + checks: + - id: 5.4.1 + text: Ensure legacy Compute Engine instance metadata APIs are Disabled + type: automated + remediation: "The legacy GCE metadata endpoint must be disabled upon the cluster or node-pool creation. For GKE versions 1.12 and newer, the legacy GCE metadata endpoint is disabled by default.\n\nUsing Google Cloud Console:\n\nTo update an existing cluster, create a new Node pool with the legacy GCE metadata endpoint disabled:\n1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. Click on the name of cluster to be upgraded and click ADD NODE POOL.\n3. Navigate to the Metadata pane and ensure that GCE instance metadata is set to the key:value pair of `disable-legacy-endpoints: true`\n4. Click SAVE\n\nWorkloads will need be be migrated from any existing non-conforming Node pools, to the new Node pool, then delete non-conforming Node pools to complete the remediation. \n\nUsing Command Line:\n\nTo update an existing cluster, create a new Node pool with the legacy GCE metadata endpoint disabled:\n```\ngcloud container node-pools create [POOL_NAME] \\\n --metadata disable-legacy-endpoints=true \\\n --cluster [CLUSTER_NAME] \\\n --zone [COMPUTE_ZONE]\n```\nYou will need to migrate workloads from any existing non-conforming Node pools, to the new Node pool, then delete non-conforming Node pools to complete the remediation." + scored: false + - id: 5.4.2 + text: Ensure the GKE Metadata Server is Enabled + type: automated + remediation: "The GKE Metadata Server requires Workload Identity to be enabled on a cluster. Modify the cluster to enable Workload Identity and enable the GKE Metadata Server.\n\nUsing Google Cloud Console\n\n1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. From the list of clusters, select the cluster for which Workload Identity is disabled.\n3. Under the `DETAILS` pane, navigate down to the `Security` subsection.\n4. Click on the pencil icon named `Edit Workload Identity`, click on `Enable Workload Identity` in the pop-up window, and select a workload pool from the drop-down box. By default, it will be the namespace of the Cloud project containing the cluster, for example: `.svc.id.goog`.\n5. Click `SAVE CHANGES` and wait for the cluster to update.\n6. Once the cluster has updated, select each Node pool within the cluster Details page.\n7. For each Node pool, select `EDIT` within the Node pool details page.\n8. 
Within the `Edit node pool` pane, check the `Enable GKE Metadata Server` checkbox.\n9. Click `SAVE`.\n\nUsing Command Line\n```\ngcloud container clusters update --identity-namespace=.svc.id.goog\n```\nNote that existing Node pools are unaffected. New Node pools default to `--workload-metadata-from-node=GKE_METADATA_SERVER`.\n\nTo modify an existing Node pool to enable GKE Metadata Server: \n```\ngcloud container node-pools update --cluster= --workload-metadata-from-node=GKE_METADATA_SERVER\n```\nWorkloads may need modification in order for them to use Workload Identity as described within: [https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)." + scored: false + - id: "5.5" + text: Node Configuration and Maintenance + checks: + - id: 5.5.1 + text: Ensure Container-Optimized OS (cos_containerd) is used for GKE node images + type: automated + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the Kubernetes cluster which does not use COS. + 3. Under the Node pools heading, select the Node Pool that requires alteration. + 4. Click `EDIT`. + 5. Under the Image Type heading click `CHANGE`. + 6. From the pop-up menu select `Container-optimised OS with containerd (cos_containerd) (default)` and click `CHANGE`. + 7. Repeat for all non-compliant Node pools. + + Using Command Line: + + To set the node image to `cos` for an existing cluster's Node pool: + ``` + gcloud container clusters upgrade --image-type cos_containerd --zone --node-pool + ``` + scored: false + - id: 5.5.2 + text: Ensure Node Auto-Repair is enabled for GKE nodes + type: automated + remediation: |- + Using Google Cloud Console + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Select the Kubernetes cluster containing the node pool for which auto-repair is disabled. + 3. Select the Node pool by clicking on the name of the pool. + 4. Navigate to the Node pool details pane and click `EDIT`. + 5. Under the `Management` heading, check the `Enable auto-repair` box. + 6. Click `SAVE`. + 7. Repeat steps 2-6 for every cluster and node pool with auto-repair disabled. + + Using Command Line + + To enable node auto-repair for an existing cluster's Node pool: + ``` + gcloud container node-pools update --cluster --zone --enable-autorepair + ``` + scored: false + - id: 5.5.3 + text: Ensure Node Auto-Upgrade is enabled for GKE nodes + type: automated + remediation: |- + Using Google Cloud Console + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the Kubernetes cluster containing the node pool for which auto-upgrade is disabled. + 3. Select the Node pool by clicking on the name of the pool. + 4. Navigate to the Node pool details pane and click `EDIT`. + 5. Under the Management heading, check the `Enable auto-upgrade` box. + 6. Click `SAVE`. + 7. Repeat steps 2-6 for every cluster and node pool with auto-upgrade disabled. 
+ + Using Command Line + + To enable node auto-upgrade for an existing cluster's Node pool, run the following command: + ``` + gcloud container node-pools update --cluster --zone --enable-autoupgrade + ``` + scored: false + - id: 5.5.4 + text: When creating New Clusters - Automate GKE version management using Release Channels + type: manual + remediation: |- + Currently, cluster Release Channels are only configurable at cluster provisioning time. + + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Click `CREATE`, and choose `CONFIGURE` for the required cluster mode. + 3. Under the Control plane version heading, click the `Release Channels` button. + 4. Select the `Regular` or `Stable` channels from the Release Channel drop-down menu. + 5. Configure the rest of the cluster settings as required. + 6. Click `CREATE`. + + Using Command Line: + Create a new cluster by running the following command: + ``` + gcloud container clusters create --zone --release-channel + ``` + where `` is `stable` or `regular`, according to requirements. + scored: false + - id: 5.5.5 + text: Ensure Shielded GKE Nodes are Enabled + type: automated + remediation: |- + Note: From version 1.18, clusters will have Shielded GKE nodes enabled by default. + + Using Google Cloud Console: + + To update an existing cluster to use Shielded GKE nodes: + 1. Navigate to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the cluster for which `Shielded GKE Nodes` is to be enabled. + 3. Within the `Details` pane, under the `Security` heading, click on the pencil icon named `Edit Shielded GKE nodes`. + 4. Check the box named `Enable Shielded GKE nodes`. + 5. Click `SAVE CHANGES`. + + Using Command Line: + + To migrate an existing cluster, the flag `--enable-shielded-nodes` needs to be specified in the cluster update command: + ``` + gcloud container clusters update --zone --enable-shielded-nodes + ``` + scored: false + - id: 5.5.6 + text: Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled + type: automated + remediation: "Once a Node pool is provisioned, it cannot be updated to enable Integrity Monitoring. New Node pools must be created within the cluster with Integrity Monitoring enabled.\n\nUsing Google Cloud Console\n\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. From the list of clusters, click on the cluster requiring the update and click `ADD NODE POOL`.\n3. Ensure that the 'Integrity monitoring' checkbox is checked under the 'Shielded options' Heading.\n4. 
Click `SAVE`.\n\nWorkloads from existing non-conforming Node pools will need to be migrated to the newly created Node pool, then delete non-conforming Node pools to complete the remediation\n\nUsing Command Line\n\nTo create a Node pool within the cluster with Integrity Monitoring enabled, run the following command: \n```\ngcloud container node-pools create --cluster --zone --shielded-integrity-monitoring\n```\nWorkloads from existing non-conforming Node pools will need to be migrated to the newly created Node pool, then delete non-conforming Node pools to complete the remediation" + scored: false + - id: 5.5.7 + text: Ensure Secure Boot for Shielded GKE Nodes is Enabled + type: automated + remediation: "Once a Node pool is provisioned, it cannot be updated to enable Secure Boot. New Node pools must be created within the cluster with Secure Boot enabled.\n\nUsing Google Cloud Console:\n\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. From the list of clusters, click on the cluster requiring the update and click `ADD NODE POOL`.\n3. Ensure that the `Secure boot` checkbox is checked under the `Shielded options` Heading.\n4. Click `SAVE`.\n\nWorkloads will need to be migrated from existing non-conforming Node pools to the newly created Node pool, then delete the non-conforming pools.\n\nUsing Command Line:\n\nTo create a Node pool within the cluster with Secure Boot enabled, run the following command: \n```\ngcloud container node-pools create --cluster --zone --shielded-secure-boot\n```\n\nWorkloads will need to be migrated from existing non-conforming Node pools to the newly created Node pool, then delete the non-conforming pools." + scored: false + - id: "5.6" + text: Cluster Networking + checks: + - id: 5.6.1 + text: Enable VPC Flow Logs and Intranode Visibility + type: automated + remediation: |- + Enable Intranode Visibility: + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select Kubernetes clusters for which intranode visibility is disabled. + 3. Within the `Details` pane, under the `Network` section, click on the pencil icon named `Edit intranode visibility`. + 4. Check the box next to `Enable Intranode visibility`. + 5. Click `SAVE CHANGES`. + + Using Command Line: + + To enable intranode visibility on an existing cluster, run the following command: + ``` + gcloud container clusters update --enable-intra-node-visibility + ``` + + Enable VPC Flow Logs: + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select Kubernetes clusters for which VPC Flow Logs are disabled. + 3. Select `Nodes` tab. + 4. Select Node Pool without VPC Flow Logs enabled. + 5. Select an Instance Group within the node pool. + 6. Select an `Instance Group Member`. + 7. Select the `Subnetwork` under Network Interfaces. + 8. Click on `EDIT`. + 9. Set Flow logs to `On`. + 10. Click `SAVE`. + + Using Command Line: + 1. Find the subnetwork name associated with the cluster. + ``` + gcloud container clusters describe --region --format json | jq '.subnetwork' + ``` + 2. Update the subnetwork to enable VPC Flow Logs. 
+ ``` + gcloud compute networks subnets update --enable-flow-logs + ``` + scored: false + - id: 5.6.2 + text: Ensure use of VPC-native clusters + type: automated + remediation: "Alias IPs cannot be enabled on an existing cluster. To create a new cluster using Alias IPs, follow the instructions below. \n\nUsing Google Cloud Console:\n\nIf using Standard configuration mode:\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. Click `CREATE CLUSTER`, and select Standard configuration mode.\n3. Configure your cluster as desired , then, click `Networking` under `CLUSTER` in the navigation pane.\n4. In the 'VPC-native' section, leave 'Enable VPC-native (using alias IP)' selected\n5. Click CREATE.\n\nIf using Autopilot configuration mode:\n\nNote that this is VPC-native only and cannot be disable:\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list).\n2. Click CREATE CLUSTER, and select Autopilot configuration mode.\n3. Configure your cluster as required \n4. Click `CREATE`.\n\nUsing Command Line\n\nTo enable Alias IP on a new cluster, run the following command:\n```\ngcloud container clusters create --zone --enable-ip-alias\n```\n\nIf using Autopilot configuration mode:\n```\ngcloud container clusters create-auto --zone \n```" + scored: false + - id: 5.6.3 + text: Ensure Control Plane Authorized Networks is Enabled + type: automated + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Select Kubernetes clusters for which Control Plane Authorized Networks is disabled + 3. Within the Details pane, under the Networking heading, click on the pencil icon named Edit control plane authorised networks. + 4. Check the box next to Enable control plane authorised networks. + 5. Click SAVE CHANGES. + + Using Command Line: + + To enable Control Plane Authorized Networks for an existing cluster, run the following command: + ``` + gcloud container clusters update --zone --enable-master-authorized-networks + ``` + + Along with this, you can list authorized networks using the `--master-authorized-networks` flag which contains a list of up to 20 external networks that are allowed to connect to your cluster's control plane through HTTPS. You provide these networks as a comma-separated list of addresses in CIDR notation (such as `90.90.100.0/24`). + scored: false + - id: 5.6.4 + text: Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled + type: automated + remediation: |- + Once a cluster is created without enabling Private Endpoint only, it cannot be remediated. Rather, the cluster must be recreated. + + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Click CREATE CLUSTER, and choose CONFIGURE for the Standard mode cluster. + 3. Configure the cluster as required then click Networking under CLUSTER in the navigation pane. + 4. Under IPv4 network access, click the Private cluster radio button. + 5. Uncheck the Access control plane using its external IP address checkbox. + 6. In the Control plane IP range textbox, provide an IP range for the control plane. + 7. Configure the other settings as required, and click CREATE. 
+ + Using Command Line: + + Create a cluster with a Private Endpoint enabled and Public Access disabled by including the `--enable-private-endpoint` flag within the cluster create command: + ``` + gcloud container clusters create --enable-private-endpoint + ``` + Setting this flag also requires the setting of `--enable-private-nodes`, `--enable-ip-alias` and `--master-ipv4-cidr=`. + scored: false + - id: 5.6.5 + text: Ensure clusters are created with Private Nodes + type: automated + remediation: |- + Once a cluster is created without enabling Private Nodes, it cannot be remediated. Rather the cluster must be recreated. + + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Click CREATE CLUSTER. + 3. Configure the cluster as required then click Networking under CLUSTER in the navigation pane. + 4. Under IPv4 network access, click the Private cluster radio button. + 5. Configure the other settings as required, and click CREATE. + + Using Command Line: + + To create a cluster with Private Nodes enabled, include the `--enable-private-nodes` flag within the cluster create command: + ``` + gcloud container clusters create --enable-private-nodes + ``` + Setting this flag also requires the setting of `--enable-ip-alias` and `--master-ipv4-cidr=`. + scored: false + - id: 5.6.6 + text: Consider firewalling GKE worker nodes + type: manual + remediation: |- + Using Google Cloud Console: + + 1. Go to Firewall Rules by visiting: [https://console.cloud.google.com/networking/firewalls/list](https://console.cloud.google.com/networking/firewalls/list) + 2. Click CREATE FIREWALL RULE. + 3. Configure the firewall rule as required. Ensure the firewall targets the nodes correctly, either selecting the nodes using tags (under Targets, select Specified target tags, and set Target tags to ``), or using the Service account associated with node (under Targets, select Specified service account, set Service account scope as appropriate, and Target service account to ``). + 4. Click `CREATE`. + + Using Command Line: + + Use the following command to generate firewall rules, setting the variables as appropriate: + ``` + gcloud compute firewall-rules create --network --priority --direction --action --target-tags --target-service-accounts --source-ranges --source-tags --source-service-accounts --destination-ranges --rules + ``` + scored: false + - id: 5.6.7 + text: Ensure Network Policy is Enabled and set as appropriate + type: automated + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the cluster for which Network policy is disabled. + 3. Under the details pane, within the Networking section, click on the pencil icon named Edit network policy. + 4. Set 'Network policy for control plane' to 'Enabled'. + 5. Click `SAVE CHANGES`. + 6. Once the cluster has updated, repeat steps 1-3. + 7. Set 'Network Policy for nodes' to 'Enabled'. + 8. Click `SAVE CHANGES`. 
+ + Using Command Line: + + To enable Network Policy for an existing cluster, firstly enable the Network Policy add-on: + ``` + gcloud container clusters update --zone --update-addons NetworkPolicy=ENABLED + ``` + + Then, enable Network Policy: + ``` + gcloud container clusters update --zone --enable-network-policy + ``` + scored: false + - id: 5.6.8 + text: Ensure use of Google-managed SSL Certificates + type: manual + remediation: |- + If services of `type:LoadBalancer` are discovered, consider replacing the Service with an Ingress. + + To configure the Ingress and use Google-managed SSL certificates, follow the instructions as listed at: [https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs](https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs). + scored: false + - id: "5.7" + text: Logging + checks: + - id: 5.7.1 + text: Ensure Logging and Cloud Monitoring is Enabled + type: automated + remediation: |- + Using Google Cloud Console: + To enable Logging: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the cluster for which Logging is disabled. + 3. Under the details pane, within the Features section, click on the pencil icon named `Edit logging`. + 4. Check the box next to `Enable Logging`. + 5. In the drop-down Components box, select the components to be logged. + 6. Click `SAVE CHANGES`, and wait for the cluster to update. + + To enable Cloud Monitoring: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the cluster for which Logging is disabled. + 3. Under the details pane, within the Features section, click on the pencil icon named `Edit Cloud Monitoring`. + 4. Check the box next to `Enable Cloud Monitoring`. + 5. In the drop-down Components box, select the components to be logged. + 6. Click `SAVE CHANGES`, and wait for the cluster to update. + + Using Command Line: + To enable Logging for an existing cluster, run the following command: + + gcloud container clusters update --zone --logging= + + See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--logging for a list of available components for logging. + + To enable Cloud Monitoring for an existing cluster, run the following command: + + gcloud container clusters update --zone --monitoring= + + See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--monitoring for a list of available components for Cloud Monitoring. + scored: false + - id: 5.7.2 + text: Enable Linux auditd logging + type: manual + remediation: |- + Using Command Line: + + Download the example manifests: + ``` + curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml > cos-auditd-logging.yaml + ``` + Edit the example manifests if needed. Then, deploy them: + ``` + kubectl apply -f cos-auditd-logging.yaml + ``` + Verify that the logging Pods have started. If a different Namespace was defined in the manifests, replace `cos-auditd` with the name of the namespace being used: + ``` + kubectl get pods --namespace=cos-auditd + ``` + scored: false + - id: "5.8" + text: Authentication and Authorization + checks: + - id: 5.8.1 + text: Ensure authentication using Client Certificates is Disabled + type: automated + remediation: "Currently, there is no way to remove a client certificate from an existing cluster. 
Thus a new cluster must be created.\n\nUsing Google Cloud Console\n\n1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. Click CREATE CLUSTER\n3. Configure as required and then click on 'Availability, networking, security, and additional features' section \n4. Ensure that the 'Issue a client certificate' checkbox is not ticked\n5. Click CREATE.\n\nUsing Command Line\n\nCreate a new cluster without a Client Certificate:\n```\ngcloud container clusters create [CLUSTER_NAME] \\\n --no-issue-client-certificate\n```" + scored: false + - id: 5.8.2 + text: Manage Kubernetes RBAC users with Google Groups for GKE + type: manual + remediation: |- + Follow the G Suite Groups instructions at: [https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke). + + Then, create a cluster with: + ``` + gcloud container clusters create --security-group + ``` + Finally create `Roles`, `ClusterRoles`, `RoleBindings`, and `ClusterRoleBindings` that reference the G Suite Groups. + scored: false + - id: 5.8.3 + text: Ensure Legacy Authorization (ABAC) is Disabled + type: automated + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select Kubernetes clusters for which Legacy Authorization is enabled. + 3. Click EDIT. + 4. Set 'Legacy Authorization' to 'Disabled'. + 5. Click SAVE. + + Using Command Line: + + To disable Legacy Authorization for an existing cluster, run the following command: + ``` + gcloud container clusters update --zone --no-enable-legacy-authorization + ``` + scored: false + - id: "5.9" + text: Storage + checks: + - id: 5.9.1 + text: Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) + type: manual + remediation: |- + This cannot be remediated by updating an existing cluster. The node pool must either be recreated or a new cluster created. + + Using Google Cloud Console: + + FOR NODE BOOT DISKS: + + To create a new node pool: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Select Kubernetes clusters for which node boot disk CMEK is disabled. + 3. Click `ADD NODE POOL`. + 4. In the Nodes section, under machine configuration, ensure Boot disk type is `Standard persistent disk` or `SSD persistent disk`. + 5. Select `Enable customer-managed encryption for Boot Disk` and select the Cloud KMS encryption key to be used. + 6. Click `CREATE`. + + To create a new cluster: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Click `CREATE` and click `CONFIGURE` for the required cluster mode. + 3. Under `NODE POOLS`, expand the default-pool list and click `Nodes`. + 4. In the Configure node settings pane, select `Standard persistent disk` or `SSD Persistent Disk` as the Boot disk type. + 5. Select `Enable customer-managed encryption for Boot Disk` check box and choose the Cloud KMS encryption key to be used. + 6. Configure the rest of the cluster settings as required. + 7. Click `CREATE`. + + FOR ATTACHED DISKS: + + This is not possible using Google Cloud Console. 
+ + Using Command Line: + + FOR NODE BOOT DISKS: + + Create a new node pool using customer-managed encryption keys for the node boot disk, of `` either `pd-standard` or `pd-ssd`: + ``` + gcloud container node-pools create --disk-type --boot-disk-kms-key projects//locations//keyRings//cryptoKeys/ + ``` + + Create a cluster using customer-managed encryption keys for the node boot disk, of `` either `pd-standard` or `pd-ssd`: + ``` + gcloud container clusters create --disk-type --boot-disk-kms-key projects//locations//keyRings//cryptoKeys/ + ``` + + FOR ATTACHED DISKS: + + Follow the instructions detailed at: [https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). + scored: false + - id: "5.10" + text: Other Cluster Configurations + checks: + - id: 5.10.1 + text: Ensure Kubernetes Web UI is Disabled + type: automated + remediation: |- + Using Google Cloud Console: + + Currently not possible, due to the add-on having been removed. Must use the command line. + + Using Command Line: + + To disable the Kubernetes Dashboard on an existing cluster, run the following command: + ``` + gcloud container clusters update --zone --update-addons=KubernetesDashboard=DISABLED + ``` + scored: false + - id: 5.10.2 + text: Ensure that Alpha clusters are not used for production workloads + type: automated + remediation: "Alpha features cannot be disabled. To remediate, a new cluster must be created.\n\nUsing Google Cloud Console\n\n1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/](https://console.cloud.google.com/kubernetes/)\n2. Click CREATE CLUSTER, and choose CONFIGURE for the Standard mode cluster.\n3. Note: Within Features in the the CLUSTER section, under the Other heading, Enable Kubernetes alpha features in this cluster will not be available by default. It will only be available if the cluster is created with a Static version for the Control plane version, along with both Automatically upgrade nodes to the next available version and Enable auto-repair being checked under the Node pool details for each node.\n4. Configure the other settings as required and click CREATE.\n\nUsing Command Line: \n\nUpon creating a new cluster \n```\ngcloud container clusters create [CLUSTER_NAME] \\\n --zone [COMPUTE_ZONE]\n```\nDo not use the --enable-kubernetes-alpha argument." + scored: false + - id: 5.10.3 + text: Consider GKE Sandbox for running untrusted workloads + type: manual + remediation: "Once a node pool is created, GKE Sandbox cannot be enabled, rather a new node pool is required. The default node pool (the first node pool in your cluster, created when the cluster is created) cannot use GKE Sandbox.\n\nUsing Google Cloud Console:\n\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/](https://console.cloud.google.com/kubernetes/).\n2. Select a cluster and click `ADD NODE POOL`.\n3. Configure the Node pool with following settings: \n - For the node version, select `v1.12.6-gke.8` or higher.\n - For the node image, select `Container-Optimized OS with Containerd (cos_containerd) (default)`.\n - Under `Security`, select `Enable sandbox with gVisor`.\n4. Configure other Node pool settings as required.\n5. 
Click `SAVE`.\n\nUsing Command Line:\n\nTo enable GKE Sandbox on an existing cluster, a new Node pool must be created, which can be done using:\n```\n gcloud container node-pools create --zone --cluster --image-type=cos_containerd --sandbox=\"type=gvisor\"\n```" + scored: false + - id: 5.10.4 + text: Ensure use of Binary Authorization + type: automated + remediation: |- + Using Google Cloud Console + + 1. Go to Binary Authorization by visiting: [https://console.cloud.google.com/security/binary-authorization](https://console.cloud.google.com/security/binary-authorization). + 2. Enable the Binary Authorization API (if disabled). + 3. Create an appropriate policy for use with the cluster. See [https://cloud.google.com/binary-authorization/docs/policy-yaml-reference](https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. + 4. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 5. Select the cluster for which Binary Authorization is disabled. + 6. Under the details pane, within the Security section, click on the pencil icon named `Edit Binary Authorization`. + 7. Check the box next to `Enable Binary Authorization`. + 8. Choose whether to `Audit`, `Enforce` or both `Audit and Enforce` the policy and provide a directory for the policy to be used. + 9. Click `SAVE CHANGES`. + + Using Command Line: + + Update the cluster to enable Binary Authorization: + ``` + gcloud container cluster update --zone --binauthz-evaluation-mode= + ``` + + See: [https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--binauthz-evaluation-mode](https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--binauthz-evaluation-mode) for more details around the evaluation modes available. + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference: [https://cloud.google.com/binary-authorization/docs/policy-yaml-reference](https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. + + Import the policy file into Binary Authorization: + ``` + gcloud container binauthz policy import + ``` + scored: false + - id: 5.10.5 + text: Enable Cloud Security Command Center (Cloud SCC) + type: manual + remediation: |- + Note: The Security Command Center Asset APIs have been deprecated, pending removal on or after 26th June 2024. Cloud Asset Inventory should be used instead. + + Follow the instructions at: [https://cloud.google.com/security-command-center/docs/quickstart-scc-setup](https://cloud.google.com/security-command-center/docs/quickstart-scc-setup). + scored: false + - id: 5.10.6 + text: Enable Security Posture + type: manual + remediation: |- + Enable security posture via the UI, gCloud or API. 
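+ + For example, a minimal command-line sketch (the cluster name `prod-cluster` and location `us-central1` are illustrative; verify the exact flag names against the gcloud release in use, per the page below): + ``` + gcloud container clusters update prod-cluster --location us-central1 --security-posture=standard + ```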
+ https://cloud.google.com/kubernetes-engine/docs/how-to/protect-workload-configuration + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/master.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/master.yaml new file mode 100644 index 00000000..0add5032 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/master.yaml @@ -0,0 +1,5 @@ +controls: "" +version: gke-1.5.0 +id: 1 +text: Control Plane Components +type: master diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/node.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/node.yaml new file mode 100644 index 00000000..b214bf17 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/node.yaml @@ -0,0 +1,71 @@ +controls: "" +version: gke-1.5.0 +id: 3 +text: Worker Nodes +type: node +groups: + - id: "3.1" + text: Worker Node Configuration Files + checks: + - id: 3.1.1 + text: Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + type: manual + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the below command (based on the file location on your system) on the each worker + node. For example, + ``` + chmod 644 + ``` + scored: false + - id: 3.1.2 + text: Ensure that the proxy kubeconfig file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + type: manual + tests: + test_items: + - flag: root:root + remediation: |- + Run the below command (based on the file location on your system) on each worker node. For example, + + ``` + chown root:root + ``` + scored: false + - id: 3.1.3 + text: Ensure that the kubelet configuration file has permissions set to 600 + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + type: manual + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "600" + remediation: |- + Run the following command (using the kubelet config file location): + + ``` + chmod 600 + ``` + scored: false + - id: 3.1.4 + text: Ensure that the kubelet configuration file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: manual + tests: + test_items: + - flag: root:root + remediation: |- + Run the following command (using the config file location identified in the Audit step): + + ``` + chown root:root + ``` + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/policies.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/policies.yaml new file mode 100644 index 00000000..510bedaf --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.5.0/policies.yaml @@ -0,0 +1,158 @@ +controls: "" +version: gke-1.5.0 +id: 4 +text: Policies +type: policies +groups: + - id: "4.1" + text: RBAC and Service Accounts + checks: + - id: 4.1.1 + text: Ensure that the cluster-admin role is only used where required + type: manual + remediation: "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges. 
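\n\nFor example, existing bindings that reference the cluster-admin role can be listed with a command such as:\n```\nkubectl get clusterrolebindings -o wide | grep cluster-admin\n```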
\n\nWhere possible, first bind users to a lower-privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n```\nkubectl delete clusterrolebinding [name]\n```" + scored: false + - id: 4.1.2 + text: Minimize access to secrets + type: manual + remediation: Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster. + scored: false + - id: 4.1.3 + text: Minimize wildcard use in Roles and ClusterRoles + type: manual + remediation: Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions. + scored: false + - id: 4.1.4 + text: Minimize access to create pods + type: manual + remediation: Where possible, remove `create` access to `pod` objects in the cluster. + scored: false + - id: 4.1.5 + text: Ensure that default service accounts are not actively used + type: manual + remediation: "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\nModify the configuration of each default service account to include this value \n\n```\nautomountServiceAccountToken: false\n```" + scored: true + - id: 4.1.6 + text: Ensure that Service Account Tokens are only mounted where necessary + type: manual + remediation: Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it. + scored: false + - id: 4.1.7 + text: Avoid use of system:masters group + type: manual + remediation: Remove the `system:masters` group from all users in the cluster. + scored: false + - id: 4.1.8 + text: Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster + type: manual + remediation: Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + - id: 4.1.9 + text: Minimize access to create persistent volumes + type: manual + remediation: Where possible, remove `create` access to `PersistentVolume` objects in the cluster. + scored: false + - id: 4.1.10 + text: Minimize access to the proxy sub-resource of nodes + type: manual + remediation: Where possible, remove access to the `proxy` sub-resource of `node` objects. + scored: false + - id: 4.1.11 + text: Minimize access to the approval sub-resource of certificatesigningrequests objects + type: manual + remediation: Where possible, remove access to the `approval` sub-resource of `certificatesigningrequest` objects. + scored: false + - id: 4.1.12 + text: Minimize access to webhook configuration objects + type: manual + remediation: Where possible, remove access to the `validatingwebhookconfigurations` or `mutatingwebhookconfigurations` objects + scored: false + - id: 4.1.13 + text: Minimize access to the service account token creation + type: manual + remediation: Where possible, remove access to the `token` sub-resource of `serviceaccount` objects. + scored: false + - id: "4.2" + text: Pod Security Standards + checks: + - id: 4.2.1 + text: Ensure that the cluster enforces Pod Security Standard Baseline profile or stricter for all namespaces. + type: manual + remediation: |- + Ensure that Pod Security Admission is in place for every namespace which contains user workloads. 
+ + Run the following command to enforce the Baseline profile in a namespace:- + + ``` + kubectl label namespace pod-security.kubernetes.io/enforce=baseline + ``` + scored: false + - id: "4.3" + text: Network Policies and CNI + checks: + - id: 4.3.1 + text: Ensure that the CNI in use supports Network Policies + type: manual + remediation: To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin will be updated. See recommendation 5.6.7. + scored: false + - id: 4.3.2 + text: Ensure that all Namespaces have Network Policies defined + type: manual + remediation: |- + Follow the documentation and create `NetworkPolicy` objects as needed. + See: [https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#creating_a_network_policy](https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#creating_a_network_policy) for more information. + scored: false + - id: "4.4" + text: Secrets Management + checks: + - id: 4.4.1 + text: Prefer using secrets as files over secrets as environment variables + type: manual + remediation: If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables. + scored: false + - id: 4.4.2 + text: Consider external secret storage + type: manual + remediation: Refer to the secrets management options offered by the cloud service provider or a third-party secrets management solution. + scored: false + - id: "4.5" + text: Extensible Admission Control + checks: + - id: 4.5.1 + text: Configure Image Provenance using ImagePolicyWebhook admission controller + type: manual + remediation: |- + Follow the Kubernetes documentation and setup image provenance. + + Also see recommendation 5.10.5. + scored: false + - id: "4.6" + text: General Policies + checks: + - id: 4.6.1 + text: Create administrative boundaries between resources using namespaces + type: manual + remediation: Follow the documentation and create namespaces for objects in your deployment as you need them. + scored: false + - id: 4.6.2 + text: Ensure that the seccomp profile is set to RuntimeDefault in the pod definitions + type: manual + remediation: |- + Use security context to enable the `RuntimeDefault` seccomp profile in your pod definitions. An example is as below: + + ``` + securityContext: + seccompProfile: + type: RuntimeDefault + ``` + scored: false + - id: 4.6.3 + text: Apply Security Context to Pods and Containers + type: manual + remediation: Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Google Container-Optimized OS Benchmark. + scored: false + - id: 4.6.4 + text: The default namespace should not be used + type: manual + remediation: Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. 
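+ + For example (the namespace name `team-a` is illustrative), create a dedicated namespace for new workloads and review what is still running in `default`: + ``` + kubectl create namespace team-a + kubectl get all --namespace default + ```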
+ scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/config.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/config.yaml new file mode 100755 index 00000000..b7839455 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/controlplane.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/controlplane.yaml new file mode 100755 index 00000000..6d6a4744 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/controlplane.yaml @@ -0,0 +1,17 @@ +controls: "" +version: gke-1.6.0 +id: 2 +text: Control Plane Configuration +type: controlplane +groups: + - id: "2.1" + text: Authentication and Authorization + checks: + - id: 2.1.1 + text: Client certificate authentication should not be used for users + type: automated + remediation: |- + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of client certificates. + + You can remediate the availability of client certificates in your GKE cluster. See Recommendation 5.8.1. + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/managedservices.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/managedservices.yaml new file mode 100755 index 00000000..faef3503 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/managedservices.yaml @@ -0,0 +1,653 @@ +controls: "" +version: gke-1.6.0 +id: 5 +text: Managed services +type: managedservices +groups: + - id: "5.1" + text: Image Registry and Image Scanning + checks: + - id: 5.1.1 + text: Ensure Image Vulnerability Scanning is enabled + type: automated + remediation: |- + #### For Images Hosted in GCR: + + ##### Using Google Cloud Console + + 1. Go to GCR by visiting: [https://console.cloud.google.com/gcr](https://console.cloud.google.com/gcr) + 2. Select Settings and, under the Vulnerability Scanning heading, click the TURN ON button. + + ##### Using Command Line + ``` + gcloud services enable containeranalysis.googleapis.com + ``` + + #### For Images Hosted in AR: + + ##### Using Google Cloud Console + + 1. Go to GCR by visiting: [https://console.cloud.google.com/artifacts](https://console.cloud.google.com/artifacts) + 2. Select Settings and, under the Vulnerability Scanning heading, click the ENABLE button. + + ##### Using Command Line + ``` + gcloud services enable containerscanning.googleapis.com + ``` + scored: false + - id: 5.1.2 + text: Minimize user access to Container Image repositories + type: manual + remediation: "#### For Images Hosted in AR:\n\nUsing Google Cloud Console: \n\n1. Go to Artifacts Browser by visiting [https://console.cloud.google.com/artifacts](https://console.cloud.google.com/artifacts)\n2. From the list of artifacts select each repository with format `Docker`\n3. Under the Permissions tab, modify the roles for each member and ensure only authorized users have the Artifact Registry Administrator, Artifact Registry Reader, Artifact Registry Repository Administrator and Artifact Registry Writer roles.\n\nUsing Command Line: \n```\ngcloud artifacts repositories set-iam-policy --location \n```\n\nTo learn how to configure policy files see: https://cloud.google.com/artifact-registry/docs/access-control#grant\n\n#### For Images Hosted in GCR:\nUsing Google Cloud Console: \n\nTo modify roles granted at the GCR bucket level:\n1. 
Go to Storage Browser by visiting: [https://console.cloud.google.com/storage/browser](https://console.cloud.google.com/storage/browser).\n2. From the list of storage buckets, select `artifacts..appspot.com` for the GCR bucket\n3. Under the Permissions tab, modify permissions of the identified member via the drop-down role menu and change the Role to `Storage Object Viewer` for read-only access.\n\nFor a User or Service account with Project level permissions inherited by the GCR bucket, or the `Service Account User Role`:\n1. Go to IAM by visiting: [https://console.cloud.google.com/iam-admin/iam](https://console.cloud.google.com/iam-admin/iam)\n2. Find the User or Service account to be modified and click on the corresponding pencil icon.\n3. Remove the `create`/`modify` role (`Storage Admin` / `Storage Object Admin` / `Storage Object Creator` / `Service Account User`) on the user or service account.\n4. If required add the `Storage Object Viewer` role - note with caution that this permits the account to view all objects stored in GCS for the project.\n\nUsing Command Line:\n\nTo change roles at the GCR bucket level:\nFirstly, run the following if read permissions are required:\n```\ngsutil iam ch ::objectViewer gs://artifacts..appspot.com\n```\nThen remove the excessively privileged role (`Storage Admin` / `Storage Object Admin` / `Storage Object Creator`) using:\n```\ngsutil iam ch -d :: gs://artifacts..appspot.com\n```\nwhere:\n- `` can be one of the following:\n - `user`, if the `` is a Google account.\n - `serviceAccount`, if `` specifies a Service account.\n - `` can be one of the following:\n - a Google account (for example, `someone@example.com`).\n - a Cloud IAM service account.\n\nTo modify roles defined at the project level and subsequently inherited within the GCR bucket, or the Service Account User role, extract the IAM policy file, modify it accordingly and apply it using:\n```\ngcloud projects set-iam-policy \n```" + scored: false + - id: 5.1.3 + text: Minimize cluster access to read-only for Container Image repositories + type: manual + remediation: "#### For Images Hosted in AR:\n\nUsing Google Cloud Console:\n\n1. Go to Artifacts Browser by visiting [https://console.cloud.google.com/artifacts](https://console.cloud.google.com/artifacts) \n2. From the list of repositories, for each repository with Format Docker\n3. Under the Permissions tab, modify the permissions for GKE Service account and ensure that only the Artifact Registry Viewer role is set.\n\nUsing Command Line:\nAdd artifactregistry.reader role\n```\ngcloud artifacts repositories add-iam-policy-binding \\\n--location= \\\n--member='serviceAccount:' \\\n--role='roles/artifactregistry.reader'\n```\n\nRemove any roles other than `artifactregistry.reader`\n\n```\ngcloud artifacts repositories remove-iam-policy-binding \\\n--location \\\n--member='serviceAccount:' \\\n--role=''\n```\n\n#### For Images Hosted in GCR:\n\nUsing Google Cloud Console:\n\nFor an account explicitly granted access to the bucket:\n1. Go to Storage Browser by visiting: [https://console.cloud.google.com/storage/browser](https://console.cloud.google.com/storage/browser).\n 2. From the list of storage buckets, select `artifacts..appspot.com` for the GCR bucket.\n 3. Under the Permissions tab, modify permissions of the identified GKE Service Account via the drop-down role menu and change to the Role to `Storage Object Viewer` for read-only access.\n\nFor an account that inherits access to the bucket through Project level permissions:\n1. 
Go to IAM console by visiting: [https://console.cloud.google.com/iam-admin](https://console.cloud.google.com/iam-admin).\n2. From the list of accounts, identify the required service account and select the corresponding pencil icon.\n3. Remove the `Storage Admin` / `Storage Object Admin` / `Storage Object Creator` roles.\n4. Add the `Storage Object Viewer` role - note with caution that this permits the account to view all objects stored in GCS for the project.\n5. Click `SAVE`.\n\nUsing Command Line:\n\nFor an account explicitly granted to the bucket:\nFirstly add read access to the Kubernetes Service Account:\n```\ngsutil iam ch ::objectViewer gs://artifacts..appspot.com\n```\nwhere:\n- `` can be one of the following:\n - `user`, if the `` is a Google account.\n - `serviceAccount`, if `` specifies a Service account.\n - `` can be one of the following:\n - a Google account (for example, `someone@example.com`).\n - a Cloud IAM service account.\n\nThen remove the excessively privileged role (`Storage Admin` / `Storage Object Admin` / `Storage Object Creator`) using:\n```\ngsutil iam ch -d :: gs://artifacts..appspot.com\n```\nFor an account that inherits access to the GCR Bucket through Project level permissions, modify the Projects IAM policy file accordingly, then upload it using:\n```\ngcloud projects set-iam-policy \n```" + scored: false + - id: 5.1.4 + text: Ensure only trusted container images are used + type: manual + remediation: |- + Using Google Cloud Console: + + 1. Go to Binary Authorization by visiting: [https://console.cloud.google.com/security/binary-authorization](https://console.cloud.google.com/security/binary-authorization) + 2. Enable Binary Authorization API (if disabled). + 3. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 4. Select Kubernetes cluster for which Binary Authorization is disabled. + 5. Within the `Details` pane, under the `Security` heading, click on the pencil icon called `Edit binary authorization`. + 6. Ensure that `Enable Binary Authorization` is checked. + 7. Click `SAVE CHANGES`. + 8. Return to the Binary Authorization by visiting: [https://console.cloud.google.com/security/binary-authorization](https://console.cloud.google.com/security/binary-authorization). + 9. Set an appropriate policy for the cluster and enter the approved container registries under Image paths. + + Using Command Line: + + Update the cluster to enable Binary Authorization: + ``` + gcloud container cluster update --enable-binauthz + ``` + Create a Binary Authorization Policy using the Binary Authorization Policy Reference: [https://cloud.google.com/binary-authorization/docs/policy-yaml-reference](https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. + + Import the policy file into Binary Authorization: + ``` + gcloud container binauthz policy import + ``` + scored: false + - id: "5.2" + text: Identity and Access Management (IAM) + checks: + - id: 5.2.1 + text: Ensure GKE clusters are not running using the Compute Engine default service account + type: automated + remediation: |- + Using Google Cloud Console: + + To create a minimally privileged service account: + 1. Go to Service Accounts by visiting: [https://console.cloud.google.com/iam-admin/serviceaccounts](https://console.cloud.google.com/iam-admin/serviceaccounts). + 2. Click on `CREATE SERVICE ACCOUNT`. + 3. Enter Service Account Details. + 4. Click `CREATE AND CONTINUE`. + 5. 
Within Service Account permissions add the following roles: + - `Logs Writer`. + - `Monitoring Metric Writer`. + - `Monitoring Viewer`. + 6. Click `CONTINUE`. + 7. Grant users access to this service account and create keys as required. + 8. Click `DONE`. + + To create a Node pool to use the Service account: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Click on the cluster name within which the Node pool will be launched. + 3. Click on `ADD NODE POOL`. + 4. Within the Node Pool details, select the `Security` subheading, and under `Identity defaults`, select the minimally privileged service account from the Service Account drop-down. + 5. Click `CREATE` to launch the Node pool. + + Note: The workloads will need to be migrated to the new Node pool, and the old node pools that use the default service account should be deleted to complete the remediation. + + Using Command Line: + + To create a minimally privileged service account: + ``` + gcloud iam service-accounts create --display-name "GKE Node Service Account" + export NODE_SA_EMAIL=$(gcloud iam service-accounts list --format='value(email)' --filter='displayName:GKE Node Service Account') + ``` + Grant the following roles to the service account: + ``` + export PROJECT_ID=$(gcloud config get-value project) + gcloud projects add-iam-policy-binding --member serviceAccount: --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding --member serviceAccount: --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding --member serviceAccount: --role roles/logging.logWriter + ``` + To create a new Node pool using the Service account, run the following command: + ``` + gcloud container node-pools create --service-account=@.iam.gserviceaccount.com --cluster= --zone + ``` + Note: The workloads will need to be migrated to the new Node pool, and the old node pools that use the default service account should be deleted to complete the remediation. + scored: false + - id: 5.2.2 + text: Prefer using dedicated GCP Service Accounts and Workload Identity + type: manual + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. From the list of clusters, select the cluster for which Workload Identity is disabled. + 3. Within the `Details` pane, under the `Security` section, click on the pencil icon named `Edit workload identity`. + 4. Enable Workload Identity and set the workload pool to the namespace of the Cloud project containing the cluster, for example: `.svc.id.goog`. + 5. Click `SAVE CHANGES` and wait for the cluster to update. + 6. Once the cluster has updated, select each Node pool within the cluster Details page. + 7. For each Node pool, select `EDIT` within the Node pool Details page. + 8. Within the Edit node pool pane, check the 'Enable GKE Metadata Server' checkbox and click `SAVE`. + + Using Command Line: + ``` + gcloud container clusters update --zone --workload-pool .svc.id.goog + ``` + Note that existing Node pools are unaffected. New Node pools default to `--workload-metadata-from-node=GKE_METADATA_SERVER`. 
+ Then, modify existing Node pools to enable `GKE_METADATA_SERVER`: + ``` + gcloud container node-pools update --cluster --zone --workload-metadata=GKE_METADATA + ``` + Workloads may need to be modified in order for them to use Workload Identity as described within: [https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). Also consider the effects on the availability of hosted workloads as Node pools are updated. It may be more appropriate to create new Node Pools. + scored: false + - id: "5.3" + text: Cloud Key Management Service (Cloud KMS) + checks: + - id: 5.3.1 + text: Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS + type: automated + remediation: "To enable Application-layer Secrets Encryption, several configuration items are required. These include: \n- A key ring \n- A key \n- A GKE service account with `Cloud KMS CryptoKey Encrypter/Decrypter` role\n\nOnce these are created, Application-layer Secrets Encryption can be enabled on an existing or new cluster. \n\nUsing Google Cloud Console:\n\nTo create a key\n1. Go to Cloud KMS by visiting [https://console.cloud.google.com/security/kms](https://console.cloud.google.com/security/kms).\n2. Select `CREATE KEY RING`.\n3. Enter a Key ring name and the region where the keys will be stored.\n4. Click `CREATE`.\n5. Enter a Key name and appropriate rotation period within the Create key pane.\n6. Click `CREATE`.\n\nTo enable on a new cluster\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list).\n2. Click `CREATE CLUSTER`, and choose the required cluster mode.\n3. Within the `Security` heading, under `CLUSTER`, check `Encrypt secrets at the application layer` checkbox.\n4. Select the kms key as the customer-managed key and, if prompted, grant permissions to the GKE Service account.\n5. Click `CREATE`.\n\nTo enable on an existing cluster\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list).\n2. Select the cluster to be updated.\n3. Under the Details pane, within the Security heading, click on the pencil named Application-layer secrets encryption.\n4. Enable `Encrypt secrets at the application layer` and choose a kms key.\n5. Click `SAVE CHANGES`.\n\nUsing Command Line: \n\nTo create a key:\nCreate a key ring:\n```\ngcloud kms keyrings create --location --project \n```\nCreate a key:\n```\ngcloud kms keys create --location --keyring --purpose encryption --project \n```\nGrant the Kubernetes Engine Service Agent service account the `Cloud KMS CryptoKey Encrypter/Decrypter` role: \n```\ngcloud kms keys add-iam-policy-binding --location --keyring --member serviceAccount: --role roles/cloudkms.cryptoKeyEncrypterDecrypter --project \n```\nTo create a new cluster with Application-layer Secrets Encryption: \n```\ngcloud container clusters create --cluster-version=latest --zone --database-encryption-key projects//locations//keyRings//cryptoKeys/ --project \n```\nTo enable on an existing cluster:\n```\ngcloud container clusters update --zone --database-encryption-key projects//locations//keyRings//cryptoKeys/ --project \n```" + scored: false + - id: "5.4" + text: Node Metadata + checks: + - id: 5.4.1 + text: Ensure the GKE Metadata Server is Enabled + type: automated + remediation: "The GKE Metadata Server requires Workload Identity to be enabled on a cluster. 
Modify the cluster to enable Workload Identity and enable the GKE Metadata Server.\n\nUsing Google Cloud Console\n\n1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. From the list of clusters, select the cluster for which Workload Identity is disabled.\n3. Under the `DETAILS` pane, navigate down to the `Security` subsection.\n4. Click on the pencil icon named `Edit Workload Identity`, click on `Enable Workload Identity` in the pop-up window, and select a workload pool from the drop-down box. By default, it will be the namespace of the Cloud project containing the cluster, for example: `.svc.id.goog`.\n5. Click `SAVE CHANGES` and wait for the cluster to update.\n6. Once the cluster has updated, select each Node pool within the cluster Details page.\n7. For each Node pool, select `EDIT` within the Node pool details page.\n8. Within the `Edit node pool` pane, check the `Enable GKE Metadata Server` checkbox.\n9. Click `SAVE`.\n\nUsing Command Line\n```\ngcloud container clusters update --identity-namespace=.svc.id.goog\n```\nNote that existing Node pools are unaffected. New Node pools default to `--workload-metadata-from-node=GKE_METADATA_SERVER`.\n\nTo modify an existing Node pool to enable GKE Metadata Server: \n```\ngcloud container node-pools update --cluster= --workload-metadata-from-node=GKE_METADATA_SERVER\n```\nWorkloads may need modification in order for them to use Workload Identity as described within: [https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)." + scored: false + - id: "5.5" + text: Node Configuration and Maintenance + checks: + - id: 5.5.1 + text: Ensure Container-Optimized OS (cos_containerd) is used for GKE node images + type: automated + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the Kubernetes cluster which does not use COS. + 3. Under the Node pools heading, select the Node Pool that requires alteration. + 4. Click `EDIT`. + 5. Under the Image Type heading click `CHANGE`. + 6. From the pop-up menu select `Container-optimised OS with containerd (cos_containerd) (default)` and click `CHANGE`. + 7. Repeat for all non-compliant Node pools. + + Using Command Line: + + To set the node image to `cos_containerd` for an existing cluster's Node pool: + ``` + gcloud container clusters upgrade --image-type cos_containerd --zone --node-pool + ``` + scored: false + - id: 5.5.2 + text: Ensure Node Auto-Repair is enabled for GKE nodes + type: automated + remediation: |- + Using Google Cloud Console + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Select the Kubernetes cluster containing the node pool for which auto-repair is disabled. + 3. Select the Node pool by clicking on the name of the pool. + 4. Navigate to the Node pool details pane and click `EDIT`. + 5. Under the `Management` heading, check the `Enable auto-repair` box. + 6. Click `SAVE`. + 7. Repeat steps 2-6 for every cluster and node pool with auto-repair disabled. 
+ + Using Command Line + + To enable node auto-repair for an existing cluster's Node pool: + ``` + gcloud container node-pools update --cluster --zone --enable-autorepair + ``` + scored: false + - id: 5.5.3 + text: Ensure Node Auto-Upgrade is enabled for GKE nodes + type: automated + remediation: |- + Using Google Cloud Console + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the Kubernetes cluster containing the node pool for which auto-upgrade is disabled. + 3. Select the Node pool by clicking on the name of the pool. + 4. Navigate to the Node pool details pane and click `EDIT`. + 5. Under the Management heading, check the `Enable auto-upgrade` box. + 6. Click `SAVE`. + 7. Repeat steps 2-6 for every cluster and node pool with auto-upgrade disabled. + + Using Command Line + + To enable node auto-upgrade for an existing cluster's Node pool, run the following command: + ``` + gcloud container node-pools update --cluster --zone --enable-autoupgrade + ``` + scored: false + - id: 5.5.4 + text: When creating New Clusters - Automate GKE version management using Release Channels + type: automated + remediation: |- + Currently, cluster Release Channels are only configurable at cluster provisioning time. + + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Click `CREATE`, and choose `CONFIGURE` for the required cluster mode. + 3. Under the Control plane version heading, click the `Release Channels` button. + 4. Select the `Regular` or `Stable` channels from the Release Channel drop-down menu. + 5. Configure the rest of the cluster settings as required. + 6. Click `CREATE`. + + Using Command Line: + Create a new cluster by running the following command: + ``` + gcloud container clusters create --zone --release-channel + ``` + where `` is `stable` or `regular`, according to requirements. + scored: false + - id: 5.5.5 + text: Ensure Shielded GKE Nodes are Enabled + type: automated + remediation: |- + Note: From version 1.18, clusters will have Shielded GKE nodes enabled by default. + + Using Google Cloud Console: + + To update an existing cluster to use Shielded GKE nodes: + 1. Navigate to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the cluster for which `Shielded GKE Nodes` is to be enabled. + 3. Within the `Details` pane, under the `Security` heading, click on the pencil icon named `Edit Shielded GKE nodes`. + 4. Check the box named `Enable Shielded GKE nodes`. + 5. Click `SAVE CHANGES`. + + Using Command Line: + + To migrate an existing cluster, the flag `--enable-shielded-nodes` needs to be specified in the cluster update command: + ``` + gcloud container clusters update --zone --enable-shielded-nodes + ``` + scored: false + - id: 5.5.6 + text: Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled + type: automated + remediation: "Once a Node pool is provisioned, it cannot be updated to enable Integrity Monitoring. New Node pools must be created within the cluster with Integrity Monitoring enabled.\n\nUsing Google Cloud Console\n\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. 
From the list of clusters, click on the cluster requiring the update and click `ADD NODE POOL`.\n3. Ensure that the 'Integrity monitoring' checkbox is checked under the 'Shielded options' Heading.\n4. Click `SAVE`.\n\nWorkloads from existing non-conforming Node pools will need to be migrated to the newly created Node pool, then delete non-conforming Node pools to complete the remediation\n\nUsing Command Line\n\nTo create a Node pool within the cluster with Integrity Monitoring enabled, run the following command: \n```\ngcloud container node-pools create --cluster --zone --shielded-integrity-monitoring\n```\nWorkloads from existing non-conforming Node pools will need to be migrated to the newly created Node pool, then delete non-conforming Node pools to complete the remediation" + scored: false + - id: 5.5.7 + text: Ensure Secure Boot for Shielded GKE Nodes is Enabled + type: automated + remediation: "Once a Node pool is provisioned, it cannot be updated to enable Secure Boot. New Node pools must be created within the cluster with Secure Boot enabled.\n\nUsing Google Cloud Console:\n\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. From the list of clusters, click on the cluster requiring the update and click `ADD NODE POOL`.\n3. Ensure that the `Secure boot` checkbox is checked under the `Shielded options` Heading.\n4. Click `SAVE`.\n\nWorkloads will need to be migrated from existing non-conforming Node pools to the newly created Node pool, then delete the non-conforming pools.\n\nUsing Command Line:\n\nTo create a Node pool within the cluster with Secure Boot enabled, run the following command: \n```\ngcloud container node-pools create --cluster --zone --shielded-secure-boot\n```\n\nWorkloads will need to be migrated from existing non-conforming Node pools to the newly created Node pool, then delete the non-conforming pools." + scored: false + - id: "5.6" + text: Cluster Networking + checks: + - id: 5.6.1 + text: Enable VPC Flow Logs and Intranode Visibility + type: automated + remediation: |- + Enable Intranode Visibility: + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select Kubernetes clusters for which intranode visibility is disabled. + 3. Within the `Details` pane, under the `Network` section, click on the pencil icon named `Edit intranode visibility`. + 4. Check the box next to `Enable Intranode visibility`. + 5. Click `SAVE CHANGES`. + + Using Command Line: + + To enable intranode visibility on an existing cluster, run the following command: + ``` + gcloud container clusters update --enable-intra-node-visibility + ``` + + Enable VPC Flow Logs: + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select Kubernetes clusters for which VPC Flow Logs are disabled. + 3. Select `Nodes` tab. + 4. Select Node Pool without VPC Flow Logs enabled. + 5. Select an Instance Group within the node pool. + 6. Select an `Instance Group Member`. + 7. Select the `Subnetwork` under Network Interfaces. + 8. Click on `EDIT`. + 9. Set Flow logs to `On`. + 10. Click `SAVE`. + + Using Command Line: + 1. Find the subnetwork name associated with the cluster. + ``` + gcloud container clusters describe --region --format json | jq '.subnetwork' + ``` + 2. 
Update the subnetwork to enable VPC Flow Logs. + ``` + gcloud compute networks subnets update --enable-flow-logs + ``` + scored: false + - id: 5.6.2 + text: Ensure use of VPC-native clusters + type: automated + remediation: "Alias IPs cannot be enabled on an existing cluster. To create a new cluster using Alias IPs, follow the instructions below. \n\nUsing Google Cloud Console:\n\nIf using Standard configuration mode:\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. Click `CREATE CLUSTER`, and select Standard configuration mode.\n3. Configure your cluster as desired, then click `Networking` under `CLUSTER` in the navigation pane.\n4. In the 'VPC-native' section, leave 'Enable VPC-native (using alias IP)' selected.\n5. Click `CREATE`.\n\nIf using Autopilot configuration mode:\n\nNote that this is VPC-native only and cannot be disabled:\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list).\n2. Click `CREATE CLUSTER`, and select Autopilot configuration mode.\n3. Configure your cluster as required.\n4. Click `CREATE`.\n\nUsing Command Line\n\nTo enable Alias IP on a new cluster, run the following command:\n```\ngcloud container clusters create --zone --enable-ip-alias\n```\n\nIf using Autopilot configuration mode:\n```\ngcloud container clusters create-auto --zone \n```" + scored: false + - id: 5.6.3 + text: Ensure Control Plane Authorized Networks is Enabled + type: automated + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Select Kubernetes clusters for which Control Plane Authorized Networks is disabled + 3. Within the Details pane, under the Networking heading, click on the pencil icon named Edit control plane authorised networks. + 4. Check the box next to Enable control plane authorised networks. + 5. Click SAVE CHANGES. + + Using Command Line: + + To enable Control Plane Authorized Networks for an existing cluster, run the following command: + ``` + gcloud container clusters update --zone --enable-master-authorized-networks + ``` + + Along with this, you can list authorized networks using the `--master-authorized-networks` flag which contains a list of up to 20 external networks that are allowed to connect to your cluster's control plane through HTTPS. You provide these networks as a comma-separated list of addresses in CIDR notation (such as `90.90.100.0/24`). + scored: false + - id: 5.6.4 + text: Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled + type: automated + remediation: |- + Once a cluster is created without enabling Private Endpoint only, it cannot be remediated. Rather, the cluster must be recreated. + + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Click CREATE CLUSTER, and choose CONFIGURE for the Standard mode cluster. + 3. Configure the cluster as required, then click Networking under CLUSTER in the navigation pane. + 4. Under IPv4 network access, click the Private cluster radio button. + 5. Uncheck the Access control plane using its external IP address checkbox. + 6. In the Control plane IP range textbox, provide an IP range for the control plane. + 7. 
Configure the other settings as required, and click CREATE. + + Using Command Line: + + Create a cluster with a Private Endpoint enabled and Public Access disabled by including the `--enable-private-endpoint` flag within the cluster create command: + ``` + gcloud container clusters create --enable-private-endpoint + ``` + Setting this flag also requires the setting of `--enable-private-nodes`, `--enable-ip-alias` and `--master-ipv4-cidr=`. + scored: false + - id: 5.6.5 + text: Ensure clusters are created with Private Nodes + type: automated + remediation: |- + Once a cluster is created without enabling Private Nodes, it cannot be remediated. Rather the cluster must be recreated. + + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Click CREATE CLUSTER. + 3. Configure the cluster as required then click Networking under CLUSTER in the navigation pane. + 4. Under IPv4 network access, click the Private cluster radio button. + 5. Configure the other settings as required, and click CREATE. + + Using Command Line: + + To create a cluster with Private Nodes enabled, include the `--enable-private-nodes` flag within the cluster create command: + ``` + gcloud container clusters create --enable-private-nodes + ``` + Setting this flag also requires the setting of `--enable-ip-alias` and `--master-ipv4-cidr=`. + scored: false + - id: 5.6.6 + text: Consider firewalling GKE worker nodes + type: manual + remediation: |- + Using Google Cloud Console: + + 1. Go to Firewall Rules by visiting: [https://console.cloud.google.com/networking/firewalls/list](https://console.cloud.google.com/networking/firewalls/list) + 2. Click CREATE FIREWALL RULE. + 3. Configure the firewall rule as required. Ensure the firewall targets the nodes correctly, either selecting the nodes using tags (under Targets, select Specified target tags, and set Target tags to ``), or using the Service account associated with node (under Targets, select Specified service account, set Service account scope as appropriate, and Target service account to ``). + 4. Click `CREATE`. + + Using Command Line: + + Use the following command to generate firewall rules, setting the variables as appropriate: + ``` + gcloud compute firewall-rules create --network --priority --direction --action --target-tags --target-service-accounts --source-ranges --source-tags --source-service-accounts --destination-ranges --rules + ``` + scored: false + - id: 5.6.7 + text: Ensure use of Google-managed SSL Certificates + type: automated + remediation: |- + If services of `type:LoadBalancer` are discovered, consider replacing the Service with an Ingress. + + To configure the Ingress and use Google-managed SSL certificates, follow the instructions as listed at: [https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs](https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs). + scored: false + - id: "5.7" + text: Logging + checks: + - id: 5.7.1 + text: Ensure Logging and Cloud Monitoring is Enabled + type: automated + remediation: |- + Using Google Cloud Console: + To enable Logging: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the cluster for which Logging is disabled. + 3. Under the details pane, within the Features section, click on the pencil icon named `Edit logging`. + 4. 
Check the box next to `Enable Logging`. + 5. In the drop-down Components box, select the components to be logged. + 6. Click `SAVE CHANGES`, and wait for the cluster to update. + + To enable Cloud Monitoring: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select the cluster for which Cloud Monitoring is disabled. + 3. Under the details pane, within the Features section, click on the pencil icon named `Edit Cloud Monitoring`. + 4. Check the box next to `Enable Cloud Monitoring`. + 5. In the drop-down Components box, select the components to be monitored. + 6. Click `SAVE CHANGES`, and wait for the cluster to update. + + Using Command Line: + To enable Logging for an existing cluster, run the following command: + + gcloud container clusters update --zone --logging= + + See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--logging for a list of available components for logging. + + To enable Cloud Monitoring for an existing cluster, run the following command: + + gcloud container clusters update --zone --monitoring= + + See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--monitoring for a list of available components for Cloud Monitoring. + scored: false + - id: 5.7.2 + text: Enable Linux auditd logging + type: manual + remediation: |- + Using Command Line: + + Download the example manifests: + ``` + curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml > cos-auditd-logging.yaml + ``` + Edit the example manifests if needed. Then, deploy them: + ``` + kubectl apply -f cos-auditd-logging.yaml + ``` + Verify that the logging Pods have started. If a different Namespace was defined in the manifests, replace `cos-auditd` with the name of the namespace being used: + ``` + kubectl get pods --namespace=cos-auditd + ``` + scored: false + - id: "5.8" + text: Authentication and Authorization + checks: + - id: 5.8.1 + text: Ensure authentication using Client Certificates is Disabled + type: automated + remediation: "Currently, there is no way to remove a client certificate from an existing cluster. Thus a new cluster must be created.\n\nUsing Google Cloud Console\n\n1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)\n2. Click CREATE CLUSTER\n3. Configure as required and then click on the 'Availability, networking, security, and additional features' section \n4. Ensure that the 'Issue a client certificate' checkbox is not ticked\n5. Click CREATE.\n\nUsing Command Line\n\nCreate a new cluster without a Client Certificate:\n```\ngcloud container clusters create [CLUSTER_NAME] \\ \n --no-issue-client-certificate\n```" + scored: false + - id: 5.8.2 + text: Manage Kubernetes RBAC users with Google Groups for GKE + type: manual + remediation: |- + Follow the G Suite Groups instructions at: [https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke). + + Then, create a cluster with: + ``` + gcloud container clusters create --security-group + ``` + Finally, create `Roles`, `ClusterRoles`, `RoleBindings`, and `ClusterRoleBindings` that reference the G Suite Groups. 
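+ + For illustration only (the binding name and Google Group address below are hypothetical placeholders, not values defined by this benchmark), a read-only binding that references a Google Group could be created with: + ``` + # Hypothetical example: grant the built-in "view" ClusterRole to a Google Group + # that is nested under the gke-security-groups group for your domain. + kubectl create clusterrolebinding dev-team-view --clusterrole=view --group=dev-team@example.com + ```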
+ scored: false + - id: 5.8.3 + text: Ensure Legacy Authorization (ABAC) is Disabled + type: automated + remediation: |- + Using Google Cloud Console: + + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 2. Select Kubernetes clusters for which Legacy Authorization is enabled. + 3. Click EDIT. + 4. Set 'Legacy Authorization' to 'Disabled'. + 5. Click SAVE. + + Using Command Line: + + To disable Legacy Authorization for an existing cluster, run the following command: + ``` + gcloud container clusters update --zone --no-enable-legacy-authorization + ``` + scored: false + - id: "5.9" + text: Storage + checks: + - id: 5.9.1 + text: Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) + type: manual + remediation: |- + This cannot be remediated by updating an existing cluster. The node pool must either be recreated or a new cluster created. + + Using Google Cloud Console: + + This is not possible using Google Cloud Console. + + Using Command Line: + + Follow the instructions detailed at: [https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). + scored: false + - id: 5.9.2 + text: Enable Customer-Managed Encryption Keys (CMEK) for Boot Disks + type: automated + remediation: |- + This cannot be remediated by updating an existing cluster. The node pool must either be recreated or a new cluster created. + + Using Google Cloud Console: + + To create a new node pool: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Select Kubernetes clusters for which node boot disk CMEK is disabled. + 3. Click `ADD NODE POOL`. + 4. In the Nodes section, under machine configuration, ensure Boot disk type is `Standard persistent disk` or `SSD persistent disk`. + 5. Select `Enable customer-managed encryption for Boot Disk` and select the Cloud KMS encryption key to be used. + 6. Click `CREATE`. + + To create a new cluster: + 1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list) + 2. Click `CREATE` and click `CONFIGURE` for the required cluster mode. + 3. Under `NODE POOLS`, expand the default-pool list and click `Nodes`. + 4. In the Configure node settings pane, select `Standard persistent disk` or `SSD Persistent Disk` as the Boot disk type. + 5. Select `Enable customer-managed encryption for Boot Disk` check box and choose the Cloud KMS encryption key to be used. + 6. Configure the rest of the cluster settings as required. + 7. Click `CREATE`. + + Using Command Line: + + Create a new node pool using customer-managed encryption keys for the node boot disk, of `` either `pd-standard` or `pd-ssd`: + ``` + gcloud container node-pools create --disk-type --boot-disk-kms-key projects//locations//keyRings//cryptoKeys/ + ``` + + Create a cluster using customer-managed encryption keys for the node boot disk, of `` either `pd-standard` or `pd-ssd`: + ``` + gcloud container clusters create --disk-type --boot-disk-kms-key projects//locations//keyRings//cryptoKeys/ + ``` + scored: false + - id: "5.10" + text: Other Cluster Configurations + checks: + - id: 5.10.1 + text: Ensure Kubernetes Web UI is Disabled + type: automated + remediation: |- + Using Google Cloud Console: + + Currently not possible, due to the add-on having been removed. 
Must use the command line. + + Using Command Line: + + To disable the Kubernetes Dashboard on an existing cluster, run the following command: + ``` + gcloud container clusters update --zone --update-addons=KubernetesDashboard=DISABLED + ``` + scored: false + - id: 5.10.2 + text: Ensure that Alpha clusters are not used for production workloads + type: automated + remediation: "Alpha features cannot be disabled. To remediate, a new cluster must be created.\n\nUsing Google Cloud Console\n\n1. Go to Kubernetes Engine by visiting [https://console.cloud.google.com/kubernetes/](https://console.cloud.google.com/kubernetes/)\n2. Click CREATE CLUSTER, and choose \"SWITCH TO STANDARD CLUSTER\" in the upper right corner of the screen.\n3. Under Features in the CLUSTER section, \"Enable Kubernetes alpha features in this cluster\" is not available by default; to use Kubernetes alpha features in this cluster, first disable release channels. \nNote: It will only be available if the cluster is created with a Static version for the Control plane version, along with both Automatically upgrade nodes to the next available version and Enable auto-repair being checked under the Node pool details for each node.\n4. Configure the other settings as required and click CREATE.\n\nUsing Command Line: \n\nWhen creating a new cluster:\n```\ngcloud container clusters create [CLUSTER_NAME] \\\n --zone [COMPUTE_ZONE]\n```\nDo not use the --enable-kubernetes-alpha argument." + scored: false + - id: 5.10.3 + text: Consider GKE Sandbox for running untrusted workloads + type: automated + remediation: "Once a node pool is created, GKE Sandbox cannot be enabled; rather, a new node pool is required. The default node pool (the first node pool in your cluster, created when the cluster is created) cannot use GKE Sandbox.\n\nUsing Google Cloud Console:\n\n1. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/](https://console.cloud.google.com/kubernetes/).\n2. Select a cluster and click `ADD NODE POOL`.\n3. Configure the Node pool with the following settings: \n - For the node version, select `v1.12.6-gke.8` or higher.\n - For the node image, select `Container-Optimized OS with Containerd (cos_containerd) (default)`.\n - Under `Security`, select `Enable sandbox with gVisor`.\n4. Configure other Node pool settings as required.\n5. Click `SAVE`.\n\nUsing Command Line:\n\nTo enable GKE Sandbox on an existing cluster, a new Node pool must be created, which can be done using:\n```\n gcloud container node-pools create --zone --cluster --image-type=cos_containerd --sandbox=\"type=gvisor\"\n```" + scored: false + - id: 5.10.4 + text: Ensure use of Binary Authorization + type: automated + remediation: |- + Using Google Cloud Console + + 1. Go to Binary Authorization by visiting: [https://console.cloud.google.com/security/binary-authorization](https://console.cloud.google.com/security/binary-authorization). + 2. Enable the Binary Authorization API (if disabled). + 3. Create an appropriate policy for use with the cluster. See [https://cloud.google.com/binary-authorization/docs/policy-yaml-reference](https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. + 4. Go to Kubernetes Engine by visiting: [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list). + 5. Select the cluster for which Binary Authorization is disabled. + 6. 
Under the details pane, within the Security section, click on the pencil icon named `Edit Binary Authorization`. + 7. Check the box next to `Enable Binary Authorization`. + 8. Choose `Enforce` policy and provide a directory for the policy to be used. + 9. Click `SAVE CHANGES`. + + Using Command Line: + + Update the cluster to enable Binary Authorization: + ``` + gcloud container cluster update --zone --binauthz-evaluation-mode= + + Example: + gcloud container clusters update $CLUSTER_NAME --zone $COMPUTE_ZONE --binauthz-evaluation-mode=PROJECT_SINGLETON_POLICY_ENFORCE + + ``` + + See: [https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--binauthz-evaluation-mode](https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--binauthz-evaluation-mode) for more details around the evaluation modes available. + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference: [https://cloud.google.com/binary-authorization/docs/policy-yaml-reference](https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. + + Import the policy file into Binary Authorization: + ``` + gcloud container binauthz policy import + ``` + scored: false + - id: 5.10.5 + text: Enable Security Posture + type: manual + remediation: |- + Enable security posture via the UI, gCloud or API. + https://cloud.google.com/kubernetes-engine/docs/how-to/protect-workload-configuration + scored: false diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/master.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/master.yaml new file mode 100755 index 00000000..0f7b433c --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/master.yaml @@ -0,0 +1,5 @@ +controls: "" +version: gke-1.6.0 +id: 1 +text: Control Plane Components +type: master diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/node.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/node.yaml new file mode 100755 index 00000000..a5a52758 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/node.yaml @@ -0,0 +1,380 @@ +controls: "" +version: gke-1.6.0 +id: 3 +text: Worker Nodes +type: node +groups: + - id: "3.1" + text: Worker Node Configuration Files + checks: + - id: 3.1.1 + text: Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + type: automated + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the below command (based on the file location on your system) on each worker + node. For example, + ``` + chmod 644 + ``` + scored: false + - id: 3.1.2 + text: Ensure that the proxy kubeconfig file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + type: automated + tests: + test_items: + - flag: root:root + remediation: |- + Run the below command (based on the file location on your system) on each worker node. 
For example, + + ``` + chown root:root + ``` + scored: false + - id: 3.1.3 + text: Ensure that the kubelet configuration file has permissions set to 600 + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + type: automated + tests: + test_items: + - flag: permissions + compare: + op: bitmask + value: "644" + remediation: |- + Run the following command (using the kubelet config file location): + + ``` + chmod 600 + ``` + scored: false + - id: 3.1.4 + text: Ensure that the kubelet configuration file ownership is set to root:root + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: automated + tests: + test_items: + - flag: root:root + remediation: |- + Run the following command (using the config file location identified in the Audit step): + + ``` + chown root:root + ``` + scored: false + - id: "3.2" + text: Kubelet + checks: + - id: 3.2.1 + text: Ensure that the Anonymous Auth is Not Enabled + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --anonymous-auth + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: "false" + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nDisable Anonymous Authentication by setting the following parameter:\n\n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n```\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--anonymous-auth=false\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.2 + text: Ensure that the --authorization-mode argument is not set to AlwaysAllow + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nEnable Webhook Authentication by setting the following parameter:\n\n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n```\n\nNext, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n```\n\"authorization\": { \"mode\": \"Webhook }\n```\n\nFiner detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.3 + text: Ensure that a Client CA File is Configured + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: "**Remediation Method 1:**\n\nIf configuring via the Kubelet config file, you first need to locate the file.\n\nTo do this, SSH to each node and execute the following command to find the kubelet process:\n\n```\nps -ef | grep kubelet\n```\n\nThe output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n```\nsudo less /path/to/kubelet-config.json\n```\n\nConfigure the client certificate authority file by setting the following parameter appropriately:\n\n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n```\n\n**Remediation Method 2:**\n\nIf using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\nFor systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\n--client-ca-file=\n```\n\n**For Both Remediation Steps:**\n\nBased on your system, restart the `kubelet` service and check the service status. \n\nThe following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n```" + scored: true + - id: 3.2.4 + text: Ensure that the --read-only-port is disabled + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --read-only-port + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: "0" + remediation: |- + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0 + + ``` + "readOnlyPort": 0 + ``` + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --read-only-port=0 + ``` + + For each remediation: + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: false + - id: 3.2.5 + text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: "0" + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet-config.yaml` and set the below parameter to a non-zero value in the format of #h#m#s + + ``` + "streamingConnectionIdleTimeout": "4h0m0s" + ``` + + You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file. 
+ + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --streaming-connection-idle-timeout=4h0m0s + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"streamingConnectionIdleTimeout":` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: true + - id: 3.2.6 + text: Ensure that the --make-iptables-util-chains argument is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: "true" + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + + ``` + "makeIPTablesUtilChains": true + ``` + + Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --make-iptables-util-chains=true + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"makeIPTablesUtilChains": true` by extracting the live configuration from the nodes running kubelet. 
+ + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediations:** + Based on your system, restart the `kubelet` service and check status + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: true + - id: 3.2.7 + text: Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture + type: "automated" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: |- + If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level. + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the `--event-qps` parameter in the `KUBELET_SYSTEM_PODS_ARGS` variable to `0` or an appropriate level. + + Based on your system, restart the `kubelet` service. For example: + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + ``` + scored: true + - id: 3.2.8 + text: Ensure that the --rotate-certificates argument is not present or is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.yaml file `/etc/kubernetes/kubelet/kubelet-config.yaml` and set the below parameter to true + + ``` + "RotateCertificate":true + ``` + + Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. 
+ + ``` + --RotateCertificate=true + ``` + scored: true + - id: 3.2.9 + text: Ensure that the RotateKubeletServerCertificate argument is set to true + audit: /bin/ps -fC $kubeletbin + audit_config: /bin/cat $kubeletconf + type: automated + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: eq + value: true + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet-config.yaml` and set the below parameter to true + + ``` + "featureGates": { + "RotateKubeletServerCertificate":true + }, + ``` + + Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file. + + **Remediation Method 2:** + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + ``` + --rotate-kubelet-server-certificate=true + ``` + + **Remediation Method 3:** + + If using the api configz endpoint consider searching for the status of `"RotateKubeletServerCertificate":` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes + ``` + kubectl proxy --port=8001 & + + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from "kubectl get nodes") + + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + ``` + + **For all three remediation methods:** + Restart the `kubelet` service and check status. The example below is for when using systemctl to manage services: + + ``` + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + ``` + scored: true diff --git a/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/policies.yaml b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/policies.yaml new file mode 100755 index 00000000..6487b5d5 --- /dev/null +++ b/cmd/linter/kubebench/kubebench-rules/gke-1.6.0/policies.yaml @@ -0,0 +1,143 @@ +controls: "" +version: gke-1.6.0 +id: 4 +text: Policies +type: policies +groups: + - id: "4.1" + text: RBAC and Service Accounts + checks: + - id: 4.1.1 + text: Ensure that the cluster-admin role is only used where required + type: automated + remediation: "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges. \n\nWhere possible, first bind users to a lower-privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n```\nkubectl delete clusterrolebinding [name]\n```" + scored: false + - id: 4.1.2 + text: Minimize access to secrets + type: automated + remediation: Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster. 
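+ + As a quick follow-up check (the subject below is a hypothetical example, not something defined by this benchmark), impersonation can be used to confirm that a given service account can no longer read Secret objects after its roles have been tightened: + ``` + # Hypothetical subject: the "default" service account in the "dev" namespace. + # An answer of "no" confirms that it cannot read Secrets cluster-wide. + kubectl auth can-i get secrets --all-namespaces --as=system:serviceaccount:dev:default + ```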
+ scored: false + - id: 4.1.3 + text: Minimize wildcard use in Roles and ClusterRoles + type: automated + remediation: Where possible, replace any use of wildcards in clusterroles and roles with specific objects or actions. + scored: false + - id: 4.1.4 + text: Ensure that default service accounts are not actively used + type: automated + remediation: "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\nModify the configuration of each default service account to include this value \n\n```\nautomountServiceAccountToken: false\n```" + scored: false + - id: 4.1.5 + text: Ensure that Service Account Tokens are only mounted where necessary + type: automated + remediation: Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it. + scored: true + - id: 4.1.6 + text: Avoid use of system:masters group + type: automated + remediation: Remove the `system:masters` group from all users in the cluster. + scored: false + - id: 4.1.7 + text: Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster + type: manual + remediation: Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + - id: 4.1.8 + text: Avoid bindings to system:anonymous + type: automated + remediation: "Identify all `clusterrolebindings` and `rolebindings` to the user system:anonymous. Check if they are used and review the permissions associated with the binding using the commands in the Audit section above or refer to GKE [documentation](https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default). \n\nStrongly consider replacing unsafe bindings with an authenticated, user-defined group. Where possible, bind to non-default, user-defined groups with least-privilege roles.\n\nIf there are any unsafe bindings to the user `system:anonymous`, proceed to delete them after consideration for cluster operations with only necessary, safer bindings. \n\n```\nkubectl delete clusterrolebinding\n[CLUSTER_ROLE_BINDING_NAME]\n```\n\n```\nkubectl delete rolebinding\n[ROLE_BINDING_NAME]\n--namespace\n[ROLE_BINDING_NAMESPACE]\n```" + scored: false + - id: 4.1.9 + text: Avoid non-default bindings to system:unauthenticated + type: automated + remediation: "Identify all non-default `clusterrolebindings` and `rolebindings` to the group `system:unauthenticated`. Check if they are used and review the permissions associated with the binding using the commands in the Audit section above or refer to GKE [documentation](https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default). \n\nStrongly consider replacing non-default, unsafe bindings with an authenticated, user-defined group. Where possible, bind to non-default, user-defined groups with least-privilege roles.\n\nIf there are any non-default, unsafe bindings to the group `system:unauthenticated`, proceed to delete them after consideration for cluster operations with only necessary, safer bindings. \n\n```\nkubectl delete clusterrolebinding\n[CLUSTER_ROLE_BINDING_NAME]\n```\n\n```\nkubectl delete rolebinding\n[ROLE_BINDING_NAME]\n--namespace\n[ROLE_BINDING_NAMESPACE]\n```" + scored: false + - id: 4.1.10 + text: Avoid non-default bindings to system:authenticated + type: automated + remediation: "Identify all non-default `clusterrolebindings` and `rolebindings` to the group `system:authenticated`. 
Check if they are used and review the permissions associated with the binding using the commands in the Audit section above or refer to GKE documentation. \n\nStrongly consider replacing non-default, unsafe bindings with an authenticated, user-defined group. Where possible, bind to non-default, user-defined groups with least-privilege roles.\n\nIf there are any non-default, unsafe bindings to the group `system:authenticated`, proceed to delete them after consideration for cluster operations with only necessary, safer bindings. \n\n```\nkubectl delete clusterrolebinding\n[CLUSTER_ROLE_BINDING_NAME]\n```\n\n```\nkubectl delete rolebinding\n[ROLE_BINDING_NAME]\n--namespace\n[ROLE_BINDING_NAMESPACE]\n```" + scored: false + - id: "4.2" + text: Pod Security Standards + checks: + - id: 4.2.1 + text: Ensure that the cluster enforces Pod Security Standard Baseline profile or stricter for all namespaces. + type: manual + remediation: |- + Ensure that Pod Security Admission is in place for every namespace which contains user workloads. + + Run the following command to enforce the Baseline profile in a namespace: + + kubectl label namespace pod-security.kubernetes.io/enforce=baseline + scored: false + - id: "4.3" + text: Network Policies and CNI + checks: + - id: 4.3.1 + text: Ensure that the CNI in use supports Network Policies + type: manual + remediation: To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin will be updated. See recommendation 5.6.7. + scored: false + - id: 4.3.2 + text: Ensure that all Namespaces have Network Policies defined + type: automated + remediation: |- + Follow the documentation and create `NetworkPolicy` objects as needed. + See: [https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#creating_a_network_policy](https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#creating_a_network_policy) for more information. + scored: false + - id: "4.4" + text: Secrets Management + checks: + - id: 4.4.1 + text: Prefer using secrets as files over secrets as environment variables + type: automated + remediation: If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables. + scored: false + - id: 4.4.2 + text: Consider external secret storage + type: manual + remediation: Refer to the secrets management options offered by the cloud service provider or a third-party secrets management solution. + scored: false + - id: "4.5" + text: Extensible Admission Control + checks: + - id: 4.5.1 + text: Configure Image Provenance using ImagePolicyWebhook admission controller + type: manual + remediation: |- + Follow the Kubernetes documentation and setup image provenance. + + Also see recommendation 5.10.4. + scored: false + - id: "4.6" + text: General Policies + checks: + - id: 4.6.1 + text: Create administrative boundaries between resources using namespaces + type: manual + remediation: Follow the documentation and create namespaces for objects in your deployment as you need them. + scored: false + - id: 4.6.2 + text: Ensure that the seccomp profile is set to RuntimeDefault in the pod definitions + type: automated + remediation: |- + Use security context to enable the `RuntimeDefault` seccomp profile in your pod definitions. 
An example is as below: + + ``` + { + "namespace": "kube-system", + "name": "metrics-server-v0.7.0-dbcc8ddf6-gz7d4", + "seccompProfile": "RuntimeDefault" + } + ``` + scored: false + - id: 4.6.3 + text: Apply Security Context to Pods and Containers + type: manual + remediation: Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Google Container-Optimized OS Benchmark. + scored: false + - id: 4.6.4 + text: The default namespace should not be used + type: automated + remediation: Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. + scored: false
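+ + As a closing illustration for 4.6.4 (the namespace name below is a hypothetical placeholder), a dedicated namespace can be created and made the default for the current kubectl context so that new resources no longer land in `default`: + ``` + # Hypothetical namespace name; use one per team or application. + kubectl create namespace team-a + kubectl config set-context --current --namespace=team-a + ```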