diff --git a/charts/k8s-job/.helmignore b/charts/k8s-job/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/charts/k8s-job/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/k8s-job/Chart.yaml b/charts/k8s-job/Chart.yaml new file mode 100644 index 00000000..b6ec5ff0 --- /dev/null +++ b/charts/k8s-job/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +name: k8s-job +description: A Helm chart to package a job for Kubernetes +# This will be updated with the release tag in the CI/CD pipeline before publishing. This has to be a valid semver for +# the linter to accept. +version: 0.0.1-replace +home: https://github.com/gruntwork-io/helm-kubernetes-services +maintainers: + - name: Gruntwork + email: info@gruntwork.io + url: https://gruntwork.io diff --git a/charts/k8s-job/README.md b/charts/k8s-job/README.md new file mode 100644 index 00000000..37cb3c2d --- /dev/null +++ b/charts/k8s-job/README.md @@ -0,0 +1,359 @@ +# Kubernetes Job Helm Chart + +This Helm Chart can be used to deploy your job container under a +[Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) resource onto your Kubernetes +cluster. You can use this Helm Chart to run and deploy a one time job or periodic task such as a security scanner application or data science pipeline job. + + +## How to use this chart? + +* See the [root README](/README.adoc) for general instructions on using Gruntwork Helm Charts. +* See the [examples](/examples) folder for example usage. +* See the provided [values.yaml](./values.yaml) file for the required and optional configuration values that you can set + on this chart. 
+ +back to [root README](/README.adoc#core-concepts) + +## What resources does this Helm Chart deploy? + +The following resources will be deployed with this Helm Chart, depending on which configuration values you use: + +- `Job`: A standalone `Job` running the image specified in the + `containerImage` input value. + +back to [root README](/README.adoc#core-concepts) + +## How do I deploy additional resources not managed by the chart? + +You can create custom Kubernetes resources, that are not directly managed by the chart, within the `customResources` +key. You provide each resource manifest directly as a value under `customResources.resources` and set +`customResources.enabled` to `true`. For examples of custom resources, take a look at the examples in +[test/fixtures/custom_resources_values.yaml](../../test/fixtures/custom_resources_values.yaml) and +[test/fixtures/multiple_custom_resources_values.yaml](../../test/fixtures/multiple_custom_resources_values.yaml). + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I set and share configurations with the application? + +While you can bake most application configuration values into the application container, you might need to inject +dynamic configuration variables into the container. These are typically values that change depending on the environment, +such as the MySQL database endpoint. Additionally, you might also want a way to securely share secrets with the +container such that they are not hard coded in plain text in the container or in the Helm Chart values yaml file. 
To +support these use cases, this Helm Chart provides three ways to share configuration values with the application +container: + +- [Directly setting environment variables](#directly-setting-environment-variables) +- [Using ConfigMaps](#using-configmaps) +- [Using Secrets](#using-secrets) + +### Directly setting environment variables + +The simplest way to set a configuration value for the container is to set an environment variable for the container +runtime. These variables are set by Kubernetes before the container application is booted, which can then be looked up +using the standard OS lookup functions for environment variables. + +You can use the `envVars` input value to set an environment variable at deploy time. For example, the following entry in +a `values.yaml` file will set the `DB_HOST` environment variable to `mysql.default.svc.cluster.local` and the `DB_PORT` +environment variable to `3306`: + +```yaml +envVars: + DB_HOST: "mysql.default.svc.cluster.local" + DB_PORT: 3306 +``` + +One thing to be aware of when using environment variables is that they are set at start time of the container. This +means that updating the environment variables require restarting the containers so that they propagate. + +### Using ConfigMaps + +While environment variables are an easy way to inject configuration values, what if you want to share the configuration +across multiple deployments? If you wish to use the direct environment variables approach, you would have no choice but +to copy paste the values across each deployment. When this value needs to change, you are now faced with going through +each deployment and updating the reference. + +For this situation, `ConfigMaps` would be a better option. `ConfigMaps` help decouple configuration values from the +`Deployment` and `Pod` config, allowing you to share the values across the deployments. `ConfigMaps` are dedicated +resources in Kubernetes that store configuration values as key value pairs. 
+ +For example, suppose you had a `ConfigMap` to store the database information. You might store the information as two key +value pairs: one for the host (`dbhost`) and one for the port (`dbport`). You can create a `ConfigMap` directly using +`kubectl`, or by using a resource file. + +To directly create the `ConfigMap`: + +``` +kubectl create configmap my-config --from-literal=dbhost=mysql.default.svc.cluster.local --from-literal=dbport=3306 +``` + +Alternatively, you can manage the `ConfigMap` as code using a kubernetes resource config: + +```yaml +# my-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-config +data: + dbhost: mysql.default.svc.cluster.local + dbport: 3306 +``` + +You can then apply this resource file using `kubectl`: + +``` +kubectl apply -f my-config.yaml +``` + +`kubectl` supports multiple ways to seed the `ConfigMap`. You can read all the different ways to create a `ConfigMap` in +[the official +documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap). + +Once the `ConfigMap` is created, you can access the `ConfigMap` within the `Pod` by configuring the access during +deployment. This Helm Chart provides the `configMaps` input value to configure what `ConfigMaps` should be shared with +the application container. With a single-standing Job there is one way to access a `ConfigMap`: + +- [Accessing the `ConfigMap` as Environment Variables](#accessing-the-configmap-as-environment-variables) + +**NOTE**: It is generally not recommended to use `ConfigMaps` to store sensitive data. For those use cases, use +`Secrets` or an external secret store. + +##### Accessing the ConfigMap as Environment Variables + +You can set the values of the `ConfigMap` as environment variables in the application container. To do so, you set the +`as` attribute of the `configMaps` input value to `environment`. 
For example, to share the `my-config` `ConfigMap` above +using the same environment variables as the example in [Directly setting environment +variables](#directly-settings-environment-variables), you would set the `configMaps` as follows: + +```yaml +configMaps: + my-config: + as: environment + items: + dbhost: + envVarName: DB_HOST + dbport: + envVarName: DB_PORT +``` + +In this configuration for the Helm Chart, we specify that we want to share the `my-config` `ConfigMap` as environment +variables with the main application container. Additionally, we want to map the `dbhost` config value to the `DB_HOST` +environment variable, and similarly map the `dbport` config value to the `DB_PORT` environment variable. + +Note that like directly setting environment variables, these are set at container start time, and thus the containers +need to be restarted when the `ConfigMap` is updated for the new values to be propagated. You can use files instead if +you wish the `ConfigMap` changes to propagate immediately. + + + +### Using Secrets + +In general, it is discouraged to store sensitive information such as passwords in `ConfigMaps`. Instead, Kubernetes +provides `Secrets` as an alternative resource to store sensitive data. Similar to `ConfigMaps`, `Secrets` are key value +pairs that store configuration values that can be managed independently of the `Pod` and containers. However, unlike +`ConfigMaps`, `Secrets` have the following properties: + +- A secret is only sent to a node if a pod on that node requires it. They are automatically garbage collected when there + are no more `Pods` referencing it on the node. +- A secret is stored in `tmpfs` on the node, so that it is only available in memory. +- Starting with Kubernetes 1.7, they can be encrypted at rest in `etcd` (note: this feature was in alpha state until + Kubernetes 1.13). 
+ +You can read more about the protections and risks of using `Secrets` in [the official +documentation](https://kubernetes.io/docs/concepts/configuration/secret/#security-properties). + +Creating a `Secret` is very similar to creating a `ConfigMap`. For example, suppose you had a `Secret` to store the +database password. Like `ConfigMaps`, you can create a `Secret` directly using `kubectl`: + +``` +kubectl create secret generic my-secret --from-literal=password=1f2d1e2e67df +``` + +The `generic` keyword indicates the `Secret` type. Almost all use cases for your application should use this type. Other +types include `docker-registry` for specifying credentials for accessing a private docker registry, and `tls` for +specifying TLS certificates to access the Kubernetes API. + +You can also manage the `Secret` as code, although you may want to avoid this for `Secrets` to avoid leaking them in +unexpected locations (e.g source control). Unlike `ConfigMaps`, `Secrets` require values to be stored as base64 encoded +values when using resource files. So the configuration for the above example will be: + +```yaml +# my-secret.yaml +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: my-secret +data: + password: MWYyZDFlMmU2N2Rm +``` + +Note that `MWYyZDFlMmU2N2Rm` is the base 64 encoded version of `1f2d1e2e67df`. You can then apply this resource config +using `kubectl`: + +``` +kubectl apply -f my-secret.yaml +``` + +Similar to `ConfigMaps`, this Helm Chart supports two ways to inject `Secrets` into the application container: as +environment variables, or as files. The syntax to share the values is very similar to the `configMaps` input value, only +you use the `secrets` input value. The properties of each approach is very similar to `ConfigMaps`. Refer to [the +previous section](#using-configmaps) for more details on each approach. Here, we show you examples of the input values +to use for each approach. 
+
+**Mounting secrets as environment variables**: In this example, we mount the `my-secret` `Secret` created above as the
+environment variable `DB_PASSWORD`.
+
+```yaml
+secrets:
+  my-secret:
+    as: environment
+    items:
+      password:
+        envVarName: DB_PASSWORD
+```
+
+**Mounting secrets as files**: In this example, we mount the `my-secret` `Secret` as the file `/etc/db/password`.
+
+```yaml
+secrets:
+  my-secret:
+    as: volume
+    mountPath: /etc/db
+    items:
+      password:
+        filePath: password
+```
+
+**NOTE**: The volumes are different between `secrets` and `configMaps`. This means that if you use the same `mountPath`
+for different secrets and config maps, you can end up with only one. It is undefined which `Secret` or `ConfigMap` ends
+up getting mounted. To be safe, use a different `mountPath` for each one.
+
+**NOTE**: If you want to mount the volumes created with `secrets` or `configMaps` on your init or sidecar containers, you will
+have to append `-volume` to the volume name. In the example above, the resulting volume will be `my-secret-volume`.
+
+```yaml
+sideCarContainers:
+  sidecar:
+    image: sidecar/container:latest
+    volumeMounts:
+      - name: my-secret-volume
+        mountPath: /etc/db
+```
+
+### Which configuration method should I use?
+
+Which configuration method you should use depends on your needs.
Here is a summary of the pros and cons of each
+approach:
+
+##### Directly setting environment variables
+
+**Pro**:
+
+- Simple setup
+- Manage configuration values directly with application deployment config
+- Most application languages support looking up environment variables
+
+**Con**:
+
+- Tightly couple configuration settings with application deployment
+- Requires redeployment to update values
+- Must store in plain text, and easy to leak into VCS
+
+**Best for**:
+
+- Iterating different configuration values during development
+- Storing non-sensitive values that are unique to each environment / deployment
+
+##### Using ConfigMaps
+
+**Pro**:
+
+- Keep config DRY by sharing a common set of configurations
+- Independently update config values from the application deployment
+- Automatically propagate new values when stored as files
+
+**Con**:
+
+- More overhead to manage the configuration
+- Stored in plain text
+- Available on all nodes automatically
+
+**Best for**:
+
+- Storing non-sensitive common configuration that is shared across environments
+- Storing non-sensitive dynamic configuration values that change frequently
+
+##### Using Secrets
+
+**Pro**:
+
+- All the benefits of using `ConfigMaps`
+- Can be encrypted at rest
+- Opaque by default when viewing the values (harder to remember base 64 encoded version of "admin")
+- Only available to nodes that use it, and only in memory
+
+**Con**:
+
+- All the challenges of using `ConfigMaps`
+- Configured in plain text, making it difficult to manage as code securely
+- Less safe than using dedicated secrets manager / store like HashiCorp Vault.
+
+**Best for**:
+
+- Storing sensitive configuration values
+
+back to [root README](/README.adoc#day-to-day-operations)
+
+## How do I ensure a minimum number of Pods are available across node maintenance?
+ +Sometimes, you may want to ensure that a specific number of `Pods` are always available during [voluntary +maintenance](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions). +This chart exposes an input value `minPodsAvailable` that can be used to specify a minimum number of `Pods` to maintain +during a voluntary maintenance activity. Under the hood, this chart will create a corresponding `PodDisruptionBudget` to +ensure that a certain number of `Pods` are up before attempting to terminate additional ones. + +You can read more about `PodDisruptionBudgets` in [our blog post covering the +topic](https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085) +and in [the official +documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#how-disruption-budgets-work). + + +back to [root README](/README.adoc#major-changes) + + +## How do I use a private registry? + +To pull container images from a private registry, the Kubernetes cluster needs to be able to authenticate to the docker +registry with a registry key. On managed Kubernetes clusters (e.g EKS, GKE, AKS), this is automated through the server +IAM roles that are assigned to the instance VMs. In most cases, if the instance VM IAM role has the permissions to +access the registry, the Kubernetes cluster will automatically be able to pull down images from the respective managed +registry (e.g ECR on EKS or GCR on GKE). + +Alternatively, you can specify docker registry keys in the Kubernetes cluster as `Secret` resources. This is helpful in +situations where you do not have the ability to assign registry access IAM roles to the node itself, or if you are +pulling images off of a different registry (e.g accessing GCR from EKS cluster). 
+ +You can use `kubectl` to create a `Secret` in Kubernetes that can be used as a docker registry key: + +``` +kubectl create secret docker-registry NAME \ + --docker-server=DOCKER_REGISTRY_SERVER \ + --docker-username=DOCKER_USER \ + --docker-password=DOCKER_PASSWORD \ + --docker-email=DOCKER_EMAIL +``` + +This command will create a `Secret` resource named `NAME` that holds the specified docker registry credentials. You can +then specify the cluster to use this `Secret` when pulling down images for the service `Deployment` in this chart by +using the `imagePullSecrets` input value: + +``` +imagePullSecrets: + - NAME +``` + +You can learn more about using private registries with Kubernetes in [the official +documentation](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry). + +back to [root README](/README.adoc#day-to-day-operations) diff --git a/charts/k8s-job/linter_values.yaml b/charts/k8s-job/linter_values.yaml new file mode 100644 index 00000000..f800b2d7 --- /dev/null +++ b/charts/k8s-job/linter_values.yaml @@ -0,0 +1,42 @@ +#---------------------------------------------------------------------------------------------------------------------- +# CHART PARAMETERS TO USE WITH HELM LINT +# This file declares a complete configuration value for this chart, with required values defined so that it can be used +# with helm lint to lint the chart. This should only specify the required values of the chart, and be combined with the +# default values of the chart. +# This is a YAML-formatted file. +#---------------------------------------------------------------------------------------------------------------------- + +#---------------------------------------------------------------------------------------------------------------------- +# REQUIRED VALUES +# These values are expected to be defined and passed in by the operator when deploying this helm chart. 
+#---------------------------------------------------------------------------------------------------------------------- + +# containerImage is a map that describes the container image that should be used to serve the application managed by +# this chart. +# The expected keys are: +# - repository (string) (required) : The container image repository that should be used. +# E.g `nginx` ; `gcr.io/kubernetes-helm/tiller` +# - tag (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a +# fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`, +# `canary`, or other tags that are designed to be “floating”. +# - pullPolicy (string) : The image pull policy to employ. Determines when the image will be pulled in. See +# the official Kubernetes docs for more info. If undefined, this will default to +# `IfNotPresent`. +# +# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that +# the image should only be pulled if it has not been pulled previously. +# +# EXAMPLE: +# +# containerImage: +# repository: nginx +# tag: stable +# pullPolicy: IfNotPresent +containerImage: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +# applicationName is a string that names the application. This is used to label the pod and to name the main application +# container in the pod spec. The label is keyed under "gruntwork.io/app-name" +applicationName: "linter" diff --git a/charts/k8s-job/templates/_capabilities_helpers.tpl b/charts/k8s-job/templates/_capabilities_helpers.tpl new file mode 100644 index 00000000..33f0bafe --- /dev/null +++ b/charts/k8s-job/templates/_capabilities_helpers.tpl @@ -0,0 +1,4 @@ +{{/* Allow KubeVersion to be overridden. This is mostly used for testing purposes. 
*/}} +{{- define "gruntwork.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}} +{{- end -}} diff --git a/charts/k8s-job/templates/_helpers.tpl b/charts/k8s-job/templates/_helpers.tpl new file mode 100644 index 00000000..adc013e8 --- /dev/null +++ b/charts/k8s-job/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "k8s-job.name" -}} + {{- .Values.applicationName | required "applicationName is required" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "k8s-job.fullname" -}} + {{- $name := required "applicationName is required" .Values.applicationName -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "k8s-job.chart" -}} + {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Convert octal to decimal (e.g 644 => 420). For file permission modes, many people are more familiar with octal notation. +However, due to yaml/json limitations, all the Kubernetes resources require file modes to be reported in decimal. +*/}} +{{- define "k8s-job.fileModeOctalToDecimal" -}} + {{- $digits := splitList "" (toString .) -}} + + {{/* Make sure there are exactly 3 digits */}} + {{- if ne (len $digits) 3 -}} + {{- fail (printf "File mode octal expects exactly 3 digits: %s" .) 
-}} + {{- end -}} + + {{/* Go Templates do not support variable updating, so we simulate it using dictionaries */}} + {{- $accumulator := dict "res" 0 -}} + {{- range $idx, $digit := $digits -}} + {{- $digitI := atoi $digit -}} + + {{/* atoi from sprig swallows conversion errors, so we double check to make sure it is a valid conversion */}} + {{- if and (eq $digitI 0) (ne $digit "0") -}} + {{- fail (printf "Digit %d of %s is not a number: %s" $idx . $digit) -}} + {{- end -}} + + {{/* Make sure each digit is less than 8 */}} + {{- if ge $digitI 8 -}} + {{- fail (printf "%s is not a valid octal digit" $digit) -}} + {{- end -}} + + {{/* Since we don't have math.Pow, we hard code */}} + {{- if eq $idx 0 -}} + {{/* 8^2 */}} + {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 64)) -}} + {{- else if eq $idx 1 -}} + {{/* 8^1 */}} + {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 8)) -}} + {{- else -}} + {{/* 8^0 */}} + {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 1)) -}} + {{- end -}} + {{- end -}} + {{- "res" | index $accumulator | toString | printf -}} +{{- end -}} diff --git a/charts/k8s-job/templates/_job_spec.tpl b/charts/k8s-job/templates/_job_spec.tpl new file mode 100644 index 00000000..46d2c26e --- /dev/null +++ b/charts/k8s-job/templates/_job_spec.tpl @@ -0,0 +1,200 @@ +{{- /* +Common job spec. This template requires the +context: +- Values +- Release +- Chart +You can construct this context using dict: +(dict "Values" .Values "Release" .Release "Chart" .Chart "isCanary" true) +*/ -}} +{{- define "k8s-job.jobSpec" -}} +{{- /* +We must decide whether or not there are volumes to inject. The logic to decide whether or not to inject is based on +whether or not there are configMaps OR secrets that are specified as volume mounts (`as: volume` attributes). We do this +by using a map to track whether or not we have seen a volume type. 
We have to use a map because we can't update a
+variable in helm chart templates.
+
+Similarly, we need to decide whether or not there are environment variables to add
+
+We need this because certain sections are omitted if there are no volumes or environment variables to add.
+*/ -}}
+
+{{/* Go Templates do not support variable updating, so we simulate it using dictionaries */}}
+{{- $hasInjectionTypes := dict "hasVolume" false "hasEnvVars" false "exposePorts" false -}}
+{{- if .Values.envVars -}}
+  {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}}
+{{- end -}}
+{{- if .Values.additionalContainerEnv -}}
+  {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}}
+{{- end -}}
+{{- $allContainerPorts := values .Values.containerPorts -}}
+{{- range $allContainerPorts -}}
+  {{/* We are exposing ports if there is at least one key in containerPorts that is not disabled (disabled = false or
+  omitted)
+  */}}
+  {{- if or (not (hasKey . "disabled")) (not .disabled) -}}
+    {{- $_ := set $hasInjectionTypes "exposePorts" true -}}
+  {{- end -}}
+{{- end -}}
+{{- $allSecrets := values .Values.secrets -}}
+{{- range $allSecrets -}}
+  {{- if eq (index . "as") "volume" -}}
+    {{- $_ := set $hasInjectionTypes "hasVolume" true -}}
+  {{- else if eq (index . "as") "environment" -}}
+    {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}}
+  {{- else if eq (index . "as") "envFrom" }}
+    {{- $_ := set $hasInjectionTypes "hasEnvFrom" true -}}
+  {{- else if eq (index . "as") "none" -}}
+    {{- /* noop */ -}}
+  {{- else -}}
+    {{- /* fail takes a single string, so the printf call must be parenthesized or the template errors at render time. */ -}}
+    {{- fail (printf "secrets config has unknown type: %s" (index . "as")) -}}
+  {{- end -}}
+{{- end -}}
+{{- $allConfigMaps := values .Values.configMaps -}}
+{{- range $allConfigMaps -}}
+  {{- if eq (index . "as") "volume" -}}
+    {{- $_ := set $hasInjectionTypes "hasVolume" true -}}
+  {{- else if eq (index . "as") "environment" -}}
+    {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}}
+  {{- else if eq (index .
"as") "envFrom" }}
+    {{- $_ := set $hasInjectionTypes "hasEnvFrom" true -}}
+  {{- else if eq (index . "as") "none" -}}
+    {{- /* noop */ -}}
+  {{- else -}}
+    {{- /* fail takes a single string, so the printf call must be parenthesized or the template errors at render time. */ -}}
+    {{- fail (printf "configMaps config has unknown type: %s" (index . "as")) -}}
+  {{- end -}}
+{{- end -}}
+{{- if gt (len .Values.persistentVolumes) 0 -}}
+  {{- $_ := set $hasInjectionTypes "hasVolume" true -}}
+{{- end -}}
+{{- if gt (len .Values.scratchPaths) 0 -}}
+  {{- $_ := set $hasInjectionTypes "hasVolume" true -}}
+{{- end -}}
+{{- if gt (len .Values.emptyDirs) 0 -}}
+  {{- $_ := set $hasInjectionTypes "hasVolume" true -}}
+{{- end -}}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ include "k8s-job.fullname" . }}
+  labels:
+    # These labels are required by helm. You can read more about required labels in the chart best practices guide:
+    # https://docs.helm.sh/chart_best_practices/#standard-labels
+    helm.sh/chart: {{ include "k8s-job.chart" . }}
+    app.kubernetes.io/name: {{ include "k8s-job.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    {{- range $key, $value := .Values.additionalJobLabels }}
+    {{ $key }}: {{ $value }}
+    {{- end}}
+{{- with .Values.jobAnnotations }}
+  annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+  # NOTE: unlike Deployments, a Job's spec.selector is auto-generated by Kubernetes (keyed off controller-uid).
+  # A hand-written selector is rejected by the API server unless manualSelector is set, so we do not render one here.
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: {{ include "k8s-job.name" . }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
+        gruntwork.io/deployment-type: main
+        {{- range $key, $value := .Values.additionalPodLabels }}
+        {{ $key }}: {{ $value }}
+        {{- end }}
+
+      {{- with .Values.podAnnotations }}
+      annotations:
+{{ toYaml .
| indent 8 }}
+      {{- end }}
+    spec:
+      {{- if .Values.podSecurityContext }}
+      securityContext:
+{{ toYaml .Values.podSecurityContext | indent 8 }}
+      {{- end}}
+
+      {{- /* restartPolicy is a plain scalar, so it is emitted directly; toYaml/indent would mangle it. */}}
+      restartPolicy: {{ .Values.restartPolicy }}
+      containers:
+        - name: {{ .Values.applicationName }}
+          {{- $repo := required ".Values.containerImage.repository is required" .Values.containerImage.repository }}
+          {{- $tag := required ".Values.containerImage.tag is required" .Values.containerImage.tag }}
+          image: "{{ $repo }}:{{ $tag }}"
+          imagePullPolicy: {{ .Values.containerImage.pullPolicy | default "IfNotPresent" }}
+          {{- /* command, args, and securityContext are independent settings: each must be guarded by its own
+          conditional. Previously args and securityContext were nested inside the containerCommand conditional,
+          silently dropping the security context whenever no custom command was configured. */}}
+          {{- if .Values.containerCommand }}
+          command:
+{{ toYaml .Values.containerCommand | indent 12 }}
+          {{- end }}
+          {{- if .Values.containerArgs }}
+          args:
+{{ toYaml .Values.containerArgs | indent 12 }}
+          {{- end }}
+          {{- if .Values.securityContext }}
+          securityContext:
+{{ toYaml .Values.securityContext | indent 12 }}
+          {{- end }}
+          resources:
+{{ toYaml .Values.containerResources | indent 12 }}
+
+          {{- /* START ENV VAR LOGIC */ -}}
+          {{- if index $hasInjectionTypes "hasEnvVars" }}
+          env:
+          {{- end }}
+          {{- range $key, $value := .Values.envVars }}
+            - name: {{ $key }}
+              value: {{ quote $value }}
+          {{- end }}
+          {{- if .Values.additionalContainerEnv }}
+{{ toYaml .Values.additionalContainerEnv | indent 12 }}
+          {{- end }}
+          {{- range $name, $value := .Values.configMaps }}
+            {{- if eq $value.as "environment" }}
+            {{- range $configKey, $keyEnvVarConfig := $value.items }}
+            - name: {{ required "envVarName is required on configMaps items when using environment" $keyEnvVarConfig.envVarName | quote }}
+              valueFrom:
+                configMapKeyRef:
+                  name: {{ $name }}
+                  key: {{ $configKey }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+          {{- range $name, $value := .Values.secrets }}
+            {{- if eq $value.as "environment" }}
+            {{- range $secretKey, $keyEnvVarConfig := $value.items }}
+            - name: {{ required "envVarName is required on secrets items when using environment" $keyEnvVarConfig.envVarName | quote }}
+              valueFrom:
+                secretKeyRef:
+                  name: {{ $name }}
+                  key:
{{ $secretKey }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+          {{- if index $hasInjectionTypes "hasEnvFrom" }}
+          envFrom:
+            {{- range $name, $value := .Values.configMaps }}
+            {{- if eq $value.as "envFrom" }}
+            - configMapRef:
+                name: {{ $name }}
+            {{- end }}
+            {{- end }}
+            {{- range $name, $value := .Values.secrets }}
+            {{- if eq $value.as "envFrom" }}
+            - secretRef:
+                name: {{ $name }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+          {{- /* END ENV VAR LOGIC */ -}}
+
+      {{- /* START IMAGE PULL SECRETS LOGIC */ -}}
+      {{- if gt (len .Values.imagePullSecrets) 0 }}
+      imagePullSecrets:
+        {{- range $secretName := .Values.imagePullSecrets }}
+        - name: {{ $secretName }}
+        {{- end }}
+      {{- end }}
+      {{- /* END IMAGE PULL SECRETS LOGIC */ -}}
+
+
+{{- end -}}
diff --git a/charts/k8s-job/templates/job.yaml b/charts/k8s-job/templates/job.yaml
new file mode 100644
index 00000000..907f8fff
--- /dev/null
+++ b/charts/k8s-job/templates/job.yaml
@@ -0,0 +1,5 @@
+{{- /*
+The standalone Job to be deployed. This resource manages the creation and replacement
+of Jobs you schedule. The root context (.) must be passed to include so Values/Release/Chart resolve.
+*/ -}}
+{{ include "k8s-job.jobSpec" . }}
diff --git a/charts/k8s-job/values.yaml b/charts/k8s-job/values.yaml
new file mode 100644
index 00000000..ed4a7953
--- /dev/null
+++ b/charts/k8s-job/values.yaml
@@ -0,0 +1,164 @@
+#----------------------------------------------------------------------------------------------------------------------
+# CHART PARAMETERS
+# This file declares the configuration input values for the k8s-job Helm chart.
+# This is a YAML-formatted file.
+#----------------------------------------------------------------------------------------------------------------------
+
+#----------------------------------------------------------------------------------------------------------------------
+# REQUIRED VALUES
+# These values are expected to be defined and passed in by the operator when deploying this helm chart.
+#---------------------------------------------------------------------------------------------------------------------- + +# containerImage is a map that describes the container image that should be used to serve the application managed by +# this chart. +# The expected keys are: +# - repository (string) (required) : The container image repository that should be used. +# E.g `nginx` ; `gcr.io/kubernetes-helm/tiller` +# - tag (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a +# fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`, +# `canary`, or other tags that are designed to be “floating”. +# - pullPolicy (string) : The image pull policy to employ. Determines when the image will be pulled in. See +# the official Kubernetes docs for more info. If undefined, this will default to +# `IfNotPresent`. +# +# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that +# the image should only be pulled if it has not been pulled previously. +# +# EXAMPLE: +# +# containerImage: +# repository: nginx +# tag: stable +# pullPolicy: IfNotPresent + +# applicationName is a string that names the application. This is used to label the pod and to name the main application +# container in the pod spec. The label is keyed under "gruntwork.io/app-name" + + +#---------------------------------------------------------------------------------------------------------------------- +# OPTIONAL VALUES +# These values have defaults, but may be overridden by the operator +#---------------------------------------------------------------------------------------------------------------------- + +# containerCommand is a list of strings that indicate a custom command to run for the container in place of the default +# configured on the image. Omit to run the default command configured on the image. 
+# +# Example (run echo "Hello World"): +# +# containerCommand: +# - "echo" +# - "Hello World" +containerCommand: null + +# containerArgs is a list of strings that indicate custom arguments when a pod is created. Omit and no arguments will be injected. +# +# Example (run echo "Hello World"): +# +# containerArgs: +# - "echo" +# - "Hello World" +containerArgs: null + +# restartPolicy is a container and pod configuration option which decides which action to take if a container's process +# exits with a non-zero code. The default option "Never" will not attempt to restart the container. The "OnFailure" option +# will re-run the container. +# +# Read more: https://kubernetes.io/docs/concepts/workloads/controllers/job/#handling-pod-and-container-failures +# +# Example (restart on failure) +# +# spec: +# restartPolicy: OnFailure +# containers: +# - name: busybox +# ... +restartPolicy: Never + +# envVars is a map of strings to strings that specifies hard coded environment variables that should be set on the +# application container. The keys will be mapped to environment variable keys, with the values mapping to the +# environment variable values. +# +# NOTE: If you wish to set environment variables using Secrets, see the `secrets` setting in this file. +# +# The following example configures two environment variables, DB_HOST and DB_PORT: +# +# EXAMPLE: +# +# envVars: +# DB_HOST: "mysql.default.svc.cluster.local" +# DB_PORT: 3306 +envVars: {} + +# additionalContainerEnv is a list of additional environment variables +# definitions that will be inserted into the Container's environment YAML. +# +# Example: +# additionalContainerEnv: +# - name: DD_AGENT_HOST +# valueFrom: +# fieldRef: +# fieldPath: status.hostIP +# - name: DD_ENTITY_ID +# valueFrom: +# fieldRef: +# fieldPath: metadata.uid +additionalContainerEnv: {} + +# containerResources specifies the amount of resources the application container will require. Only specify if you have +# specific resource needs. 
+# NOTE: This variable is injected directly into the pod spec. See the official documentation for what this might look +# like: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +containerResources: {} + +# imagePullSecrets lists the Secret resources that should be used for accessing private registries. Each item in the +# list is a string that corresponds to the Secret name. +imagePullSecrets: [] + +# customResources is a map that lets you define Kubernetes resources you want installed and configured as part of this chart. +# The expected keys of customResources are: +# - enabled (bool) : Whether or not the provided custom resource definitions should be created. +# - resources (map) : A map of custom Kubernetes resources you want to install during the installation of the chart. +# +# NOTE: By default enabled = false, and no custom resources will be created. If you provide any resources, be sure to +# provide them as quoted using "|", and set enabled: true. +# +# The following example creates a custom ConfigMap and a Secret. +# +# EXAMPLE: +# +# customResources: +# enabled: true +# resources: +# custom_configmap: | +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: example +# data: +# key: value +# custom_secret: | +# apiVersion: v1 +# kind: Secret +# metadata: +# name: example +# type: Opaque +# data: +# key: dmFsdWU= +customResources: + enabled: false + resources: {} + +# fullnameOverride is a string that allows overriding the default fullname that appears as the +# application name and is used as the application name by kubernetes. +fullnameOverride: "" + +# jobAnnotations will add the provided map to the annotations for the Job resource created by this chart. +# The keys and values are free form, but subject to the limitations of Kubernetes resource annotations. +# NOTE: This variable is injected directly into the Job spec. 
+jobAnnotations: {}
+
+# additionalJobLabels will add the provided map to the labels for the Job resource created by this chart.
+# This is in addition to the helm template related labels created by the chart.
+# The keys and values are free form, but subject to the limitations of Kubernetes labelling.
+# NOTE: This variable is injected directly into the Job spec.
+additionalJobLabels: {}
diff --git a/examples/k8s-job-busybox/README.md b/examples/k8s-job-busybox/README.md
new file mode 100644
index 00000000..e69de29b
diff --git a/examples/k8s-job-busybox/values.yaml b/examples/k8s-job-busybox/values.yaml
new file mode 100644
index 00000000..a6e5a059
--- /dev/null
+++ b/examples/k8s-job-busybox/values.yaml
@@ -0,0 +1,59 @@
+#----------------------------------------------------------------------------------------------------------------------
+# CHART PARAMETERS FOR BUSYBOX EXAMPLE
+# This file declares the required values for the k8s-job helm chart to deploy busybox.
+# This is a YAML-formatted file.
+#----------------------------------------------------------------------------------------------------------------------
+
+#----------------------------------------------------------------------------------------------------------------------
+# REQUIRED VALUES OF CHART
+# These are the required values defined by the k8s-job chart. Here we will set them to deploy a busybox container.
+#----------------------------------------------------------------------------------------------------------------------
+
+# containerImage is a map that describes the container image that should be used to serve the application managed by
+# the k8s-job chart.
+# The expected keys are:
+#   - repository (string) (required) : The container image repository that should be used.
+#                                      E.g `nginx` ; `gcr.io/kubernetes-helm/tiller`
+#   - tag        (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a
+#                                      fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`,
+#                                      `canary`, or other tags that are designed to be “floating”.
+#   - pullPolicy (string)            : The image pull policy to employ. Determines when the image will be pulled in. See
+#                                      the official Kubernetes docs for more info. If undefined, this will default to
+#                                      `IfNotPresent`.
+#
+# The following example deploys the `busybox:1.34` image with a `IfNotPresent` image pull policy, which indicates that
+# the image should only be pulled if it has not been pulled previously. We deploy a specific, locked tag so that we
+# don't inadvertently upgrade busybox during a deployment that changes some other unrelated input value.
+containerImage:
+  repository: busybox
+  tag: 1.34
+  pullPolicy: IfNotPresent
+
+# applicationName is a string that names the application. This is used to label the pod and to name the main application
+# container in the pod spec. Here we use busybox as the name since we are deploying busybox.
+applicationName: "busybox"
+
+#----------------------------------------------------------------------------------------------------------------------
+# OVERRIDE OPTIONAL VALUES
+# These values have defaults in the k8s-job chart, but we override a few of them for the purposes of this demo.
+#----------------------------------------------------------------------------------------------------------------------
+# containerCommand is a list of strings that indicate a custom command to run for the container in place of the default
+# configured on the image. Omit to run the default command configured on the image.
+#
+# Example (run echo "Hello World"):
+#
+#   containerCommand:
+#     - "echo"
+#     - "Hello World"
+containerCommand: ["/bin/sh"]
+
+# containerArgs is a list of strings that indicate custom arguments when a pod is created. Omit and no arguments will be injected.
+# +# Example (run echo "Hello World"): +# +# containerArgs: +# - "echo" +# - "Hello World" +containerArgs: + - "-c" + - "while true; do echo hello; sleep 10;done" \ No newline at end of file diff --git a/test/k8s_job_template_test.go b/test/k8s_job_template_test.go new file mode 100644 index 00000000..a92c30b6 --- /dev/null +++ b/test/k8s_job_template_test.go @@ -0,0 +1,250 @@ +//go:build all || tpl +// +build all tpl + +// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently +// run just the template tests. See the test README for more information. + +package test + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test each of the required values. Here, we take advantage of the fact that linter_values.yaml is supposed to define +// all the required values, so we check the template rendering by nulling out each field. +func TestK8SJobRequiredValuesAreRequired(t *testing.T) { + t.Parallel() + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-job")) + require.NoError(t, err) + + eachRequired := []string{ + "containerImage.repository", + "containerImage.tag", + "applicationName", + } + for _, requiredVal := range eachRequired { + // Capture the range value and force it into this scope. Otherwise, it is defined outside this block so it can + // change when the subtests parallelize and switch contexts. + requiredVal := requiredVal + t.Run(requiredVal, func(t *testing.T) { + t.Parallel() + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + // We then use SetValues to null out the value. 
+ options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-job", "linter_values.yaml")}, + SetValues: map[string]string{requiredVal: "null"}, + } + _, err := helm.RenderTemplateE(t, options, helmChartPath, strings.ToLower(t.Name()), []string{}) + assert.Error(t, err) + }) + } +} + +// Test each of the optional values defined in linter_values.yaml. Here, we take advantage of the fact that +// linter_values.yaml is supposed to define all the required values, so we check the template rendering by nulling out +// each field. +func TestK8SJobOptionalValuesAreOptional(t *testing.T) { + t.Parallel() + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-job")) + require.NoError(t, err) + + eachOptional := []string{ + "containerImage.pullPolicy", + } + for _, optionalVal := range eachOptional { + // Capture the range value and force it into this scope. Otherwise, it is defined outside this block so it can + // change when the subtests parallelize and switch contexts. + optionalVal := optionalVal + t.Run(optionalVal, func(t *testing.T) { + t.Parallel() + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + // We then use SetValues to null out the value. 
+ options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-job", "linter_values.yaml")}, + SetValues: map[string]string{optionalVal: "null"}, + } + // Make sure it renders without error + helm.RenderTemplate(t, options, helmChartPath, "all", []string{}) + }) + } +} + +// Test that annotations render correctly to annotate the Job resource +func TestK8SJobAnnotationsRenderCorrectly(t *testing.T) { + t.Parallel() + + uniqueID := random.UniqueId() + // ERROR: Need to find function that can inject annotations into a job + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{"jobAnnotations.unique-id": uniqueID}) + + assert.Equal(t, len(job.Annotations), 1) + assert.Equal(t, job.Annotations["unique-id"], uniqueID) +} + +func TestK8SJobSecurityContextAnnotationRenderCorrectly(t *testing.T) { + t.Parallel() + job := renderK8SServiceDeploymentWithSetValues( + t, + map[string]string{ + "securityContext.privileged": "true", + "securityContext.runAsUser": "1000", + }, + ) + renderedContainers := job.Spec.Template.Spec.Containers + require.Equal(t, len(renderedContainers), 1) + testContainer := renderedContainers[0] + assert.NotNil(t, testContainer.SecurityContext) + assert.True(t, *testContainer.SecurityContext.Privileged) + assert.Equal(t, *testContainer.SecurityContext.RunAsUser, int64(1000)) +} + +// Test that default imagePullSecrets do not render any +func TestK8SJobNoImagePullSecrets(t *testing.T) { + t.Parallel() + + job := renderK8SServiceDeploymentWithSetValues( + t, + map[string]string{}, + ) + + renderedImagePullSecrets := job.Spec.Template.Spec.ImagePullSecrets + require.Equal(t, len(renderedImagePullSecrets), 0) +} + +func TestK8SJobMultipleImagePullSecrets(t *testing.T) { + t.Parallel() + + job := renderK8SServiceDeploymentWithSetValues( + t, + map[string]string{ + "imagePullSecrets[0]": "docker-private-registry-key", + "imagePullSecrets[1]": "gcr-registry-key", + }, + ) + + renderedImagePullSecrets := 
job.Spec.Template.Spec.ImagePullSecrets + require.Equal(t, len(renderedImagePullSecrets), 2) + assert.Equal(t, renderedImagePullSecrets[0].Name, "docker-private-registry-key") + assert.Equal(t, renderedImagePullSecrets[1].Name, "gcr-registry-key") +} + +// Test that omitting containerCommand does not set command attribute on the Job container spec. +func TestK8SJobDefaultHasNullCommandSpec(t *testing.T) { + t.Parallel() + + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{}) + renderedContainers := job.Spec.Template.Spec.Containers + require.Equal(t, len(renderedContainers), 1) + appContainer := renderedContainers[0] + assert.Nil(t, appContainer.Command) +} + +// Test that setting containerCommand sets the command attribute on the Job container spec. +func TestK8SJobWithContainerCommandHasCommandSpec(t *testing.T) { + t.Parallel() + + job := renderK8SServiceDeploymentWithSetValues( + t, + map[string]string{ + "containerCommand[0]": "echo", + "containerCommand[1]": "Hello world", + }, + ) + renderedContainers := job.Spec.Template.Spec.Containers + require.Equal(t, len(renderedContainers), 1) + appContainer := renderedContainers[0] + assert.Equal(t, appContainer.Command, []string{"echo", "Hello world"}) +} + +func TestK8SJobMainJobContainersLabeledCorrectly(t *testing.T) { + t.Parallel() + job := renderK8SServiceDeploymentWithSetValues( + t, + map[string]string{ + "containerImage.repository": "nginx", + "containerImage.tag": "1.16.0", + }, + ) + // Ensure a "main" type job is properly labeled as such + assert.Equal(t, job.Spec.Selector.MatchLabels["gruntwork.io/job-type"], "main") +} + +func TestK8SJobAddingAdditionalLabels(t *testing.T) { + t.Parallel() + first_custom_job_label_value := "first-custom-value" + second_custom_job_label_value := "second-custom-value" + job := renderK8SServiceDeploymentWithSetValues(t, + map[string]string{"additionalJobLabels.first-label": first_custom_job_label_value, + "additionalJobLabels.second-label": 
second_custom_job_label_value}) + + assert.Equal(t, job.Labels["first-label"], first_custom_job_label_value) + assert.Equal(t, job.Labels["second-label"], second_custom_job_label_value) +} + +func TestK8SJobFullnameOverride(t *testing.T) { + t.Parallel() + + overiddenName := "overidden-name" + + job := renderK8SServiceDeploymentWithSetValues(t, + map[string]string{ + "fullnameOverride": overiddenName, + }, + ) + + assert.Equal(t, job.Name, overiddenName) +} + +func TestK8SJobEnvFrom(t *testing.T) { + t.Parallel() + + t.Run("BothConfigMapsAndSecretsEnvFrom", func(t *testing.T) { + job := renderK8SServiceDeploymentWithSetValues(t, + map[string]string{ + "configMaps.test-configmap.as": "envFrom", + "secrets.test-secret.as": "envFrom", + }, + ) + + assert.NotNil(t, job.Spec.Template.Spec.Containers[0].EnvFrom) + assert.Equal(t, len(job.Spec.Template.Spec.Containers[0].EnvFrom), 2) + assert.Equal(t, job.Spec.Template.Spec.Containers[0].EnvFrom[0].ConfigMapRef.Name, "test-configmap") + assert.Equal(t, job.Spec.Template.Spec.Containers[0].EnvFrom[1].SecretRef.Name, "test-secret") + }) + + t.Run("OnlyConfigMapsEnvFrom", func(t *testing.T) { + job := renderK8SServiceDeploymentWithSetValues(t, + map[string]string{ + "configMaps.test-configmap.as": "envFrom", + }, + ) + + assert.NotNil(t, job.Spec.Template.Spec.Containers[0].EnvFrom) + assert.Equal(t, len(job.Spec.Template.Spec.Containers[0].EnvFrom), 1) + assert.Equal(t, job.Spec.Template.Spec.Containers[0].EnvFrom[0].ConfigMapRef.Name, "test-configmap") + }) + + t.Run("OnlySecretsEnvFrom", func(t *testing.T) { + job := renderK8SServiceDeploymentWithSetValues(t, + map[string]string{ + "secrets.test-secret.as": "envFrom", + }, + ) + + assert.NotNil(t, job.Spec.Template.Spec.Containers[0].EnvFrom) + assert.Equal(t, len(job.Spec.Template.Spec.Containers[0].EnvFrom), 1) + assert.Equal(t, job.Spec.Template.Spec.Containers[0].EnvFrom[0].SecretRef.Name, "test-secret") + }) + +} diff --git a/test/k8s_job_test.go 
b/test/k8s_job_test.go new file mode 100644 index 00000000..f148369f --- /dev/null +++ b/test/k8s_job_test.go @@ -0,0 +1,90 @@ +//go:build all || integration +// +build all integration + +// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently +// run just the template tests. See the test README for more information. + +package test + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + http_helper "github.com/gruntwork-io/terratest/modules/http-helper" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + test_structure "github.com/gruntwork-io/terratest/modules/test-structure" + "github.com/stretchr/testify/require" + "golang.org/x/mod/semver" +) + +// Test that: +// +// 1. We can deploy the example Job +// 2. The Job succeeds without errors + +func TestK8SJobBusyboxExample(t *testing.T) { + t.Parallel() + + workingDir := filepath.Join(".", "stages", t.Name()) + + //os.Setenv("SKIP_setup", "true") + //os.Setenv("SKIP_create_namespace", "true") + //os.Setenv("SKIP_install", "true") + //os.Setenv("SKIP_validate_job_deployment", "true") + //os.Setenv("SKIP_upgrade", "true") + //os.Setenv("SKIP_validate_upgrade", "true") + //os.Setenv("SKIP_delete", "true") + //os.Setenv("SKIP_delete_namespace", "true") + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-job")) + require.NoError(t, err) + examplePath, err := filepath.Abs(filepath.Join("..", "examples", "k8s-job-busybox")) + require.NoError(t, err) + + // Create a test namespace to deploy resources into, to avoid colliding with other tests + test_structure.RunTestStage(t, "setup", func() { + kubectlOptions := k8s.NewKubectlOptions("", "", "") + test_structure.SaveKubectlOptions(t, workingDir, kubectlOptions) + + uniqueID := random.UniqueId() + test_structure.SaveString(t, workingDir, "uniqueID", uniqueID) + }) + 
kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) + uniqueID := test_structure.LoadString(t, workingDir, "uniqueID") + testNamespace := fmt.Sprintf("k8s-job-busybox-%s", strings.ToLower(uniqueID)) + + defer test_structure.RunTestStage(t, "delete_namespace", func() { + k8s.DeleteNamespace(t, kubectlOptions, testNamespace) + }) + + test_structure.RunTestStage(t, "create_namespace", func() { + k8s.CreateNamespace(t, kubectlOptions, testNamespace) + }) + + kubectlOptions.Namespace = testNamespace + + // Use the values file in the example and deploy the chart in the test namespace + // Set a random release name + releaseName := fmt.Sprintf("k8s-job-busybox-%s", strings.ToLower(uniqueID)) + options := &helm.Options{ + KubectlOptions: kubectlOptions, + ValuesFiles: []string{filepath.Join(examplePath, "values.yaml")}, + } + + defer test_structure.RunTestStage(t, "delete", func() { + helm.Delete(t, options, releaseName, true) + }) + + test_structure.RunTestStage(t, "install", func() { + helm.Install(t, options, helmChartPath, releaseName) + }) + + test_structure.RunTestStage(t, "validate_job_deployment", func() { + verifyPodsCreatedSuccessfully(t, kubectlOptions, "busybox", releaseName, NumPodsExpected) + + }) +}