From a59299897cc6629e4cbb9ebad655dcf982d7c8e9 Mon Sep 17 00:00:00 2001
From: Sirish Bathina
Date: Wed, 28 Oct 2020 16:44:35 -1000
Subject: [PATCH] FIO in go (#25)

* initial changes for go fio

* Most of it

* root command updates

* more changes

* fioscript

* update message to point to fio script
---
 cmd/rootCmd.go | 61 +-
 docs/run_fio2.sh | 183 +++++
 go.mod | 2 +-
 go.sum | 4 +-
 pkg/fio/fio.go | 392 ++++
 pkg/fio/fio_jobs.go | 43 ++
 pkg/fio/fio_test.go | 1042 +++++++++++++++++++++++++++
 pkg/kubestr/fio.go | 20 +
 pkg/kubestr/kubestr.go | 5 +
 pkg/kubestr/storage_provisioners.go | 5 +-
 10 files changed, 1723 insertions(+), 34 deletions(-)
 create mode 100755 docs/run_fio2.sh
 create mode 100644 pkg/fio/fio.go
 create mode 100644 pkg/fio/fio_jobs.go
 create mode 100644 pkg/fio/fio_test.go
 create mode 100644 pkg/kubestr/fio.go

diff --git a/cmd/rootCmd.go b/cmd/rootCmd.go
index 0c96326..f145a8e 100644
--- a/cmd/rootCmd.go
+++ b/cmd/rootCmd.go
@@ -39,23 +39,28 @@ var (
 		},
 	}
 
-	// fioCheckerStorageClass string
-	// fioCmd = &cobra.Command{
-	// 	Use:   "fio",
-	// 	Short: "Runs an fio test on a given storage class",
-	// 	Long: `Run an fio test on a storageclass and calculates
-	// 	its performance.`,
-	// 	Run: func(cmd *cobra.Command, args []string) {
-	// 		Fio(output, fioCheckerStorageClass)
-	// 	},
-	// }
+	fioCheckerStorageClass string
+	fioCheckerConfigMap    string
+	fioCheckerTestName     string
+	fioCmd                 = &cobra.Command{
+		Use:   "fio",
+		Short: "Runs an fio test",
+		Long:  `Run an fio test`,
+		Run: func(cmd *cobra.Command, args []string) {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+			defer cancel()
+			Fio(ctx, output, fioCheckerStorageClass, fioCheckerConfigMap, fioCheckerTestName)
+		},
+	}
 )
 
 func init() {
 	rootCmd.PersistentFlags().StringVarP(&output, "output", "o", "", "Options(json)")
-	// rootCmd.AddCommand(fioCmd)
-	// fioCmd.Flags().StringVarP(&fioCheckerStorageClass, "storageclass", "s", "", "The Name of a storageclass")
+	rootCmd.AddCommand(fioCmd)
+	fioCmd.Flags().StringVarP(&fioCheckerStorageClass, "storageclass", "s", "", "The Name of a storageclass")
+	fioCmd.Flags().StringVarP(&fioCheckerConfigMap, "configmap", "c", "", "The Name of a configmap in the current namespace")
+	fioCmd.Flags().StringVarP(&fioCheckerTestName, "testname", "t", "", "The Name of a predefined kubestr fio test")
 	// //rootCmd.AddCommand(provCmd)
 }
@@ -96,7 +101,7 @@ func Baseline(ctx context.Context, output string) {
 	}
 	fmt.Println("Available Storage Provisioners:")
 	fmt.Println()
-	time.Sleep(500 * time.Millisecond)
+	time.Sleep(500 * time.Millisecond) // Added to introduce lag.
 	for _, provisioner := range provisionerList {
 		provisioner.Print()
 		fmt.Println()
@@ -104,18 +109,18 @@ func Baseline(ctx context.Context, output string) {
 	}
 }
 
-// // Fio executes the FIO test.
-// func Fio(output, storageclass string) {
-// 	p := kubestr.NewKubestr()
-// 	result := p.Baseline()
-// 	if output == "json" {
-// 		jsonRes, _ := json.MarshalIndent(result, "", "  ")
-// 		fmt.Println(string(jsonRes))
-// 		return
-// 	}
-// 	for _, retval := range result {
-// 		retval.Print()
-// 		fmt.Println()
-// 	}
-// 	return
-// }
+// Fio executes the FIO test.
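+// Results print as indented JSON when the output flag is "json"; otherwise the
+// default human-readable format is used.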
+func Fio(ctx context.Context, output, storageclass, configMapName, testName string) {
+	p, err := kubestr.NewKubestr()
+	if err != nil {
+		fmt.Println(err.Error())
+		return
+	}
+	result := p.FIO(ctx, storageclass, configMapName, testName)
+	if output == "json" {
+		jsonRes, _ := json.MarshalIndent(result, "", "  ")
+		fmt.Println(string(jsonRes))
+		return
+	}
+	result.Print()
+}
diff --git a/docs/run_fio2.sh b/docs/run_fio2.sh
new file mode 100755
index 0000000..ba2b921
--- /dev/null
+++ b/docs/run_fio2.sh
@@ -0,0 +1,183 @@
+#!/usr/bin/env bash
+
+# COLOR CONSTANTS
+GREEN='\033[0;32m'
+LIGHT_BLUE='\033[1;34m'
+RED='\033[0;31m'
+NC='\033[0m'
+
+readonly -a REQUIRED_TOOLS=(
+  kubectl
+)
+
+DEFAULT_IMAGE_TAG="latest"
+DEFAULT_JOB_NAME="kubestr"
+
+helpFunction()
+{
+   echo ""
+   echo "This script runs Kubestr as a Job in a cluster"
+   echo "Usage: $0 -i image -n namespace"
+   echo -e "\t-i The Kubestr image"
+   echo -e "\t-n The Kubernetes namespace where the job will run"
+   echo -e "\t-s The storageclass to run the fio test against"
+   echo -e "\t-z The size of volume to run the fio test against"
+   echo -e "\t-f An FIO file to run the fio test against"
+   exit 1 # Exit script after printing help
+}
+
+while getopts "i:n:s:z:f:" opt
+do
+   case "$opt" in
+      i ) image="$OPTARG" ;;
+      n ) namespace="$OPTARG" ;;
+      s ) storageclass="$OPTARG" ;;
+      z ) size="$OPTARG" ;;
+      f ) file="$OPTARG" ;;
+      ? ) helpFunction ;; # Print helpFunction in case parameter is non-existent
+   esac
+done
+
+if [ -z "$namespace" ]
+then
+   echo "Namespace option not provided, using default namespace";
+   namespace="default"
+fi
+
+if [ -z "$storageclass" ]
+then
+   echo "storageclass is needed"
+   exit 1
+fi
+
+print_heading() {
+   printf "${LIGHT_BLUE}$1${NC}\n"
+}
+
+print_error(){
+  printf "${RED}$1${NC}\n"
+}
+
+print_success(){
+  printf "${GREEN}$1${NC}\n"
+}
+
+check_tools() {
+  print_heading "Checking for tools"
+  for tool in "${REQUIRED_TOOLS[@]}"
+  do
+    if ! command -v "${tool}" > /dev/null 2>&1
+    then
+      print_error " --> Unable to find ${tool}"
+      failed=1
+    else
+      print_success " --> Found ${tool}"
+    fi
+  done
+}
+
+check_kubectl_access() {
+  print_heading "Checking access to the Kubernetes context $(kubectl config current-context)"
+  if [[ $(kubectl get ns ${namespace}) ]]; then
+    print_success " --> Able to access the ${namespace} Kubernetes namespace"
+  else
+    print_error " --> Unable to access the ${namespace} Kubernetes namespace"
+    failed=1
+  fi
+}
+
+check_image() {
+  print_heading "Kubestr image"
+  if [ -z "$image" ]
+  then
+    # need to change this to public dockerhub
+    image=ghcr.io/kastenhq/kubestr:${DEFAULT_IMAGE_TAG}
+  fi
+  print_success " --> ${image}"
+}
+
+failed=0
+check_tools && check_image && check_kubectl_access
+if [[ ${failed} != 0 ]]; then
+  print_error "Pre-checks failed"
+  exit 1
+fi
+
+additional_cm_cmd=""
+if [ -n "$file" ]
+then
+  additional_cm_cmd="${additional_cm_cmd} --from-file=${file}"
+fi
+
+if [ -n "$size" ]
+then
+  additional_cm_cmd="${additional_cm_cmd} --from-literal=pvcsize=${size}"
+fi
+
+if [ -n "$storageclass" ]
+then
+  additional_cm_cmd="${additional_cm_cmd} --from-literal=storageclass=${storageclass}"
+fi
+
+
+kubectl create configmap --namespace ${namespace} fio-config ${additional_cm_cmd}
+kubectl label configmap --namespace ${namespace} fio-config createdbyfio=true
+
+printf "\n"
+print_heading "Running Kubestr Job in ${namespace} namespace"
+cat > kubestr.yaml << EOF
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ${DEFAULT_JOB_NAME}
+  namespace: ${namespace}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: ${DEFAULT_JOB_NAME}
+subjects:
+  - kind: ServiceAccount
+    name: ${DEFAULT_JOB_NAME}
+    namespace: ${namespace}
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ${DEFAULT_JOB_NAME}
+  namespace: ${namespace}
+spec:
+  template:
+    spec:
+      containers:
+      - image: ${image}
+        imagePullPolicy: Always
+        name: ${DEFAULT_JOB_NAME}
+        command: [ "/kubestr" ]
+        args: ["fio", "-c", "fio-config"]
+        env:
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+      restartPolicy: Never
+      serviceAccount: ${DEFAULT_JOB_NAME}
+  backoffLimit: 4
+EOF
+
+kubectl apply -f kubestr.yaml
+
+trap "kubectl delete -f kubestr.yaml" EXIT
+
+while [[ $(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" && $(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} -o 'jsonpath={..phase}') != "Succeeded" ]];
+do echo "Waiting for pod $(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} --output=jsonpath='{.items[*].metadata.name}') to be ready - $(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} -o 'jsonpath={..status.containerStatuses[0].state.waiting.reason}')" && sleep 1;
+done
+echo "Pod Ready!"
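+# Stream the job's pod logs below; the EXIT trap above removes the kubestr Job resources once the script exits.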
+echo "" +pod=$(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} --output=jsonpath='{.items[*].metadata.name}') +kubectl logs -n ${namespace} ${pod} -f +echo "" diff --git a/go.mod b/go.mod index 7cfeeac..fab93c6 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.14 replace ( github.com/graymeta/stow => github.com/kastenhq/stow v0.1.2-kasten - github.com/kanisterio/kanister => github.com/kanisterio/kanister v0.0.0-20200806084024-f6822ca9fb14 + github.com/kanisterio/kanister => github.com/kanisterio/kanister v0.0.0-20201019101000-6d342798b895 ) require ( diff --git a/go.sum b/go.sum index c955659..9b258ba 100644 --- a/go.sum +++ b/go.sum @@ -450,8 +450,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kanisterio/kanister v0.0.0-20200806084024-f6822ca9fb14 h1:PdjSnS1B6xb2Rz0Fq/dp4zzb9A6AR3d89gc2Qqh6j5s= -github.com/kanisterio/kanister v0.0.0-20200806084024-f6822ca9fb14/go.mod h1:Ei95BfPEMotC0gWsssDrbb6hSYKX6J10d95tyEsKao4= +github.com/kanisterio/kanister v0.0.0-20201019101000-6d342798b895 h1:X7m1VJRfsEybvABjKINGYT6/Calvc/s59cKbM2JPVH4= +github.com/kanisterio/kanister v0.0.0-20201019101000-6d342798b895/go.mod h1:Ei95BfPEMotC0gWsssDrbb6hSYKX6J10d95tyEsKao4= github.com/kastenhq/stow v0.1.2-kasten/go.mod h1:ABI2whmZOX25JbmbVuHRLFuPiGnv5lxXhduCtof7UHk= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= diff --git a/pkg/fio/fio.go b/pkg/fio/fio.go new file mode 100644 index 0000000..a73fdfe --- /dev/null +++ b/pkg/fio/fio.go @@ -0,0 +1,392 @@ +package fio + +import ( + "context" + "fmt" + "os" + + kankube "github.com/kanisterio/kanister/pkg/kube" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +const ( + // DefaultNS describes the default namespace + DefaultNS = "default" + // PodNamespaceEnvKey describes the pod namespace env variable + PodNamespaceEnvKey = "POD_NAMESPACE" + // DefaultFIOJob describes the default FIO job + DefaultFIOJob = "default-fio" + // ConfigMapSCKey describes the storage class key in a config map + ConfigMapSCKey = "storageclass" + // ConfigMapSizeKey describes the size key in a config map + ConfigMapSizeKey = "pvcsize" + ConfigMapPredefinedTestKey = "fiotest.fio" + // DefaultPVCSize is the default PVC size + DefaultPVCSize = "100Gi" + // PVCGenerateName is the name to generate for the PVC + PVCGenerateName = "kubestr-fio-pvc-" + // PodGenerateName is the name to generate for the POD + PodGenerateName = "kubestr-fio-pod-" + // ContainerName is the name of the container that runs the job + ContainerName = "kubestr-fio" + // PodNameEnvKey is the name of the variable used to get the current pod name + PodNameEnvKey = "HOSTNAME" + // ConfigMapMountPath is the path where we mount the configmap + ConfigMapMountPath = "/etc/fio-config" + // VolumeMountPath is the path where we mount the volume + VolumeMountPath = "/dataset" + // CreatedByFIOLabel is the key that desrcibes the label used to mark configmaps + CreatedByFIOLabel = "createdbyfio" +) + +// FIO 
is an interface that represents FIO related commands +type FIO interface { + RunFio(ctx context.Context, args *RunFIOArgs) (string, error) // , test config +} + +// FIOrunner implments FIO +type FIOrunner struct { + Cli kubernetes.Interface + fioSteps fioSteps +} + +type RunFIOArgs struct { + StorageClass string + ConfigMapName string + JobName string +} + +func (f *FIOrunner) RunFio(ctx context.Context, args *RunFIOArgs) (string, error) { + f.fioSteps = &fioStepper{ + cli: f.Cli, + podReady: &podReadyChecker{cli: f.Cli}, + podSpecMerger: &podSpecMerger{cli: f.Cli}, + kubeExecutor: &kubeExecutor{cli: f.Cli}, + } + return f.RunFioHelper(ctx, args) + +} + +func (f *FIOrunner) RunFioHelper(ctx context.Context, args *RunFIOArgs) (string, error) { + // create a configmap with test parameters + if f.Cli == nil { // for UT purposes + return "", fmt.Errorf("cli uninitialized") + } + if args == nil { + args = &RunFIOArgs{} + } + + configMap, err := f.fioSteps.loadConfigMap(ctx, args) + if err != nil { + return "", errors.Wrap(err, "Unable to create a ConfigMap") + } + defer func() { + _ = f.fioSteps.deleteConfigMap(context.TODO(), configMap) + }() + + testFileName, err := fioTestFilename(configMap.Data) + if err != nil { + return "", errors.Wrap(err, "Failed to get test file name.") + } + + size := configMap.Data[ConfigMapSizeKey] + if size == "" { + size = DefaultPVCSize + } + + storageClass := configMap.Data[ConfigMapSCKey] + if storageClass == "" { + return "", fmt.Errorf("StorageClass must be provided") + } + + if err := f.fioSteps.storageClassExists(ctx, storageClass); err != nil { + return "", errors.Wrap(err, "Cannot find StorageClass") + } + + pvc, err := f.fioSteps.createPVC(ctx, storageClass, size) + if err != nil { + return "", errors.Wrap(err, "Failed to create PVC") + } + defer func() { + _ = f.fioSteps.deletePVC(context.TODO(), pvc.Name) + }() + fmt.Println("PVC created", pvc.Name) + + pod, err := f.fioSteps.createPod(ctx, pvc.Name, configMap.Name, testFileName) + defer func() { + _ = f.fioSteps.deletePod(context.TODO(), pod.Name) + }() + if err != nil { + return "", errors.Wrap(err, "Failed to create POD") + } + fmt.Println("Pod created", pod.Name) + + fmt.Printf("Running FIO test (%s) on StorageClass (%s) with a PVC of Size (%s)\n", testFileName, storageClass, size) + return f.fioSteps.runFIOCommand(ctx, pod.Name, ContainerName, testFileName) +} + +type fioSteps interface { + storageClassExists(ctx context.Context, storageClass string) error + loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) + createPVC(ctx context.Context, storageclass, size string) (*v1.PersistentVolumeClaim, error) + deletePVC(ctx context.Context, pvcName string) error + createPod(ctx context.Context, pvcName, configMapName, testFileName string) (*v1.Pod, error) + deletePod(ctx context.Context, podName string) error + runFIOCommand(ctx context.Context, podName, containerName, testFileName string) (string, error) + deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap) error +} + +type fioStepper struct { + cli kubernetes.Interface + podReady waitForPodReadyInterface + podSpecMerger podSpecMergeInterface + kubeExecutor kubeExecInterface +} + +func (s *fioStepper) storageClassExists(ctx context.Context, storageClass string) error { + if _, err := s.cli.StorageV1().StorageClasses().Get(ctx, storageClass, metav1.GetOptions{}); err != nil { + return err + } + return nil +} + +func getConfigMapJob(jobName string) *v1.ConfigMap { + if jobName == "" { + jobName = DefaultFIOJob + } + cm, 
ok := fioJobs[jobName] + if !ok { + return nil + } + return cm +} + +func (s *fioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) { + if args.ConfigMapName == "" { + cm := getConfigMapJob(args.JobName) + if cm == nil { + return nil, fmt.Errorf("Predefined job (%s) not found", args.JobName) + } + cm.Labels = map[string]string{CreatedByFIOLabel: "true"} + cmResult, err := s.cli.CoreV1().ConfigMaps(GetPodNamespace()).Create(ctx, cm, metav1.CreateOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "Unable to create configMap for predefined job (%s)", args.JobName) + } + args.ConfigMapName = cmResult.Name + } + // fetch configmap + configMap, err := s.cli.CoreV1().ConfigMaps(GetPodNamespace()).Get(ctx, args.ConfigMapName, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "Failed to load configMap (%s) in namespace (%s)", args.ConfigMapName, GetPodNamespace()) + } + // storage class, size, test + if args.StorageClass != "" { + configMap.Data[ConfigMapSCKey] = args.StorageClass + } + + if val, ok := configMap.Data[ConfigMapSizeKey]; !ok || val == "" { + configMap.Data[ConfigMapSizeKey] = DefaultPVCSize + } + + // if entry fio entry exists use it. + for key := range configMap.Data { + if key != ConfigMapSizeKey && key != ConfigMapSCKey { + return configMap, nil + } + } + // otherwise load one + cm := getConfigMapJob(args.JobName) + if cm == nil { + return nil, fmt.Errorf("Predefined job (%s) not found in configmap", args.JobName) + } + configMap.Data[ConfigMapPredefinedTestKey] = cm.Data[ConfigMapPredefinedTestKey] + return s.cli.CoreV1().ConfigMaps(GetPodNamespace()).Update(ctx, configMap, metav1.UpdateOptions{}) +} + +func (s *fioStepper) createPVC(ctx context.Context, storageclass, size string) (*v1.PersistentVolumeClaim, error) { + sizeResource, err := resource.ParseQuantity(size) + if err != nil { + return nil, errors.Wrapf(err, "Unable to parse PVC size (%s)", size) + } + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: PVCGenerateName, + }, + Spec: v1.PersistentVolumeClaimSpec{ + StorageClassName: &storageclass, + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): sizeResource, + }, + }, + }, + } + return s.cli.CoreV1().PersistentVolumeClaims(GetPodNamespace()).Create(ctx, pvc, metav1.CreateOptions{}) +} + +func (s *fioStepper) deletePVC(ctx context.Context, pvcName string) error { + return s.cli.CoreV1().PersistentVolumeClaims(GetPodNamespace()).Delete(ctx, pvcName, metav1.DeleteOptions{}) +} + +func (s *fioStepper) createPod(ctx context.Context, pvcName, configMapName, testFileName string) (*v1.Pod, error) { + if pvcName == "" || configMapName == "" || testFileName == "" { + return nil, fmt.Errorf("Create pod missing required arguments.") + } + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: PodGenerateName, + Namespace: GetPodNamespace(), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Name: ContainerName, + Command: []string{"/bin/sh"}, + Args: []string{"-c", "tail -f /dev/null"}, + VolumeMounts: []v1.VolumeMount{ + {Name: "persistent-storage", MountPath: VolumeMountPath}, + {Name: "config-map", MountPath: ConfigMapMountPath}, + }, + }}, + Volumes: []v1.Volume{ + { + Name: "persistent-storage", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName}, + }, + }, + { + Name: "config-map", 
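+					// This volume surfaces the fio job file from the ConfigMap; the container mounts it at ConfigMapMountPath.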
+ VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: configMapName, + }, + }, + }, + }, + }, + }, + } + + mergedPodSpec, err := s.podSpecMerger.mergePodSpec(ctx, GetPodNamespace(), pod.Spec) + if err != nil { + return nil, errors.Wrap(err, "Failed to merge Pod Spec with parent pod.") + } + + pod.Spec = mergedPodSpec + podRes, err := s.cli.CoreV1().Pods(GetPodNamespace()).Create(ctx, pod, metav1.CreateOptions{}) + if err != nil { + return podRes, err + } + + err = s.podReady.waitForPodReady(ctx, GetPodNamespace(), podRes.Name) + if err != nil { + return nil, err + } + + podRes, err = s.cli.CoreV1().Pods(GetPodNamespace()).Get(ctx, podRes.Name, metav1.GetOptions{}) + if err != nil { + return podRes, err + } + + return podRes, nil +} + +func (s *fioStepper) deletePod(ctx context.Context, podName string) error { + return s.cli.CoreV1().Pods(GetPodNamespace()).Delete(ctx, podName, metav1.DeleteOptions{}) +} + +func (s *fioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName string) (string, error) { + jobFilePath := fmt.Sprintf("%s/%s", ConfigMapMountPath, testFileName) + command := []string{"fio", "--directory", VolumeMountPath, jobFilePath} + stdout, stderr, err := s.kubeExecutor.exec(GetPodNamespace(), podName, containerName, command) + if err != nil || stderr != "" { + return stdout, errors.Wrapf(err, "Error running command:(%v), stderr:(%s)", command, stderr) + } + return stdout, nil +} + +// deleteConfigMap only deletes a config map if it has the label +func (s *fioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap) error { + if val, ok := configMap.Labels[CreatedByFIOLabel]; ok && val == "true" { + return s.cli.CoreV1().ConfigMaps(GetPodNamespace()).Delete(ctx, configMap.Name, metav1.DeleteOptions{}) + } + return nil +} + +// GetPodNamespace gets the pods namespace or returns default +func GetPodNamespace() string { + if val, ok := os.LookupEnv(PodNamespaceEnvKey); ok { + return val + } + return DefaultNS +} + +func fioTestFilename(configMap map[string]string) (string, error) { + potentialFilenames := []string{} + for key := range configMap { + if key != ConfigMapSCKey && key != ConfigMapSizeKey { + potentialFilenames = append(potentialFilenames, key) + } + } + if len(potentialFilenames) != 1 { + return "", fmt.Errorf("Unable to find fio file in configmap/more than one found %v", configMap) + } + return potentialFilenames[0], nil +} + +type waitForPodReadyInterface interface { + waitForPodReady(ctx context.Context, namespace string, name string) error +} + +type podReadyChecker struct { + cli kubernetes.Interface +} + +func (p *podReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) error { + return kankube.WaitForPodReady(ctx, p.cli, namespace, name) +} + +type kubeExecInterface interface { + exec(namespace, podName, containerName string, command []string) (string, string, error) +} + +type kubeExecutor struct { + cli kubernetes.Interface +} + +func (k *kubeExecutor) exec(namespace, podName, containerName string, command []string) (string, string, error) { + return kankube.Exec(k.cli, namespace, podName, containerName, command, nil) +} + +type podSpecMergeInterface interface { + mergePodSpec(ctx context.Context, namespace string, podSpec v1.PodSpec) (v1.PodSpec, error) +} + +type podSpecMerger struct { + cli kubernetes.Interface +} + +func (m *podSpecMerger) mergePodSpec(ctx context.Context, namespace string, podSpec v1.PodSpec) (v1.PodSpec, 
error) { + currentPodName := os.Getenv(PodNameEnvKey) + if currentPodName == "" { + return podSpec, fmt.Errorf("Unable to retrieve Pod name from environment variable (%s)", PodNameEnvKey) + } + currentPod, err := m.cli.CoreV1().Pods(namespace).Get(ctx, currentPodName, metav1.GetOptions{}) + if err != nil { + return podSpec, fmt.Errorf("Failed to discover pod configuration for Pod (%s): (%s)\n", currentPodName, err.Error()) + } + if len(podSpec.Containers) != 1 { + return podSpec, fmt.Errorf("FIO pod doesn't have exactly 1 container.") + } + podSpec.NodeSelector = currentPod.Spec.NodeSelector + podSpec.Tolerations = currentPod.Spec.Tolerations + podSpec.Containers[0].Image = currentPod.Spec.Containers[0].Image + podSpec.SecurityContext = currentPod.Spec.SecurityContext + return podSpec, nil +} diff --git a/pkg/fio/fio_jobs.go b/pkg/fio/fio_jobs.go new file mode 100644 index 0000000..faa9d76 --- /dev/null +++ b/pkg/fio/fio_jobs.go @@ -0,0 +1,43 @@ +package fio + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var fioJobs = map[string]*v1.ConfigMap{ + DefaultFIOJob: { + ObjectMeta: metav1.ObjectMeta{ + GenerateName: DefaultFIOJob, + }, + Data: map[string]string{ + ConfigMapPredefinedTestKey: testJob1, + }, + }, +} + +var testJob1 = `[global] +randrepeat=0 +verify=0 +ioengine=libaio +direct=1 +gtod_reduce=1 +[job1] +name=read_iops +bs=4K +iodepth=64 +size=2G +readwrite=randread +time_based +ramp_time=2s +runtime=15s +[job2] +name=write_iops +bs=4K +iodepth=64 +size=2G +readwrite=randwrite +time_based +ramp_time=2s +runtime=15s +` diff --git a/pkg/fio/fio_test.go b/pkg/fio/fio_test.go new file mode 100644 index 0000000..31914a7 --- /dev/null +++ b/pkg/fio/fio_test.go @@ -0,0 +1,1042 @@ +package fio + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/pkg/errors" + . 
"gopkg.in/check.v1" + v1 "k8s.io/api/core/v1" + scv1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type FIOTestSuite struct{} + +var _ = Suite(&FIOTestSuite{}) + +func (s *FIOTestSuite) TestRunner(c *C) { + ctx := context.Background() + runner := &FIOrunner{ + Cli: nil, + } + _, err := runner.RunFio(ctx, nil) + c.Check(err, NotNil) +} + +func (s *FIOTestSuite) TestRunFioHelper(c *C) { + ctx := context.Background() + for i, tc := range []struct { + cli kubernetes.Interface + stepper *fakeFioStepper + args *RunFIOArgs + expectedSteps []string + checker Checker + expectedCM string + expectedSC string + expectedSize string + expectedTFN string + expectedPVC string + }{ + { // storageclass not found + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + }, + }, + cPVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "PVC", + }, + }, + cPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Pod", + }, + }, + }, + args: &RunFIOArgs{ + StorageClass: "sc", + ConfigMapName: "CM1", + JobName: "job", + }, + checker: NotNil, + expectedSteps: []string{"LCM", "DCM"}, + expectedSC: "sc", + expectedSize: DefaultPVCSize, + expectedTFN: "testfile.fio", + expectedCM: "CM1", + expectedPVC: "PVC", + }, + { // storage class provided by config map + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + ConfigMapSCKey: "sc", + }, + }, + cPVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "PVC", + }, + }, + cPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Pod", + }, + }, + }, + args: &RunFIOArgs{ + ConfigMapName: "CM1", + JobName: "job", + }, + checker: IsNil, + expectedSteps: []string{"LCM", "SCE", "CPVC", "CPOD", "RFIOC", "DPOD", "DPVC", "DCM"}, + expectedSC: "sc", + expectedSize: DefaultPVCSize, + expectedTFN: "testfile.fio", + expectedCM: "CM1", + expectedPVC: "PVC", + }, + { // use size provided by Configmap + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + ConfigMapSCKey: "SC2", + ConfigMapSizeKey: "10Gi", + }, + }, + cPVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "PVC", + }, + }, + cPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Pod", + }, + }, + }, + args: &RunFIOArgs{ + ConfigMapName: "CM1", + JobName: "job", + }, + checker: IsNil, + expectedSteps: []string{"LCM", "SCE", "CPVC", "CPOD", "RFIOC", "DPOD", "DPVC", "DCM"}, + expectedSC: "SC2", + expectedSize: "10Gi", + expectedTFN: "testfile.fio", + expectedCM: "CM1", + expectedPVC: "PVC", + }, + { // fio test error + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + ConfigMapSCKey: "SC2", + ConfigMapSizeKey: "10Gi", + }, + }, + cPVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "PVC", + }, + }, + cPod: &v1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "Pod", + }, + }, + rFIOErr: fmt.Errorf("run fio error"), + }, + args: &RunFIOArgs{ + ConfigMapName: "CM1", + JobName: "job", + }, + checker: NotNil, + expectedSteps: []string{"LCM", "SCE", "CPVC", "CPOD", "RFIOC", "DPOD", "DPVC", "DCM"}, + expectedSC: "SC2", + expectedSize: "10Gi", + expectedTFN: "testfile.fio", + expectedCM: "CM1", + expectedPVC: "PVC", + }, + { // create pod error + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + ConfigMapSCKey: "SC2", + ConfigMapSizeKey: "10Gi", + }, + }, + cPVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "PVC", + }, + }, + cPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Pod", + }, + }, + cPodErr: fmt.Errorf("pod create error"), + }, + args: &RunFIOArgs{ + ConfigMapName: "CM1", + JobName: "job", + }, + checker: NotNil, + expectedSteps: []string{"LCM", "SCE", "CPVC", "CPOD", "DPOD", "DPVC", "DCM"}, + }, + { // create PVC error + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + ConfigMapSCKey: "SC2", + ConfigMapSizeKey: "10Gi", + }, + }, + cPVCErr: fmt.Errorf("pvc create error"), + }, + args: &RunFIOArgs{ + ConfigMapName: "CM1", + JobName: "job", + }, + checker: NotNil, + expectedSteps: []string{"LCM", "SCE", "CPVC", "DCM"}, + }, + { // storageclass not found + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + ConfigMapSCKey: "SC2", + ConfigMapSizeKey: "10Gi", + }, + }, + sceErr: fmt.Errorf("storageclass not found error"), + }, + args: &RunFIOArgs{ + ConfigMapName: "CM1", + JobName: "job", + }, + checker: NotNil, + expectedSteps: []string{"LCM", "SCE", "DCM"}, + }, + { // testfilename retrieval error, more than one provided + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + "testfile.fio2": "testfiledata", + ConfigMapSCKey: "SC2", + ConfigMapSizeKey: "10Gi", + }, + }, + }, + args: &RunFIOArgs{ + ConfigMapName: "CM1", + JobName: "job", + }, + checker: NotNil, + expectedSteps: []string{"LCM", "DCM"}, + }, + { // storageclass not provided in args or configmap + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmConfigMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + }, + Data: map[string]string{ + "testfile.fio": "testfiledata", + ConfigMapSizeKey: "10Gi", + }, + }, + cPVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "PVC", + }, + }, + }, + args: &RunFIOArgs{ + ConfigMapName: "CM1", + JobName: "job", + }, + checker: NotNil, + expectedSteps: []string{"LCM", "DCM"}, + }, + { // load configmap error + cli: fake.NewSimpleClientset(), + stepper: &fakeFioStepper{ + lcmErr: fmt.Errorf("failed to load configmap"), + }, + args: nil, + checker: NotNil, + expectedSteps: []string{"LCM"}, + }, + } { + c.Log(i) + fio := &FIOrunner{ + Cli: tc.cli, + fioSteps: tc.stepper, + } + _, err := fio.RunFioHelper(ctx, tc.args) + c.Check(err, tc.checker) + c.Assert(tc.stepper.steps, DeepEquals, tc.expectedSteps) + if err == nil { + 
c.Assert(tc.expectedSC, Equals, tc.stepper.sceExpSC) + c.Assert(tc.expectedCM, Equals, tc.stepper.lcmExpCM) + c.Assert(tc.expectedSC, Equals, tc.stepper.cPVCExpSC) + c.Assert(tc.expectedSize, Equals, tc.stepper.cPVCExpSize) + c.Assert(tc.expectedTFN, Equals, tc.stepper.cPodExpFN) + c.Assert(tc.expectedCM, Equals, tc.stepper.cPodExpCM) + c.Assert(tc.expectedPVC, Equals, tc.stepper.cPodExpPVC) + } + } +} + +type fakeFioStepper struct { + steps []string + + sceExpSC string + sceErr error + + lcmExpCM string + lcmConfigMap *v1.ConfigMap + lcmErr error + + cPVCExpSC string + cPVCExpSize string + cPVC *v1.PersistentVolumeClaim + cPVCErr error + + dPVCErr error + + cPodExpFN string + cPodExpCM string + cPodExpPVC string + cPod *v1.Pod + cPodErr error + + dPodErr error + + rFIOout string + rFIOErr error +} + +func (f *fakeFioStepper) storageClassExists(ctx context.Context, storageClass string) error { + f.steps = append(f.steps, "SCE") + f.sceExpSC = storageClass + return f.sceErr +} +func (f *fakeFioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) { + f.steps = append(f.steps, "LCM") + f.lcmExpCM = args.ConfigMapName + return f.lcmConfigMap, f.lcmErr +} +func (f *fakeFioStepper) createPVC(ctx context.Context, storageclass, size string) (*v1.PersistentVolumeClaim, error) { + f.steps = append(f.steps, "CPVC") + f.cPVCExpSC = storageclass + f.cPVCExpSize = size + return f.cPVC, f.cPVCErr +} +func (f *fakeFioStepper) deletePVC(ctx context.Context, pvcName string) error { + f.steps = append(f.steps, "DPVC") + return f.dPVCErr +} +func (f *fakeFioStepper) createPod(ctx context.Context, pvcName, configMapName, testFileName string) (*v1.Pod, error) { + f.steps = append(f.steps, "CPOD") + f.cPodExpCM = configMapName + f.cPodExpFN = testFileName + f.cPodExpPVC = pvcName + return f.cPod, f.cPodErr +} +func (f *fakeFioStepper) deletePod(ctx context.Context, podName string) error { + f.steps = append(f.steps, "DPOD") + return f.dPodErr +} +func (f *fakeFioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName string) (string, error) { + f.steps = append(f.steps, "RFIOC") + return f.rFIOout, f.rFIOErr +} +func (f *fakeFioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap) error { + f.steps = append(f.steps, "DCM") + return nil +} + +func (s *FIOTestSuite) TestStorageClassExists(c *C) { + ctx := context.Background() + for _, tc := range []struct { + cli kubernetes.Interface + storageClass string + checker Checker + }{ + { + cli: fake.NewSimpleClientset(), + storageClass: "sc", + checker: NotNil, + }, + { + cli: fake.NewSimpleClientset(&scv1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "sc"}}), + storageClass: "sc", + checker: IsNil, + }, + } { + stepper := &fioStepper{cli: tc.cli} + err := stepper.storageClassExists(ctx, tc.storageClass) + c.Check(err, tc.checker) + } +} + +func (s *FIOTestSuite) TestLoadConfigMap(c *C) { + ctx := context.Background() + for i, tc := range []struct { + cli kubernetes.Interface + configMapName string + jobName string + args *RunFIOArgs + cmChecker Checker + errChecker Checker + failCreates bool + hasLabel bool + }{ + { // provided cm name not found + cli: fake.NewSimpleClientset(), + args: &RunFIOArgs{ + ConfigMapName: "nonexistantcm", + }, + cmChecker: IsNil, + errChecker: NotNil, + }, + { // specified config map found + cli: fake.NewSimpleClientset(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + Namespace: "default", + }, + Data: map[string]string{}, + }), + args: &RunFIOArgs{ 
+ ConfigMapName: "CM1", + }, + cmChecker: NotNil, + errChecker: IsNil, + }, + { // specified config map not found in namespace + cli: fake.NewSimpleClientset(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM1", + Namespace: "badns", + }, + Data: map[string]string{}, + }), + args: &RunFIOArgs{ + ConfigMapName: "CM1", + }, + cmChecker: IsNil, + errChecker: NotNil, + }, + { // creates the default job ConfigMap + cli: fake.NewSimpleClientset(), + cmChecker: NotNil, + errChecker: IsNil, + args: &RunFIOArgs{}, + hasLabel: true, + }, + { // job doesn't exist. + cli: fake.NewSimpleClientset(), + cmChecker: IsNil, + errChecker: NotNil, + args: &RunFIOArgs{ + JobName: "nonExistentJob", + }, + jobName: "nonExistentJob", + }, + { // Fails to create default job + cli: fake.NewSimpleClientset(), + cmChecker: IsNil, + errChecker: NotNil, + args: &RunFIOArgs{}, + failCreates: true, + }, + } { + c.Log(i) + stepper := &fioStepper{cli: tc.cli} + if tc.failCreates { + stepper.cli.(*fake.Clientset).Fake.PrependReactor("create", "configmaps", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("Error creating object") + }) + } + cm, err := stepper.loadConfigMap(ctx, tc.args) + c.Check(err, tc.errChecker) + c.Check(cm, tc.cmChecker) + if cm != nil { + _, ok := cm.Labels[CreatedByFIOLabel] + c.Assert(ok, Equals, tc.hasLabel) + } + } +} + +func (s *FIOTestSuite) TestCreatePVC(c *C) { + ctx := context.Background() + for _, tc := range []struct { + cli kubernetes.Interface + storageclass string + size string + errChecker Checker + pvcChecker Checker + failCreates bool + }{ + { + cli: fake.NewSimpleClientset(), + storageclass: "fakesc", + size: "20Gi", + errChecker: IsNil, + pvcChecker: NotNil, + }, + { // Fails to create pvc + cli: fake.NewSimpleClientset(), + storageclass: "fakesc", + size: "10Gi", + pvcChecker: IsNil, + errChecker: NotNil, + failCreates: true, + }, + { // parse error + cli: fake.NewSimpleClientset(), + storageclass: "fakesc", + size: "Not a quantity", + pvcChecker: IsNil, + errChecker: NotNil, + }, + } { + stepper := &fioStepper{cli: tc.cli} + if tc.failCreates { + stepper.cli.(*fake.Clientset).Fake.PrependReactor("create", "*", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("Error creating object") + }) + } + pvc, err := stepper.createPVC(ctx, tc.storageclass, tc.size) + c.Check(err, tc.errChecker) + c.Check(pvc, tc.pvcChecker) + if pvc != nil { + c.Assert(pvc.GenerateName, Equals, PVCGenerateName) + c.Assert(*pvc.Spec.StorageClassName, Equals, tc.storageclass) + value, ok := pvc.Spec.Resources.Requests.Storage().AsInt64() + c.Assert(ok, Equals, true) + c.Assert(value, Equals, int64(21474836480)) + } + } +} + +func (s *FIOTestSuite) TestDeletePVC(c *C) { + ctx := context.Background() + stepper := &fioStepper{cli: fake.NewSimpleClientset(&v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: GetPodNamespace(), + }})} + err := stepper.deletePVC(ctx, "pvc") + c.Assert(err, IsNil) + err = stepper.deletePVC(ctx, "pvc") + c.Assert(err, NotNil) +} + +func (s *FIOTestSuite) TestCreatPod(c *C) { + ctx := context.Background() + for _, tc := range []struct { + pvcName string + configMapName string + testFileName string + reactor []k8stesting.Reactor + podReadyErr error + podSpecMergerErr error + errChecker Checker + }{ + { + pvcName: "pvc", + configMapName: "cm", + testFileName: "testfile", + errChecker: IsNil, + }, + { + pvcName: "pvc", + 
configMapName: "cm", + testFileName: "testfile", + errChecker: NotNil, + reactor: []k8stesting.Reactor{ + &k8stesting.SimpleReactor{ + Verb: "create", + Resource: "*", + Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}}, nil + }, + }, + &k8stesting.SimpleReactor{ + Verb: "get", + Resource: "*", + Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("Error getting object") + }, + }, + }, + }, + { + pvcName: "pvc", + configMapName: "cm", + testFileName: "testfile", + errChecker: NotNil, + reactor: []k8stesting.Reactor{ + &k8stesting.SimpleReactor{ + Verb: "create", + Resource: "*", + Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}}, nil + }, + }, + }, + podReadyErr: fmt.Errorf("pod ready error"), + }, + { + pvcName: "pvc", + configMapName: "cm", + testFileName: "testfile", + errChecker: NotNil, + reactor: []k8stesting.Reactor{ + &k8stesting.SimpleReactor{ + Verb: "create", + Resource: "*", + Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("pod create error") + }, + }, + }, + }, + { + pvcName: "pvc", + configMapName: "cm", + testFileName: "sdf", + errChecker: NotNil, + podSpecMergerErr: fmt.Errorf("podspecmerger error"), + }, + { + pvcName: "pvc", + configMapName: "cm", + testFileName: "", + errChecker: NotNil, + }, + { + pvcName: "", + configMapName: "cm", + testFileName: "asdf", + errChecker: NotNil, + }, + { + pvcName: "pvc", + configMapName: "", + testFileName: "asd", + errChecker: NotNil, + }, + } { + stepper := &fioStepper{ + cli: fake.NewSimpleClientset(), + podReady: &fakePodReadyChecker{prcErr: tc.podReadyErr}, + podSpecMerger: &fakePodSpecMerger{psmErr: tc.podSpecMergerErr}, + } + if tc.reactor != nil { + stepper.cli.(*fake.Clientset).Fake.ReactionChain = tc.reactor + } + pod, err := stepper.createPod(ctx, tc.pvcName, tc.configMapName, tc.testFileName) + c.Check(err, tc.errChecker) + if err == nil { + c.Assert(pod.GenerateName, Equals, PodGenerateName) + c.Assert(len(pod.Spec.Volumes), Equals, 2) + for _, vol := range pod.Spec.Volumes { + switch vol.Name { + case "persistent-storage": + c.Assert(vol.VolumeSource.PersistentVolumeClaim.ClaimName, Equals, tc.pvcName) + case "config-map": + c.Assert(vol.VolumeSource.ConfigMap.Name, Equals, tc.configMapName) + } + } + c.Assert(len(pod.Spec.Containers), Equals, 1) + c.Assert(pod.Spec.Containers[0].Name, Equals, ContainerName) + c.Assert(pod.Spec.Containers[0].Command, DeepEquals, []string{"/bin/sh"}) + c.Assert(pod.Spec.Containers[0].Args, DeepEquals, []string{"-c", "tail -f /dev/null"}) + c.Assert(pod.Spec.Containers[0].VolumeMounts, DeepEquals, []v1.VolumeMount{ + {Name: "persistent-storage", MountPath: VolumeMountPath}, + {Name: "config-map", MountPath: ConfigMapMountPath}, + }) + } + } +} + +func (s *FIOTestSuite) TestDeletePod(c *C) { + ctx := context.Background() + stepper := &fioStepper{cli: fake.NewSimpleClientset(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: GetPodNamespace(), + }})} + err := stepper.deletePod(ctx, "pod") + c.Assert(err, IsNil) + err = stepper.deletePod(ctx, "pod") + c.Assert(err, NotNil) +} + +func (s *FIOTestSuite) TestFioTestFileName(c *C) { + for _, tc := range []struct { + configMap map[string]string + retVal string + errChecker Checker + 
}{ + { + configMap: map[string]string{ + ConfigMapSCKey: "storageclass", + ConfigMapSizeKey: "10Gi", + "testfile.fio": "some test data", + }, + retVal: "testfile.fio", + errChecker: IsNil, + }, + { + configMap: map[string]string{ + ConfigMapSCKey: "storageclass", + ConfigMapSizeKey: "10Gi", + "testfile.fio": "some test data", + "testfile2.fio": "some test data2", // only support one file + }, + retVal: "", + errChecker: NotNil, + }, + } { + ret, err := fioTestFilename(tc.configMap) + c.Check(err, tc.errChecker) + c.Assert(ret, Equals, tc.retVal) + } +} + +func (s *FIOTestSuite) TestRunFioCommand(c *C) { + ctx := context.Background() + for _, tc := range []struct { + executor *fakeKubeExecutor + errChecker Checker + podName string + containerName string + testFileName string + }{ + { + executor: &fakeKubeExecutor{ + keErr: nil, + keStrErr: "", + keStdOut: "success", + }, + errChecker: IsNil, + podName: "pod", + containerName: "container", + testFileName: "tfName", + }, + { + executor: &fakeKubeExecutor{ + keErr: fmt.Errorf("kubeexec err"), + keStrErr: "", + keStdOut: "success", + }, + errChecker: NotNil, + podName: "pod", + containerName: "container", + testFileName: "tfName", + }, + } { + stepper := &fioStepper{ + kubeExecutor: tc.executor, + } + out, err := stepper.runFIOCommand(ctx, tc.podName, tc.containerName, tc.testFileName) + c.Check(err, tc.errChecker) + c.Assert(out, Equals, tc.executor.keStdOut) + c.Assert(tc.executor.keInPodName, Equals, tc.podName) + c.Assert(tc.executor.keInContainerName, Equals, tc.containerName) + c.Assert(len(tc.executor.keInCommand), Equals, 4) + c.Assert(tc.executor.keInCommand[0], Equals, "fio") + c.Assert(tc.executor.keInCommand[1], Equals, "--directory") + c.Assert(tc.executor.keInCommand[2], Equals, VolumeMountPath) + jobFilePath := fmt.Sprintf("%s/%s", ConfigMapMountPath, tc.testFileName) + c.Assert(tc.executor.keInCommand[3], Equals, jobFilePath) + } +} + +func (s *FIOTestSuite) TestDeleteConfigMap(c *C) { + ctx := context.Background() + defaultNS := "default" + os.Setenv(PodNamespaceEnvKey, defaultNS) + for _, tc := range []struct { + cli kubernetes.Interface + cm *v1.ConfigMap + errChecker Checker + lenCMList int + }{ + { // Don't delete it unless it has the label + cli: fake.NewSimpleClientset(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: defaultNS, + }, + }), + cm: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: defaultNS, + }, + }, + errChecker: IsNil, + lenCMList: 1, + }, + { // Has label delete + cli: fake.NewSimpleClientset(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: defaultNS, + }, + }), + cm: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: defaultNS, + Labels: map[string]string{ + CreatedByFIOLabel: "true", + }, + }, + }, + errChecker: IsNil, + lenCMList: 0, + }, + { // No cm exists + cli: fake.NewSimpleClientset(), + cm: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: defaultNS, + Labels: map[string]string{ + CreatedByFIOLabel: "true", + }, + }, + }, + errChecker: NotNil, + }, + } { + stepper := &fioStepper{cli: tc.cli} + err := stepper.deleteConfigMap(ctx, tc.cm) + c.Check(err, tc.errChecker) + if err == nil { + list, err := stepper.cli.CoreV1().ConfigMaps(defaultNS).List(ctx, metav1.ListOptions{}) + c.Check(err, IsNil) + c.Assert(len(list.Items), Equals, tc.lenCMList) + } + } + os.Unsetenv(PodNamespaceEnvKey) +} + +func (s *FIOTestSuite) TestWaitForPodReady(c *C) { + ctx := context.Background() + 
prChecker := &podReadyChecker{ + cli: fake.NewSimpleClientset(), + } + err := prChecker.waitForPodReady(ctx, "somens", "somePod") + c.Check(err, NotNil) + prChecker.cli = fake.NewSimpleClientset(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "somePod", + Namespace: "somens", + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + }, + }) +} + +func (s *FIOTestSuite) TestMergePodSpec(c *C) { + ctx := context.Background() + runAsUserInt64 := int64(1) + for _, tc := range []struct { + namespace string + inPodSpec v1.PodSpec + podName string + parentPod *v1.Pod + errChecker Checker + }{ + { + parentPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podName", + Namespace: "ns", + }, + Spec: v1.PodSpec{ + NodeSelector: map[string]string{ + "node": "selector", + }, + Tolerations: []v1.Toleration{ + {Value: "toleration"}, + }, + Containers: []v1.Container{ + {Image: "Image"}, + }, + SecurityContext: &v1.PodSecurityContext{ + RunAsUser: &runAsUserInt64, + }, + }, + }, + namespace: "ns", + podName: "podName", + inPodSpec: v1.PodSpec{Containers: []v1.Container{{Name: "container1"}}}, + errChecker: IsNil, + }, + { + parentPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podName", + Namespace: "ns", + }, + Spec: v1.PodSpec{ + NodeSelector: map[string]string{ + "node": "selector", + }, + Tolerations: []v1.Toleration{ + {Value: "toleration"}, + }, + Containers: []v1.Container{ + {Image: "Image"}, + }, + SecurityContext: &v1.PodSecurityContext{ + RunAsUser: &runAsUserInt64, + }, + }, + }, + namespace: "ns", + podName: "podName", + inPodSpec: v1.PodSpec{Containers: []v1.Container{{Name: "container1"}, {Name: "container2"}}}, + errChecker: NotNil, + }, + { + namespace: "ns", + podName: "podName", + inPodSpec: v1.PodSpec{Containers: []v1.Container{{Name: "container1"}}}, + errChecker: NotNil, + }, + { + namespace: "ns", + podName: "", + inPodSpec: v1.PodSpec{Containers: []v1.Container{{Name: "container1"}}}, + errChecker: NotNil, + }, + } { + tempHostname := os.Getenv(PodNameEnvKey) + defer func() { + os.Setenv(PodNameEnvKey, tempHostname) + }() + os.Setenv(PodNameEnvKey, tc.podName) + + cli := fake.NewSimpleClientset() + if tc.parentPod != nil { + cli = fake.NewSimpleClientset(tc.parentPod) + } + psm := podSpecMerger{cli} + outPodSpec, err := psm.mergePodSpec(ctx, tc.namespace, tc.inPodSpec) + c.Check(err, tc.errChecker) + if err == nil { + c.Assert(outPodSpec, Not(DeepEquals), tc.inPodSpec) + c.Assert(outPodSpec.NodeSelector, DeepEquals, tc.parentPod.Spec.NodeSelector) + c.Assert(outPodSpec.Tolerations, DeepEquals, tc.parentPod.Spec.Tolerations) + c.Assert(outPodSpec.Containers[0].Image, Equals, tc.parentPod.Spec.Containers[0].Image) + c.Assert(outPodSpec.SecurityContext, DeepEquals, tc.parentPod.Spec.SecurityContext) + } + os.Setenv(PodNameEnvKey, tempHostname) + } +} + +func (s *FIOTestSuite) TestGetPodNamespace(c *C) { + os.Setenv(PodNamespaceEnvKey, "ns") + ns := GetPodNamespace() + c.Assert(ns, Equals, "ns") + os.Unsetenv(PodNamespaceEnvKey) +} + +type fakePodReadyChecker struct { + prcErr error +} + +func (f *fakePodReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) error { + return f.prcErr +} + +type fakePodSpecMerger struct { + psmErr error +} + +func (fm *fakePodSpecMerger) mergePodSpec(ctx context.Context, namespace string, podSpec v1.PodSpec) (v1.PodSpec, error) { + return podSpec, fm.psmErr +} + +type fakeKubeExecutor struct { + keErr error + keStdOut string + keStrErr string + keInNS string + keInPodName string + keInContainerName string + keInCommand 
[]string +} + +func (fk *fakeKubeExecutor) exec(namespace, podName, containerName string, command []string) (string, string, error) { + fk.keInNS = namespace + fk.keInPodName = podName + fk.keInContainerName = containerName + fk.keInCommand = command + return fk.keStdOut, fk.keStrErr, fk.keErr +} diff --git a/pkg/kubestr/fio.go b/pkg/kubestr/fio.go new file mode 100644 index 0000000..48a041c --- /dev/null +++ b/pkg/kubestr/fio.go @@ -0,0 +1,20 @@ +package kubestr + +import ( + "context" + + "github.com/kastenhq/kubestr/pkg/fio" +) + +func (p *Kubestr) FIO(ctx context.Context, storageClass, configMap, jobName string) *TestOutput { + testName := "FIO test results-" + fioResult, err := p.fio.RunFio(ctx, &fio.RunFIOArgs{ + StorageClass: storageClass, + ConfigMapName: configMap, + JobName: jobName, + }) + if err != nil { + return makeTestOutput(testName, StatusError, err.Error(), nil) + } + return makeTestOutput(testName, StatusOK, fioResult, fioResult) +} diff --git a/pkg/kubestr/kubestr.go b/pkg/kubestr/kubestr.go index aec11ac..0d89b2b 100644 --- a/pkg/kubestr/kubestr.go +++ b/pkg/kubestr/kubestr.go @@ -2,6 +2,7 @@ package kubestr import ( "github.com/kanisterio/kanister/pkg/kube" + "github.com/kastenhq/kubestr/pkg/fio" "github.com/pkg/errors" sv1 "k8s.io/api/storage/v1" unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -17,6 +18,7 @@ type Kubestr struct { sdsfgValidator snapshotDataSourceFG storageClassList *sv1.StorageClassList volumeSnapshotClassList *unstructured.UnstructuredList + fio fio.FIO } const Logo = ` @@ -46,6 +48,9 @@ func NewKubestr() (*Kubestr, error) { cli: cli, dynCli: dynCli, }, + fio: &fio.FIOrunner{ + Cli: cli, + }, }, nil } diff --git a/pkg/kubestr/storage_provisioners.go b/pkg/kubestr/storage_provisioners.go index d0f86e8..954c541 100644 --- a/pkg/kubestr/storage_provisioners.go +++ b/pkg/kubestr/storage_provisioners.go @@ -133,15 +133,14 @@ func (v *Provisioner) Print() { if len(v.StorageClasses) > 0 { fmt.Println() fmt.Println(" To perform a FIO test, run-") - fmt.Println(" curl https://kastenhq.github.io/kubestr/run_fio.sh | bash /dev/stdin -s ") + fmt.Println(" curl https://kastenhq.github.io/kubestr/run_fio2.sh | bash /dev/stdin -s ") switch { case len(v.VolumeSnapshotClasses) == 0 && v.CSIDriver != nil && v.CSIDriver.SupportsSnapshots(): fmt.Println() fmt.Println(" This provisioner supports snapshots, however no Volume Snaphsot Classes were found.") case len(v.VolumeSnapshotClasses) > 0: fmt.Println() - fmt.Println(" (Coming soon) To perform a snapshot/restore test, run-") - fmt.Println(" curl https://kubestr/snaprestore.sh | bash ") + fmt.Println(" (Coming soon) Test snapshot/restore functionality.") } } fmt.Println()