
FIO in go (#25)

* initial changes for go fio

* Most of it

* root command updates

* more changes

* fioscript

* update message to point to fio script
Sirish Bathina 2020-10-28 16:44:35 -10:00 committed by GitHub
parent b0e68d8a26
commit a59299897c
10 changed files with 1723 additions and 34 deletions


@@ -39,23 +39,28 @@ var (
},
}
- // fioCheckerStorageClass string
- // fioCmd = &cobra.Command{
- // Use: "fio",
- // Short: "Runs an fio test on a given storage class",
- // Long: `Run an fio test on a storageclass and calculates
- // its performance.`,
- // Run: func(cmd *cobra.Command, args []string) {
- // Fio(output, fioCheckerStorageClass)
- // },
- // }
+ fioCheckerStorageClass string
+ fioCheckerConfigMap string
+ fioCheckerTestName string
+ fioCmd = &cobra.Command{
+ Use: "fio",
+ Short: "Runs an fio test",
+ Long: `Run an fio test`,
+ Run: func(cmd *cobra.Command, args []string) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+ Fio(ctx, output, fioCheckerStorageClass, fioCheckerConfigMap, fioCheckerTestName)
+ },
+ }
)
func init() {
rootCmd.PersistentFlags().StringVarP(&output, "output", "o", "", "Options(json)")
- // rootCmd.AddCommand(fioCmd)
- // fioCmd.Flags().StringVarP(&fioCheckerStorageClass, "storageclass", "s", "", "The Name of a storageclass")
+ rootCmd.AddCommand(fioCmd)
+ fioCmd.Flags().StringVarP(&fioCheckerStorageClass, "storageclass", "s", "", "The Name of a storageclass")
+ fioCmd.Flags().StringVarP(&fioCheckerConfigMap, "configmap", "c", "", "The Name of a configmap in the current namespace")
+ fioCmd.Flags().StringVarP(&fioCheckerTestName, "testname", "t", "", "The Name of a predefined kubestr fio test")
// //rootCmd.AddCommand(provCmd)
}
@@ -96,7 +101,7 @@ func Baseline(ctx context.Context, output string) {
}
fmt.Println("Available Storage Provisioners:")
fmt.Println()
- time.Sleep(500 * time.Millisecond)
+ time.Sleep(500 * time.Millisecond) // Added to introduce lag.
for _, provisioner := range provisionerList {
provisioner.Print()
fmt.Println()
@@ -104,18 +109,18 @@ func Baseline(ctx context.Context, output string) {
}
}
- // // Fio executes the FIO test.
- // func Fio(output, storageclass string) {
- // p := kubestr.NewKubestr()
- // result := p.Baseline()
- // if output == "json" {
- // jsonRes, _ := json.MarshalIndent(result, "", " ")
- // fmt.Println(string(jsonRes))
- // return
- // }
- // for _, retval := range result {
- // retval.Print()
- // fmt.Println()
- // }
- // return
- // }
+ // Fio executes the FIO test.
+ func Fio(ctx context.Context, output, storageclass, configMapName, testName string) {
+ p, err := kubestr.NewKubestr()
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+ result := p.FIO(ctx, storageclass, configMapName, testName)
+ if output == "json" {
+ jsonRes, _ := json.MarshalIndent(result, "", " ")
+ fmt.Println(string(jsonRes))
+ return
+ }
+ result.Print()
+ }
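
With the subcommand registered above, a run looks like `kubestr fio -s <storageclass>`, optionally narrowed with `-c <configmap>` or `-t <testname>`; the context caps the whole run at five minutes.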

183 docs/run_fio2.sh Executable file

@@ -0,0 +1,183 @@
#!/usr/bin/env bash
# COLOR CONSTANTS
GREEN='\033[0;32m'
LIGHT_BLUE='\033[1;34m'
RED='\033[0;31m'
NC='\033[0m'
readonly -a REQUIRED_TOOLS=(
kubectl
)
DEFAULT_IMAGE_TAG="latest"
DEFAULT_JOB_NAME="kubestr"
helpFunction()
{
echo ""
echo "This scripts runs Kubestr as a Job in a cluster"
echo "Usage: $0 -i image -n namespace"
echo -e "\t-i The Kubestr image"
echo -e "\t-n The kubernetes namespace where the job will run"
echo -e "\t-s The storageclass to run the fio test against"
echo -e "\t-z The size of volume to run the fio test against"
echo -e "\t-f An FIO file to run the fio test against"
exit 1 # Exit script after printing help
}
while getopts "i:n:s:z:f:" opt
do
case "$opt" in
i ) image="$OPTARG" ;;
n ) namespace="$OPTARG" ;;
s ) storageclass="$OPTARG" ;;
z ) size="$OPTARG" ;;
f ) file="$OPTARG" ;;
? ) helpFunction ;; # Print help when an unknown option is passed
esac
done
if [ -z "$namespace" ]
then
echo "Namespace option not provided, using default namespace";
namespace="default"
fi
if [ -z "$storageclass" ]
then
echo "storageclass is needed"
exit 1
fi
print_heading() {
printf "${LIGHT_BLUE}$1${NC}\n"
}
print_error(){
printf "${RED}$1${NC}\n"
}
print_success(){
printf "${GREEN}$1${NC}\n"
}
check_tools() {
print_heading "Checking for tools"
for tool in "${REQUIRED_TOOLS[@]}"
do
if ! command -v "${tool}" > /dev/null 2>&1
then
print_error " --> Unable to find ${tool}"
failed=1
else
print_success " --> Found ${tool}"
fi
done
}
check_kubectl_access() {
print_heading "Checking access to the Kubernetes context $(kubectl config current-context)"
if [[ $(kubectl get ns ${namespace}) ]]; then
print_success " --> Able to access the ${namespace} Kubernetes namespace"
else
print_error " --> Unable to access the ${namespace} Kubernetes namespace"
failed=1
fi
}
check_image() {
print_heading "Kubestr image"
if [ -z "$image" ]
then
# need to change this to public dockerhub
image=ghcr.io/kastenhq/kubestr:${DEFAULT_IMAGE_TAG}
fi
print_success " --> ${image}"
}
failed=0
check_tools && check_image && check_kubectl_access
if [[ ${failed} != 0 ]]; then
print_error "Pre-checks failed"
exit 1
fi
additional_cm_cmd=""
if [ -n "$file" ]
then
additional_cm_cmd="${additional_cm_cmd} --from-file=${file}"
fi
if [ -n "$size" ]
then
additional_cm_cmd="${additional_cm_cmd} --from-literal=pvcsize=${size}"
fi
if [ -n "$storageclass" ]
then
additional_cm_cmd="${additional_cm_cmd} --from-literal=storageclass=${storageclass}"
fi
kubectl create configmap --namespace ${namespace} fio-config ${additional_cm_cmd}
kubectl label configmap --namespace ${namespace} fio-config createdbyfio=true
printf "\n"
print_heading "Running Kubestr Job in ${namespace} namspace"
cat > kubestr.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: ${DEFAULT_JOB_NAME}
namespace: ${namespace}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ${DEFAULT_JOB_NAME}
subjects:
- kind: ServiceAccount
name: ${DEFAULT_JOB_NAME}
namespace: ${namespace}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
name: ${DEFAULT_JOB_NAME}
namespace: ${namespace}
spec:
template:
spec:
containers:
- image: ${image}
imagePullPolicy: Always
name: ${DEFAULT_JOB_NAME}
command: [ "/kubestr" ]
args: ["fio", "-c", "fio-config"]
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: Never
serviceAccount: ${DEFAULT_JOB_NAME}
backoffLimit: 4
EOF
kubectl apply -f kubestr.yaml
trap "kubectl delete -f kubestr.yaml" EXIT
while [[ $(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" && $(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} -o 'jsonpath={..phase}') != "Succeeded" ]];
do echo "Waiting for pod $(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} --output=jsonpath='{.items[*].metadata.name}') to be ready - $(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} -o 'jsonpath={..status.containerStatuses[0].state.waiting.reason}')" && sleep 1;
done
echo "Pod Ready!"
echo ""
pod=$(kubectl -n ${namespace} get pods --selector=job-name=${DEFAULT_JOB_NAME} --output=jsonpath='{.items[*].metadata.name}')
kubectl logs -n ${namespace} ${pod} -f
echo ""

2 go.mod

@@ -4,7 +4,7 @@ go 1.14
replace (
github.com/graymeta/stow => github.com/kastenhq/stow v0.1.2-kasten
- github.com/kanisterio/kanister => github.com/kanisterio/kanister v0.0.0-20200806084024-f6822ca9fb14
+ github.com/kanisterio/kanister => github.com/kanisterio/kanister v0.0.0-20201019101000-6d342798b895
)
require (

4 go.sum

@@ -450,8 +450,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
- github.com/kanisterio/kanister v0.0.0-20200806084024-f6822ca9fb14 h1:PdjSnS1B6xb2Rz0Fq/dp4zzb9A6AR3d89gc2Qqh6j5s=
- github.com/kanisterio/kanister v0.0.0-20200806084024-f6822ca9fb14/go.mod h1:Ei95BfPEMotC0gWsssDrbb6hSYKX6J10d95tyEsKao4=
+ github.com/kanisterio/kanister v0.0.0-20201019101000-6d342798b895 h1:X7m1VJRfsEybvABjKINGYT6/Calvc/s59cKbM2JPVH4=
+ github.com/kanisterio/kanister v0.0.0-20201019101000-6d342798b895/go.mod h1:Ei95BfPEMotC0gWsssDrbb6hSYKX6J10d95tyEsKao4=
github.com/kastenhq/stow v0.1.2-kasten/go.mod h1:ABI2whmZOX25JbmbVuHRLFuPiGnv5lxXhduCtof7UHk=
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=

392 pkg/fio/fio.go Normal file

@@ -0,0 +1,392 @@
package fio
import (
"context"
"fmt"
"os"
kankube "github.com/kanisterio/kanister/pkg/kube"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
// DefaultNS describes the default namespace
DefaultNS = "default"
// PodNamespaceEnvKey describes the pod namespace env variable
PodNamespaceEnvKey = "POD_NAMESPACE"
// DefaultFIOJob describes the default FIO job
DefaultFIOJob = "default-fio"
// ConfigMapSCKey describes the storage class key in a config map
ConfigMapSCKey = "storageclass"
// ConfigMapSizeKey describes the size key in a config map
ConfigMapSizeKey = "pvcsize"
// ConfigMapPredefinedTestKey describes the predefined test entry in a config map
ConfigMapPredefinedTestKey = "fiotest.fio"
// DefaultPVCSize is the default PVC size
DefaultPVCSize = "100Gi"
// PVCGenerateName is the name to generate for the PVC
PVCGenerateName = "kubestr-fio-pvc-"
// PodGenerateName is the name to generate for the POD
PodGenerateName = "kubestr-fio-pod-"
// ContainerName is the name of the container that runs the job
ContainerName = "kubestr-fio"
// PodNameEnvKey is the name of the variable used to get the current pod name
PodNameEnvKey = "HOSTNAME"
// ConfigMapMountPath is the path where we mount the configmap
ConfigMapMountPath = "/etc/fio-config"
// VolumeMountPath is the path where we mount the volume
VolumeMountPath = "/dataset"
// CreatedByFIOLabel is the key that describes the label used to mark configmaps
CreatedByFIOLabel = "createdbyfio"
)
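// Taken together, these keys define the contract for a user-supplied
// ConfigMap: "storageclass" and "pvcsize" are reserved parameters, and any
// remaining key is treated as the fio job file to mount into the test pod.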
// FIO is an interface that represents FIO related commands
type FIO interface {
RunFio(ctx context.Context, args *RunFIOArgs) (string, error) // , test config
}
// FIOrunner implements FIO
type FIOrunner struct {
Cli kubernetes.Interface
fioSteps fioSteps
}
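// RunFIOArgs holds the user-facing knobs for a run: the StorageClass to
// test, an optional ConfigMap carrying the job file, and an optional
// predefined job name.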
type RunFIOArgs struct {
StorageClass string
ConfigMapName string
JobName string
}
func (f *FIOrunner) RunFio(ctx context.Context, args *RunFIOArgs) (string, error) {
f.fioSteps = &fioStepper{
cli: f.Cli,
podReady: &podReadyChecker{cli: f.Cli},
podSpecMerger: &podSpecMerger{cli: f.Cli},
kubeExecutor: &kubeExecutor{cli: f.Cli},
}
return f.RunFioHelper(ctx, args)
}
func (f *FIOrunner) RunFioHelper(ctx context.Context, args *RunFIOArgs) (string, error) {
// create a configmap with test parameters
if f.Cli == nil { // for UT purposes
return "", fmt.Errorf("cli uninitialized")
}
if args == nil {
args = &RunFIOArgs{}
}
configMap, err := f.fioSteps.loadConfigMap(ctx, args)
if err != nil {
return "", errors.Wrap(err, "Unable to create a ConfigMap")
}
defer func() {
_ = f.fioSteps.deleteConfigMap(context.TODO(), configMap)
}()
testFileName, err := fioTestFilename(configMap.Data)
if err != nil {
return "", errors.Wrap(err, "Failed to get test file name.")
}
size := configMap.Data[ConfigMapSizeKey]
if size == "" {
size = DefaultPVCSize
}
storageClass := configMap.Data[ConfigMapSCKey]
if storageClass == "" {
return "", fmt.Errorf("StorageClass must be provided")
}
if err := f.fioSteps.storageClassExists(ctx, storageClass); err != nil {
return "", errors.Wrap(err, "Cannot find StorageClass")
}
pvc, err := f.fioSteps.createPVC(ctx, storageClass, size)
if err != nil {
return "", errors.Wrap(err, "Failed to create PVC")
}
defer func() {
_ = f.fioSteps.deletePVC(context.TODO(), pvc.Name)
}()
fmt.Println("PVC created", pvc.Name)
pod, err := f.fioSteps.createPod(ctx, pvc.Name, configMap.Name, testFileName)
defer func() {
_ = f.fioSteps.deletePod(context.TODO(), pod.Name)
}()
if err != nil {
return "", errors.Wrap(err, "Failed to create POD")
}
fmt.Println("Pod created", pod.Name)
fmt.Printf("Running FIO test (%s) on StorageClass (%s) with a PVC of Size (%s)\n", testFileName, storageClass, size)
return f.fioSteps.runFIOCommand(ctx, pod.Name, ContainerName, testFileName)
}
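// fioSteps names each step RunFioHelper performs, in order: resolve the
// ConfigMap, verify the StorageClass, provision a PVC, start a pod mounting
// both, exec fio against the volume, then clean up via the deferred deletes.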
type fioSteps interface {
storageClassExists(ctx context.Context, storageClass string) error
loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error)
createPVC(ctx context.Context, storageclass, size string) (*v1.PersistentVolumeClaim, error)
deletePVC(ctx context.Context, pvcName string) error
createPod(ctx context.Context, pvcName, configMapName, testFileName string) (*v1.Pod, error)
deletePod(ctx context.Context, podName string) error
runFIOCommand(ctx context.Context, podName, containerName, testFileName string) (string, error)
deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap) error
}
type fioStepper struct {
cli kubernetes.Interface
podReady waitForPodReadyInterface
podSpecMerger podSpecMergeInterface
kubeExecutor kubeExecInterface
}
func (s *fioStepper) storageClassExists(ctx context.Context, storageClass string) error {
if _, err := s.cli.StorageV1().StorageClasses().Get(ctx, storageClass, metav1.GetOptions{}); err != nil {
return err
}
return nil
}
func getConfigMapJob(jobName string) *v1.ConfigMap {
if jobName == "" {
jobName = DefaultFIOJob
}
cm, ok := fioJobs[jobName]
if !ok {
return nil
}
return cm
}
func (s *fioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) {
if args.ConfigMapName == "" {
cm := getConfigMapJob(args.JobName)
if cm == nil {
return nil, fmt.Errorf("Predefined job (%s) not found", args.JobName)
}
cm.Labels = map[string]string{CreatedByFIOLabel: "true"}
cmResult, err := s.cli.CoreV1().ConfigMaps(GetPodNamespace()).Create(ctx, cm, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Unable to create configMap for predefined job (%s)", args.JobName)
}
args.ConfigMapName = cmResult.Name
}
// fetch configmap
configMap, err := s.cli.CoreV1().ConfigMaps(GetPodNamespace()).Get(ctx, args.ConfigMapName, metav1.GetOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to load configMap (%s) in namespace (%s)", args.ConfigMapName, GetPodNamespace())
}
// storage class, size, test
if args.StorageClass != "" {
configMap.Data[ConfigMapSCKey] = args.StorageClass
}
if val, ok := configMap.Data[ConfigMapSizeKey]; !ok || val == "" {
configMap.Data[ConfigMapSizeKey] = DefaultPVCSize
}
// if an fio entry already exists in the configmap, use it.
for key := range configMap.Data {
if key != ConfigMapSizeKey && key != ConfigMapSCKey {
return configMap, nil
}
}
// otherwise load one
cm := getConfigMapJob(args.JobName)
if cm == nil {
return nil, fmt.Errorf("Predefined job (%s) not found in configmap", args.JobName)
}
configMap.Data[ConfigMapPredefinedTestKey] = cm.Data[ConfigMapPredefinedTestKey]
return s.cli.CoreV1().ConfigMaps(GetPodNamespace()).Update(ctx, configMap, metav1.UpdateOptions{})
}
func (s *fioStepper) createPVC(ctx context.Context, storageclass, size string) (*v1.PersistentVolumeClaim, error) {
sizeResource, err := resource.ParseQuantity(size)
if err != nil {
return nil, errors.Wrapf(err, "Unable to parse PVC size (%s)", size)
}
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: PVCGenerateName,
},
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: &storageclass,
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): sizeResource,
},
},
},
}
return s.cli.CoreV1().PersistentVolumeClaims(GetPodNamespace()).Create(ctx, pvc, metav1.CreateOptions{})
}
func (s *fioStepper) deletePVC(ctx context.Context, pvcName string) error {
return s.cli.CoreV1().PersistentVolumeClaims(GetPodNamespace()).Delete(ctx, pvcName, metav1.DeleteOptions{})
}
func (s *fioStepper) createPod(ctx context.Context, pvcName, configMapName, testFileName string) (*v1.Pod, error) {
if pvcName == "" || configMapName == "" || testFileName == "" {
return nil, fmt.Errorf("Create pod missing required arguments.")
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: PodGenerateName,
Namespace: GetPodNamespace(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: ContainerName,
Command: []string{"/bin/sh"},
Args: []string{"-c", "tail -f /dev/null"},
VolumeMounts: []v1.VolumeMount{
{Name: "persistent-storage", MountPath: VolumeMountPath},
{Name: "config-map", MountPath: ConfigMapMountPath},
},
}},
Volumes: []v1.Volume{
{
Name: "persistent-storage",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName},
},
},
{
Name: "config-map",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: configMapName,
},
},
},
},
},
},
}
mergedPodSpec, err := s.podSpecMerger.mergePodSpec(ctx, GetPodNamespace(), pod.Spec)
if err != nil {
return nil, errors.Wrap(err, "Failed to merge Pod Spec with parent pod.")
}
pod.Spec = mergedPodSpec
podRes, err := s.cli.CoreV1().Pods(GetPodNamespace()).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return podRes, err
}
err = s.podReady.waitForPodReady(ctx, GetPodNamespace(), podRes.Name)
if err != nil {
return nil, err
}
podRes, err = s.cli.CoreV1().Pods(GetPodNamespace()).Get(ctx, podRes.Name, metav1.GetOptions{})
if err != nil {
return podRes, err
}
return podRes, nil
}
func (s *fioStepper) deletePod(ctx context.Context, podName string) error {
return s.cli.CoreV1().Pods(GetPodNamespace()).Delete(ctx, podName, metav1.DeleteOptions{})
}
func (s *fioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName string) (string, error) {
jobFilePath := fmt.Sprintf("%s/%s", ConfigMapMountPath, testFileName)
command := []string{"fio", "--directory", VolumeMountPath, jobFilePath}
stdout, stderr, err := s.kubeExecutor.exec(GetPodNamespace(), podName, containerName, command)
if err != nil || stderr != "" {
return stdout, errors.Wrapf(err, "Error running command:(%v), stderr:(%s)", command, stderr)
}
return stdout, nil
}
// deleteConfigMap only deletes a config map if it has the label
func (s *fioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap) error {
if val, ok := configMap.Labels[CreatedByFIOLabel]; ok && val == "true" {
return s.cli.CoreV1().ConfigMaps(GetPodNamespace()).Delete(ctx, configMap.Name, metav1.DeleteOptions{})
}
return nil
}
// GetPodNamespace gets the pods namespace or returns default
func GetPodNamespace() string {
if val, ok := os.LookupEnv(PodNamespaceEnvKey); ok {
return val
}
return DefaultNS
}
func fioTestFilename(configMap map[string]string) (string, error) {
potentialFilenames := []string{}
for key := range configMap {
if key != ConfigMapSCKey && key != ConfigMapSizeKey {
potentialFilenames = append(potentialFilenames, key)
}
}
if len(potentialFilenames) != 1 {
return "", fmt.Errorf("Unable to find fio file in configmap/more than one found %v", configMap)
}
return potentialFilenames[0], nil
}
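// Illustration (not part of the commit): for Data of
//   {"storageclass": "standard", "pvcsize": "20Gi", "job.fio": "..."},
// the two reserved keys are skipped and "job.fio" is returned; zero or
// several candidate keys is an error.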
type waitForPodReadyInterface interface {
waitForPodReady(ctx context.Context, namespace string, name string) error
}
type podReadyChecker struct {
cli kubernetes.Interface
}
func (p *podReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) error {
return kankube.WaitForPodReady(ctx, p.cli, namespace, name)
}
type kubeExecInterface interface {
exec(namespace, podName, containerName string, command []string) (string, string, error)
}
type kubeExecutor struct {
cli kubernetes.Interface
}
func (k *kubeExecutor) exec(namespace, podName, containerName string, command []string) (string, string, error) {
return kankube.Exec(k.cli, namespace, podName, containerName, command, nil)
}
type podSpecMergeInterface interface {
mergePodSpec(ctx context.Context, namespace string, podSpec v1.PodSpec) (v1.PodSpec, error)
}
type podSpecMerger struct {
cli kubernetes.Interface
}
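// mergePodSpec copies scheduling and image settings from the pod kubestr
// itself runs in (located via the HOSTNAME env var) onto the fio pod spec,
// so the test pod inherits the parent's image, node selector, tolerations,
// and security context. This is also why the run_fio2.sh Job runs kubestr
// in-cluster rather than from a workstation.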
func (m *podSpecMerger) mergePodSpec(ctx context.Context, namespace string, podSpec v1.PodSpec) (v1.PodSpec, error) {
currentPodName := os.Getenv(PodNameEnvKey)
if currentPodName == "" {
return podSpec, fmt.Errorf("Unable to retrieve Pod name from environment variable (%s)", PodNameEnvKey)
}
currentPod, err := m.cli.CoreV1().Pods(namespace).Get(ctx, currentPodName, metav1.GetOptions{})
if err != nil {
return podSpec, fmt.Errorf("Failed to discover pod configuration for Pod (%s): (%s)\n", currentPodName, err.Error())
}
if len(podSpec.Containers) != 1 {
return podSpec, fmt.Errorf("FIO pod doesn't have exactly 1 container.")
}
podSpec.NodeSelector = currentPod.Spec.NodeSelector
podSpec.Tolerations = currentPod.Spec.Tolerations
podSpec.Containers[0].Image = currentPod.Spec.Containers[0].Image
podSpec.SecurityContext = currentPod.Spec.SecurityContext
return podSpec, nil
}
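
For callers outside the CLI, the exported surface above (FIOrunner, RunFIOArgs, RunFio) is enough to drive a run directly. A minimal sketch, assuming an in-cluster client built with client-go; the StorageClass name "standard" is an assumption, not something this commit defines:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/kastenhq/kubestr/pkg/fio"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes we run inside the cluster, like the Job created by run_fio2.sh.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cli, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	runner := &fio.FIOrunner{Cli: cli}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	// "standard" is a placeholder StorageClass; empty ConfigMapName and
	// JobName fall through to the default-fio job.
	out, err := runner.RunFio(ctx, &fio.RunFIOArgs{StorageClass: "standard"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}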

43 pkg/fio/fio_jobs.go Normal file

@@ -0,0 +1,43 @@
package fio
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var fioJobs = map[string]*v1.ConfigMap{
DefaultFIOJob: {
ObjectMeta: metav1.ObjectMeta{
GenerateName: DefaultFIOJob,
},
Data: map[string]string{
ConfigMapPredefinedTestKey: testJob1,
},
},
}
var testJob1 = `[global]
randrepeat=0
verify=0
ioengine=libaio
direct=1
gtod_reduce=1
[job1]
name=read_iops
bs=4K
iodepth=64
size=2G
readwrite=randread
time_based
ramp_time=2s
runtime=15s
[job2]
name=write_iops
bs=4K
iodepth=64
size=2G
readwrite=randwrite
time_based
ramp_time=2s
runtime=15s
`
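
Because predefined tests are just entries in this map, keyed by the name passed to -t, adding one is mechanical. A hypothetical sketch in the same package (the rand-rw name and job spec are illustrative, not part of this commit):

// Hypothetical addition to pkg/fio (same package, so fioJobs and
// ConfigMapPredefinedTestKey are in scope; imports as in fio_jobs.go above).
var testJobRandRW = `[global]
randrepeat=0
verify=0
ioengine=libaio
direct=1
gtod_reduce=1
[job1]
name=rand_rw
bs=4K
iodepth=64
size=2G
readwrite=randrw
time_based
ramp_time=2s
runtime=15s
`

func init() {
	fioJobs["rand-rw"] = &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "rand-rw"},
		Data:       map[string]string{ConfigMapPredefinedTestKey: testJobRandRW},
	}
}

A user would then select it with kubestr fio -s <storageclass> -t rand-rw.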

1042 pkg/fio/fio_test.go Normal file

File diff suppressed because it is too large

20 pkg/kubestr/fio.go Normal file

@@ -0,0 +1,20 @@
package kubestr
import (
"context"
"github.com/kastenhq/kubestr/pkg/fio"
)
func (p *Kubestr) FIO(ctx context.Context, storageClass, configMap, jobName string) *TestOutput {
testName := "FIO test results-"
fioResult, err := p.fio.RunFio(ctx, &fio.RunFIOArgs{
StorageClass: storageClass,
ConfigMapName: configMap,
JobName: jobName,
})
if err != nil {
return makeTestOutput(testName, StatusError, err.Error(), nil)
}
return makeTestOutput(testName, StatusOK, fioResult, fioResult)
}


@@ -2,6 +2,7 @@ package kubestr
import (
"github.com/kanisterio/kanister/pkg/kube"
+ "github.com/kastenhq/kubestr/pkg/fio"
"github.com/pkg/errors"
sv1 "k8s.io/api/storage/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -17,6 +18,7 @@ type Kubestr struct {
sdsfgValidator snapshotDataSourceFG
storageClassList *sv1.StorageClassList
volumeSnapshotClassList *unstructured.UnstructuredList
+ fio fio.FIO
}
const Logo = `
@@ -46,6 +48,9 @@ func NewKubestr() (*Kubestr, error) {
cli: cli,
dynCli: dynCli,
},
+ fio: &fio.FIOrunner{
+ Cli: cli,
+ },
}, nil
}


@@ -133,15 +133,14 @@ func (v *Provisioner) Print() {
if len(v.StorageClasses) > 0 {
fmt.Println()
fmt.Println(" To perform a FIO test, run-")
- fmt.Println(" curl https://kastenhq.github.io/kubestr/run_fio.sh | bash /dev/stdin -s <storage class>")
+ fmt.Println(" curl https://kastenhq.github.io/kubestr/run_fio2.sh | bash /dev/stdin -s <storage class>")
switch {
case len(v.VolumeSnapshotClasses) == 0 && v.CSIDriver != nil && v.CSIDriver.SupportsSnapshots():
fmt.Println()
fmt.Println(" This provisioner supports snapshots, however no Volume Snapshot Classes were found.")
case len(v.VolumeSnapshotClasses) > 0:
fmt.Println()
- fmt.Println(" (Coming soon) To perform a snapshot/restore test, run-")
- fmt.Println(" curl https://kubestr/snaprestore.sh | bash <storage class> <volume snapshot class>")
+ fmt.Println(" (Coming soon) Test snapshot/restore functionality.")
}
}
fmt.Println()