1
0
Fork 0
mirror of https://github.com/kastenhq/kubestr.git synced 2024-12-14 11:57:56 +00:00

Block mount tester (#159)

* Block mount tester.

* Fixed linter error.

* Updated README

* Ran go mod tidy. Added message advertising the block mount test.

* Addressed PR feedback.

* A few more renames...

* Added a pvc size flag.

* Reversed -c flag again.
This commit is contained in:
carlbraganza 2023-07-24 14:23:35 -07:00 committed by GitHub
parent e81a775a81
commit 9c2e28e275
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
10 changed files with 1073 additions and 1681 deletions

View file

@ -30,5 +30,8 @@ Kubestr can assist in the following ways-
### To check a CSI driver's snapshot and restore capabilities -
- Run `./kubestr csicheck -s <storage class> -v <volume snapshot class>`
### To check if a StorageClass supports a block mount -
- Run `./kubestr blockmount -s StorageClass`
## Roadmap
- In the future we plan to allow users to post their FIO results and compare to others.

View file

@ -21,6 +21,7 @@ import (
"os"
"time"
"github.com/kastenhq/kubestr/pkg/block"
"github.com/kastenhq/kubestr/pkg/csi"
csitypes "github.com/kastenhq/kubestr/pkg/csi/types"
"github.com/kastenhq/kubestr/pkg/fio"
@ -97,6 +98,45 @@ var (
)
},
}
blockMountRunAsUser int64
blockMountCleanup bool
blockMountCleanupOnly bool
blockMountWaitTimeoutSeconds uint32
blockMountPVCSize string
blockMountCmd = &cobra.Command{
Use: "blockmount",
Short: "Checks if a storage class supports block volumes",
Long: `Checks if volumes provisioned by a storage class can be mounted in block mode.
The checker works as follows:
- It dynamically provisions a volume of the given storage class.
- It then launches a pod with the volume mounted as a block device.
- If the pod is successfully created then the test passes.
- If the pod fails or times out then the test fails.
In case of failure, re-run the checker with the "-c=false" flag and examine the
failed PVC and Pod: it may be necessary to adjust the default values used for
the PVC size, the pod wait timeout, etc. Clean up the failed resources by
running the checker with the "--cleanup-only" flag.
`,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
checkerArgs := block.BlockMountCheckerArgs{
StorageClass: storageClass,
Namespace: namespace,
Cleanup: blockMountCleanup,
RunAsUser: blockMountRunAsUser,
ContainerImage: containerImage,
K8sObjectReadyTimeout: (time.Second * time.Duration(blockMountWaitTimeoutSeconds)),
PVCSize: blockMountPVCSize,
}
return BlockMountCheck(ctx, output, outfile, blockMountCleanupOnly, checkerArgs)
},
}
)
func init() {
@ -121,7 +161,7 @@ func init() {
csiCheckCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace used to run the check.")
csiCheckCmd.Flags().StringVarP(&containerImage, "image", "i", "", "The container image used to create a pod.")
csiCheckCmd.Flags().BoolVarP(&csiCheckCleanup, "cleanup", "c", true, "Clean up the objects created by tool")
csiCheckCmd.Flags().Int64VarP(&csiCheckRunAsUser, "runAsUser", "u", 0, "Runs the CSI check using pods as a user (int)")
csiCheckCmd.Flags().Int64VarP(&csiCheckRunAsUser, "runAsUser", "u", 0, "Runs the CSI check pod with the specified user ID (int)")
csiCheckCmd.Flags().BoolVarP(&csiCheckSkipCFSCheck, "skipCFScheck", "k", false, "Use this flag to skip validating the ability to clone a snapshot.")
rootCmd.AddCommand(pvcBrowseCmd)
@ -130,6 +170,17 @@ func init() {
pvcBrowseCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace of the PersistentVolumeClaim.")
pvcBrowseCmd.Flags().Int64VarP(&csiCheckRunAsUser, "runAsUser", "u", 0, "Runs the inspector pod as a user (int)")
pvcBrowseCmd.Flags().IntVarP(&pvcBrowseLocalPort, "localport", "l", 8080, "The local port to expose the inspector")
rootCmd.AddCommand(blockMountCmd)
blockMountCmd.Flags().StringVarP(&storageClass, "storageclass", "s", "", "The name of a Storageclass. (Required)")
_ = blockMountCmd.MarkFlagRequired("storageclass")
blockMountCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace used to run the check.")
blockMountCmd.Flags().StringVarP(&containerImage, "image", "i", "", "The container image used to create a pod.")
blockMountCmd.Flags().BoolVarP(&blockMountCleanup, "cleanup", "c", true, "Clean up the objects created by the check.")
blockMountCmd.Flags().BoolVarP(&blockMountCleanupOnly, "cleanup-only", "", false, "Do not run the checker, but just clean up resources left from a previous invocation.")
blockMountCmd.Flags().Int64VarP(&blockMountRunAsUser, "runAsUser", "u", 0, "Runs the block mount check pod with the specified user ID (int)")
blockMountCmd.Flags().Uint32VarP(&blockMountWaitTimeoutSeconds, "wait-timeout", "w", 60, "Max time in seconds to wait for the check pod to become ready")
blockMountCmd.Flags().StringVarP(&blockMountPVCSize, "pvc-size", "", "1Gi", "The size of the provisioned PVC.")
}
// Execute executes the main command
@ -306,3 +357,52 @@ func CsiPvcBrowse(ctx context.Context,
}
return err
}
// BlockMountCheck wires up the Kubernetes clients, builds a BlockMountChecker
// and either runs the block-mount test or, when cleanupOnly is set, only
// removes resources left behind by a previous invocation.
//
// The test outcome is printed (and optionally written to outfile as JSON);
// the returned error is the error from client setup or the mount attempt.
func BlockMountCheck(ctx context.Context, output, outfile string, cleanupOnly bool, checkerArgs block.BlockMountCheckerArgs) error {
	kubeCli, err := kubestr.LoadKubeCli()
	if err != nil {
		fmt.Printf("Failed to load kubeCli (%s)", err.Error())
		return err
	}
	dynCli, err := kubestr.LoadDynCli()
	if err != nil {
		fmt.Printf("Failed to load dynCli (%s)", err.Error())
		return err
	}
	checkerArgs.KubeCli = kubeCli
	checkerArgs.DynCli = dynCli

	blockMountTester, err := block.NewBlockMountChecker(checkerArgs)
	if err != nil {
		fmt.Printf("Failed to initialize BlockMounter (%s)", err.Error())
		return err
	}

	// Cleanup-only mode: remove leftovers from an earlier run and stop.
	if cleanupOnly {
		blockMountTester.Cleanup()
		return nil
	}

	const testName = "Block VolumeMode test"
	mountResult, err := blockMountTester.Mount(ctx)
	var result *kubestr.TestOutput
	if err == nil {
		result = kubestr.MakeTestOutput(testName, kubestr.StatusOK, fmt.Sprintf("StorageClass (%s) supports Block VolumeMode", checkerArgs.StorageClass), mountResult)
	} else {
		if !checkerArgs.Cleanup {
			fmt.Printf("Warning: Resources may not have been released. Rerun with the additional --cleanup-only flag.\n")
		}
		result = kubestr.MakeTestOutput(testName, kubestr.StatusError, fmt.Sprintf("StorageClass (%s) does not appear to support Block VolumeMode", checkerArgs.StorageClass), mountResult)
	}
	if !PrintAndJsonOutput([]*kubestr.TestOutput{result}, output, outfile) {
		result.Print()
	}
	return err
}

12
go.mod
View file

@ -6,6 +6,7 @@ replace github.com/graymeta/stow => github.com/kastenhq/stow v0.1.2-kasten
require (
github.com/briandowns/spinner v1.23.0
github.com/frankban/quicktest v1.14.5
github.com/golang/mock v1.6.0
github.com/kanisterio/kanister v0.0.0-20230711223016-1ed0c605326b
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
@ -18,17 +19,12 @@ require (
)
require (
cloud.google.com/go/compute v1.20.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.16.0+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
@ -55,7 +51,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/pretty v0.2.1 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@ -73,12 +69,10 @@ require (
github.com/openshift/api v0.0.0-20230406152840-ce21e3fe5da2 // indirect
github.com/openshift/client-go v0.0.0-20230324103026-3f1513df25e0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday v1.5.2 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/testify v1.8.2 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.mongodb.org/mongo-driver v1.11.3 // indirect
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect

1647
go.sum

File diff suppressed because it is too large Load diff

214
pkg/block/block_mount.go Normal file
View file

@ -0,0 +1,214 @@
package block
import (
"context"
"fmt"
"time"
kankube "github.com/kanisterio/kanister/pkg/kube"
"github.com/kanisterio/kanister/pkg/poll"
"github.com/kastenhq/kubestr/pkg/csi"
"github.com/kastenhq/kubestr/pkg/csi/types"
v1 "k8s.io/api/core/v1"
sv1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
// BlockMountCheckerArgs collects the arguments needed to construct a
// BlockMountChecker.
type BlockMountCheckerArgs struct {
	KubeCli kubernetes.Interface // required Kubernetes client
	DynCli  dynamic.Interface    // required dynamic client

	StorageClass          string        // required: the storage class to check
	Namespace             string        // required: namespace for the test PVC and pod
	Cleanup               bool          // delete created resources when the check completes
	RunAsUser             int64         // run the check pod with this user ID (if > 0)
	ContainerImage        string        // image for the check pod (empty selects a default)
	K8sObjectReadyTimeout time.Duration // how long to wait for the pod to become ready
	PVCSize               string        // PVC size, e.g. "1Gi"; a default is used if empty
}

// Validate returns an error if any required field is unset.
func (a *BlockMountCheckerArgs) Validate() error {
	if a.KubeCli == nil || a.DynCli == nil || a.StorageClass == "" || a.Namespace == "" {
		// Go convention: error strings are lowercase, no trailing period.
		// (Also fixes the "Require fields" typo in the original message.)
		return fmt.Errorf("required fields are missing (KubeCli, DynCli, StorageClass, Namespace)")
	}
	return nil
}
// BlockMountChecker tests if a storage class can provision volumes for block mounts.
type BlockMountChecker interface {
	// Mount provisions a block-mode PVC and a pod that attaches it as a
	// volumeDevice, returning a result when the pod becomes ready.
	Mount(ctx context.Context) (*BlockMountCheckerResult, error)
	// Cleanup deletes the pod and PVC created by Mount.
	Cleanup()
}

// BlockMountCheckerResult is returned by a successful Mount call.
type BlockMountCheckerResult struct {
	StorageClass *sv1.StorageClass // the storage class that was checked
}

const (
	// Name formats for the test PVC and pod; the argument is the storage class name.
	blockMountCheckerPVCNameFmt = "kubestr-blockmount-%s-pvc"
	blockMountCheckerPodNameFmt = "kubestr-blockmount-%s-pod"
	// Maximum time Cleanup waits for each resource to terminate.
	blockModeCheckerPodCleanupTimeout = time.Second * 120
	blockModeCheckerPVCCleanupTimeout = time.Second * 120
	// Default PVC size used when BlockMountCheckerArgs.PVCSize is empty.
	blockModeCheckerPVCDefaultSize = "1Gi"
)
// blockMountChecker provides BlockMountChecker.
type blockMountChecker struct {
	args    BlockMountCheckerArgs
	podName string // derived from the storage class name via blockMountCheckerPodNameFmt
	pvcName string // derived from the storage class name via blockMountCheckerPVCNameFmt
	// CSI helpers used to create, inspect and delete the test resources;
	// replaced with mocks in the unit tests.
	validator  csi.ArgumentValidator
	appCreator csi.ApplicationCreator
	cleaner    csi.Cleaner
	// Cleanup wait limits; defaulted from package constants, overridden in tests.
	podCleanupTimeout time.Duration
	pvcCleanupTimeout time.Duration
}
// NewBlockMountChecker validates args and returns a ready-to-use
// BlockMountChecker.
func NewBlockMountChecker(args BlockMountCheckerArgs) (BlockMountChecker, error) {
	if err := args.Validate(); err != nil {
		return nil, err
	}
	return &blockMountChecker{
		args:              args,
		podName:           fmt.Sprintf(blockMountCheckerPodNameFmt, args.StorageClass),
		pvcName:           fmt.Sprintf(blockMountCheckerPVCNameFmt, args.StorageClass),
		validator:         csi.NewArgumentValidator(args.KubeCli, args.DynCli),
		appCreator:        csi.NewApplicationCreator(args.KubeCli, args.K8sObjectReadyTimeout),
		cleaner:           csi.NewCleaner(args.KubeCli, args.DynCli),
		podCleanupTimeout: blockModeCheckerPodCleanupTimeout,
		pvcCleanupTimeout: blockModeCheckerPVCCleanupTimeout,
	}, nil
}
// Mount checks whether the configured StorageClass supports block-mode
// volumes: it provisions a PVC with VolumeMode=Block, starts a pod that
// attaches the PVC as a volumeDevice, and waits for the pod to become ready.
// Progress is reported on stdout. When args.Cleanup is set, the created
// resources are deleted before Mount returns, on both success and failure.
func (b *blockMountChecker) Mount(ctx context.Context) (*BlockMountCheckerResult, error) {
	fmt.Printf("Fetching StorageClass %s ...\n", b.args.StorageClass)
	sc, err := b.validator.ValidateStorageClass(ctx, b.args.StorageClass)
	if err != nil {
		fmt.Printf("  -> Failed to fetch StorageClass(%s): (%v)\n", b.args.StorageClass, err)
		return nil, err
	}
	fmt.Printf("  -> Provisioner: %s\n", sc.Provisioner)
	// Fall back to the default size when no PVC size was specified.
	if b.args.PVCSize == "" {
		b.args.PVCSize = blockModeCheckerPVCDefaultSize
	}
	restoreSize, err := resource.ParseQuantity(b.args.PVCSize)
	if err != nil {
		fmt.Printf("  -> Invalid PVC size %s: (%v)\n", b.args.PVCSize, err)
		return nil, err
	}
	blockMode := v1.PersistentVolumeBlock
	createPVCArgs := &types.CreatePVCArgs{
		Name:         b.pvcName,
		Namespace:    b.args.Namespace,
		StorageClass: b.args.StorageClass,
		VolumeMode:   &blockMode,
		RestoreSize:  &restoreSize,
	}
	// Deferred so the resources are removed on every return path below.
	if b.args.Cleanup {
		defer b.Cleanup()
	}
	fmt.Printf("Provisioning a Volume (%s) for block mode access ...\n", b.args.PVCSize)
	tB := time.Now()
	_, err = b.appCreator.CreatePVC(ctx, createPVCArgs)
	if err != nil {
		fmt.Printf("  -> Failed to provision a Volume (%v)\n", err)
		return nil, err
	}
	fmt.Printf("  -> Created PVC %s/%s (%s)\n", b.args.Namespace, b.pvcName, time.Since(tB).Truncate(time.Millisecond).String())
	fmt.Println("Creating a Pod with a volumeDevice ...")
	tB = time.Now()
	// The pod only runs "tail -f /dev/null"; pod readiness is the signal
	// that the block device was successfully attached.
	_, err = b.appCreator.CreatePod(ctx, &types.CreatePodArgs{
		Name:           b.podName,
		PVCName:        b.pvcName,
		Namespace:      b.args.Namespace,
		RunAsUser:      b.args.RunAsUser,
		ContainerImage: b.args.ContainerImage,
		Command:        []string{"/bin/sh"},
		ContainerArgs:  []string{"-c", "tail -f /dev/null"},
		DevicePath:     "/mnt/block",
	})
	if err != nil {
		fmt.Printf("  -> Failed to create Pod (%v)\n", err)
		return nil, err
	}
	fmt.Printf("  -> Created Pod %s/%s\n", b.args.Namespace, b.podName)
	fmt.Printf("  -> Waiting at most %s for the Pod to become ready ...\n", b.args.K8sObjectReadyTimeout.String())
	if err = b.appCreator.WaitForPodReady(ctx, b.args.Namespace, b.podName); err != nil {
		fmt.Printf("  -> The Pod timed out (%v)\n", err)
		return nil, err
	}
	fmt.Printf("  -> The Pod is ready (%s)\n", time.Since(tB).Truncate(time.Millisecond).String())
	return &BlockMountCheckerResult{
		StorageClass: sc,
	}, nil
}
// Cleanup deletes the pod and PVC created by Mount and waits for each to
// terminate, reporting progress and errors on stdout. It returns nothing:
// a failure on one resource is logged and cleanup continues with the next.
func (b *blockMountChecker) Cleanup() {
	var (
		ctx = context.Background()
		err error
	)
	// Delete the test pod (a NotFound error means there is nothing to do).
	fmt.Printf("Deleting Pod %s/%s ...\n", b.args.Namespace, b.podName)
	tB := time.Now()
	err = b.cleaner.DeletePod(ctx, b.podName, b.args.Namespace)
	if err != nil && !apierrors.IsNotFound(err) {
		fmt.Printf(" Error deleting Pod %s/%s - (%v)\n", b.args.Namespace, b.podName, err)
	}
	// Wait (up to podCleanupTimeout) for the pod to actually terminate.
	podWaitCtx, podWaitCancelFn := context.WithTimeout(context.Background(), b.podCleanupTimeout)
	defer podWaitCancelFn()
	err = kankube.WaitForPodCompletion(podWaitCtx, b.args.KubeCli, b.args.Namespace, b.podName)
	if err == nil || (err != nil && apierrors.IsNotFound(err)) {
		fmt.Printf("  -> Deleted pod (%s)\n", time.Since(tB).Truncate(time.Millisecond).String())
	} else {
		fmt.Printf("  -> Failed to delete Pod in %s\n", time.Since(tB).Truncate(time.Millisecond).String())
	}
	// Delete the test PVC and wait (up to pvcCleanupTimeout) for it to go away.
	fmt.Printf("Deleting PVC %s/%s ...\n", b.args.Namespace, b.pvcName)
	tB = time.Now()
	err = b.cleaner.DeletePVC(ctx, b.pvcName, b.args.Namespace)
	if err != nil && !apierrors.IsNotFound(err) {
		fmt.Printf(" Error deleting PVC %s/%s - (%v)\n", b.args.Namespace, b.pvcName, err)
	}
	err = b.pvcWaitForTermination(b.pvcCleanupTimeout)
	if err != nil {
		fmt.Printf("  -> PVC failed to delete in %s\n", time.Since(tB).Truncate(time.Millisecond).String())
	} else {
		fmt.Printf("  -> Deleted PVC (%s)\n", time.Since(tB).Truncate(time.Millisecond).String())
	}
}
// pvcWaitForTermination polls until the checker's PVC can no longer be found,
// or until the given timeout elapses (in which case the context error is
// returned by poll.Wait).
func (b *blockMountChecker) pvcWaitForTermination(timeout time.Duration) error {
	waitCtx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return poll.Wait(waitCtx, func(ctx context.Context) (bool, error) {
		// Done only when the PVC lookup reports NotFound; any other
		// outcome (present, or a different error) keeps polling.
		_, err := b.validator.ValidatePVC(ctx, b.pvcName, b.args.Namespace)
		return apierrors.IsNotFound(err), nil
	})
}

View file

@ -0,0 +1,442 @@
package block
import (
"context"
"errors"
"fmt"
"testing"
"time"
qt "github.com/frankban/quicktest"
"github.com/golang/mock/gomock"
"github.com/kastenhq/kubestr/pkg/csi/mocks"
"github.com/kastenhq/kubestr/pkg/csi/types"
v1 "k8s.io/api/core/v1"
sv1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
fakedynamic "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/fake"
)
// TestBlockMountCheckerNew exercises NewBlockMountChecker argument validation
// and verifies that a successfully constructed checker has its collaborators
// and derived names initialized.
func TestBlockMountCheckerNew(t *testing.T) {
	kubeCli := fake.NewSimpleClientset()
	dynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())
	// Each case omits at least one required field (Namespace is always missing),
	// so construction must fail.
	invalidArgs := []struct {
		name string
		args BlockMountCheckerArgs
	}{
		{"args:empty", BlockMountCheckerArgs{}},
		{"args:KubeCli", BlockMountCheckerArgs{
			KubeCli: kubeCli,
		}},
		{"args:KubeCli-DynCli", BlockMountCheckerArgs{
			KubeCli: kubeCli,
			DynCli:  dynCli,
		}},
		{"args:KubeCli-DynCli-StorageClass", BlockMountCheckerArgs{
			KubeCli:      kubeCli,
			DynCli:       dynCli,
			StorageClass: "sc",
		}},
	}
	for _, tc := range invalidArgs {
		t.Run(tc.name, func(t *testing.T) {
			c := qt.New(t)
			bmt, err := NewBlockMountChecker(tc.args)
			c.Assert(err, qt.IsNotNil)
			c.Assert(bmt, qt.IsNil)
		})
	}
	// With all required fields present, construction succeeds and the
	// concrete checker is fully initialized.
	t.Run("success", func(t *testing.T) {
		c := qt.New(t)
		args := BlockMountCheckerArgs{
			KubeCli:      kubeCli,
			DynCli:       dynCli,
			StorageClass: "sc",
			Namespace:    "namespace",
		}
		bmt, err := NewBlockMountChecker(args)
		c.Assert(err, qt.IsNil)
		c.Assert(bmt, qt.IsNotNil)
		b, ok := bmt.(*blockMountChecker)
		c.Assert(ok, qt.IsTrue)
		c.Assert(b.args, qt.Equals, args)
		c.Assert(b.validator, qt.IsNotNil)
		c.Assert(b.appCreator, qt.IsNotNil)
		c.Assert(b.cleaner, qt.IsNotNil)
		c.Assert(b.podName, qt.Equals, fmt.Sprintf(blockMountCheckerPodNameFmt, args.StorageClass))
		c.Assert(b.pvcName, qt.Equals, fmt.Sprintf(blockMountCheckerPVCNameFmt, args.StorageClass))
		c.Assert(b.podCleanupTimeout, qt.Equals, blockModeCheckerPodCleanupTimeout)
		c.Assert(b.pvcCleanupTimeout, qt.Equals, blockModeCheckerPVCCleanupTimeout)
	})
}
// TestBlockMountCheckerPvcWaitForTermination covers pvcWaitForTermination:
// it succeeds once the PVC is reported NotFound and returns a deadline error
// when the PVC never disappears within the timeout.
func TestBlockMountCheckerPvcWaitForTermination(t *testing.T) {
	type prepareArgs struct {
		b             *blockMountChecker
		mockValidator *mocks.MockArgumentValidator
	}
	kubeCli := fake.NewSimpleClientset()
	dynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())
	tcs := []struct {
		name       string
		pvcTimeout time.Duration
		prepare    func(*prepareArgs)
		expErr     error
	}{
		{
			name:       "success",
			pvcTimeout: time.Hour,
			prepare: func(pa *prepareArgs) {
				// PVC is already gone: the wait finishes immediately.
				pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, apierrors.NewNotFound(schema.GroupResource{}, ""))
			},
		},
		{
			name:       "timeout",
			pvcTimeout: time.Microsecond, // pvc wait will timeout
			prepare: func(pa *prepareArgs) {
				// PVC keeps existing: the poll loops until the deadline.
				pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(&v1.PersistentVolumeClaim{}, nil).AnyTimes()
			},
			expErr: context.DeadlineExceeded,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			c := qt.New(t)
			args := BlockMountCheckerArgs{
				KubeCli:      kubeCli,
				DynCli:       dynCli,
				StorageClass: "sc",
				Namespace:    "namespace",
			}
			bmt, err := NewBlockMountChecker(args)
			c.Assert(err, qt.IsNil)
			c.Assert(bmt, qt.IsNotNil)
			b, ok := bmt.(*blockMountChecker)
			c.Assert(ok, qt.IsTrue)
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			pa := &prepareArgs{
				b:             b,
				mockValidator: mocks.NewMockArgumentValidator(ctrl),
			}
			tc.prepare(pa)
			// Swap in the mock validator before exercising the wait.
			b.validator = pa.mockValidator
			err = b.pvcWaitForTermination(tc.pvcTimeout)
			if tc.expErr != nil {
				c.Assert(err, qt.ErrorIs, tc.expErr)
			} else {
				c.Assert(err, qt.IsNil)
			}
		})
	}
}
// TestBlockMountCheckerCleanup drives Cleanup through the no-op case
// (nothing found) and through pod/PVC deletion failures, checking that the
// expected cleaner and validator calls are made in each case. Cleanup has no
// return value, so the assertions live in the mock expectations.
func TestBlockMountCheckerCleanup(t *testing.T) {
	type prepareArgs struct {
		b             *blockMountChecker
		mockCleaner   *mocks.MockCleaner
		mockValidator *mocks.MockArgumentValidator
	}
	errNotFound := apierrors.NewNotFound(schema.GroupResource{}, "")
	someError := errors.New("test error")
	scName := "sc"
	namespace := "namespace"
	// A pod object seeded into the fake clientset so the post-delete wait
	// has something to observe.
	runningPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf(blockMountCheckerPodNameFmt, scName),
			Namespace: namespace,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "container-0"},
			},
		},
		Status: v1.PodStatus{
			Phase: v1.PodRunning,
		},
	}
	tcs := []struct {
		name       string
		podTimeout time.Duration
		pvcTimeout time.Duration
		objs       []runtime.Object
		prepare    func(*prepareArgs)
	}{
		{
			name:       "nothing-found",
			podTimeout: time.Hour,
			pvcTimeout: time.Hour,
			prepare: func(pa *prepareArgs) {
				pa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound)
				pa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound)
				pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound)
			},
		},
		{
			name:       "error-deleting-pod",
			podTimeout: time.Microsecond, // pod wait will timeout
			pvcTimeout: time.Hour,
			objs:       []runtime.Object{runningPod},
			prepare: func(pa *prepareArgs) {
				pa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(someError)
				pa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound)
				pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound)
			},
		},
		{
			name:       "error-deleting-pvc",
			podTimeout: time.Hour,
			pvcTimeout: time.Microsecond, // timeout
			prepare: func(pa *prepareArgs) {
				pa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound)
				pa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(someError)
				pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, someError).AnyTimes()
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			c := qt.New(t)
			kubeCli := fake.NewSimpleClientset(tc.objs...)
			dynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())
			args := BlockMountCheckerArgs{
				KubeCli:      kubeCli,
				DynCli:       dynCli,
				StorageClass: scName,
				Namespace:    namespace,
			}
			bmt, err := NewBlockMountChecker(args)
			c.Assert(err, qt.IsNil)
			c.Assert(bmt, qt.IsNotNil)
			b, ok := bmt.(*blockMountChecker)
			c.Assert(ok, qt.IsTrue)
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			pa := &prepareArgs{
				b:             b,
				mockCleaner:   mocks.NewMockCleaner(ctrl),
				mockValidator: mocks.NewMockArgumentValidator(ctrl),
			}
			tc.prepare(pa)
			// Replace the real collaborators and shorten the wait limits.
			b.validator = pa.mockValidator
			b.cleaner = pa.mockCleaner
			b.podCleanupTimeout = tc.podTimeout
			b.pvcCleanupTimeout = tc.pvcTimeout
			b.Cleanup()
		})
	}
}
// TestBlockMountCheckerMount walks Mount through each failure point
// (missing storage class, bad PVC size, PVC creation, pod creation, pod
// readiness) and the success path. Failure cases with Cleanup enabled also
// expect the cleanup calls; the success case disables cleanup and checks
// the returned result.
func TestBlockMountCheckerMount(t *testing.T) {
	type prepareArgs struct {
		b              *blockMountChecker
		mockCleaner    *mocks.MockCleaner
		mockValidator  *mocks.MockArgumentValidator
		mockAppCreator *mocks.MockApplicationCreator
	}
	errNotFound := apierrors.NewNotFound(schema.GroupResource{}, "")
	someError := errors.New("test error")
	scName := "sc"
	scProvisioner := "provisioenr"
	sc := &sv1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: scName,
		},
		Provisioner: scProvisioner,
	}
	namespace := "namespace"
	// cleanupCalls registers the expectations made by the deferred Cleanup
	// in Mount's failure paths.
	cleanupCalls := func(pa *prepareArgs) {
		pa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound)
		pa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound)
		pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound)
	}
	// createPVCArgs mirrors the CreatePVCArgs that Mount builds.
	createPVCArgs := func(b *blockMountChecker) *types.CreatePVCArgs {
		pvcSize := b.args.PVCSize
		if pvcSize == "" {
			pvcSize = blockModeCheckerPVCDefaultSize
		}
		restoreSize := resource.MustParse(pvcSize)
		blockMode := v1.PersistentVolumeBlock
		return &types.CreatePVCArgs{
			Name:         b.pvcName,
			Namespace:    b.args.Namespace,
			StorageClass: b.args.StorageClass,
			VolumeMode:   &blockMode,
			RestoreSize:  &restoreSize,
		}
	}
	createPVC := func(b *blockMountChecker) *v1.PersistentVolumeClaim {
		return &v1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: b.args.Namespace,
				Name:      b.pvcName,
			},
		}
	}
	// createPodArgs mirrors the CreatePodArgs that Mount builds.
	createPodArgs := func(b *blockMountChecker) *types.CreatePodArgs {
		return &types.CreatePodArgs{
			Name:           b.podName,
			PVCName:        b.pvcName,
			Namespace:      b.args.Namespace,
			RunAsUser:      b.args.RunAsUser,
			ContainerImage: b.args.ContainerImage,
			Command:        []string{"/bin/sh"},
			ContainerArgs:  []string{"-c", "tail -f /dev/null"},
			DevicePath:     "/mnt/block",
		}
	}
	createPod := func(b *blockMountChecker) *v1.Pod {
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: b.args.Namespace,
				Name:      b.podName,
			},
		}
	}
	tcs := []struct {
		name       string
		podTimeout time.Duration
		pvcTimeout time.Duration
		noCleanup  bool
		objs       []runtime.Object
		prepare    func(*prepareArgs)
		result     *BlockMountCheckerResult
	}{
		{
			name:       "no-storage-class",
			podTimeout: time.Hour,
			pvcTimeout: time.Hour,
			prepare: func(pa *prepareArgs) {
				pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(nil, apierrors.NewNotFound(schema.GroupResource{}, pa.b.args.StorageClass))
			},
		},
		{
			name:       "invalid-pvc-size",
			podTimeout: time.Hour,
			pvcTimeout: time.Hour,
			prepare: func(pa *prepareArgs) {
				// "10Q" is not a valid resource quantity.
				pa.b.args.PVCSize = "10Q"
				pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)
			},
		},
		{
			name:       "create-pvc-error",
			podTimeout: time.Hour,
			pvcTimeout: time.Hour,
			prepare: func(pa *prepareArgs) {
				pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)
				pa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(nil, someError)
				cleanupCalls(pa)
			},
		},
		{
			name:       "create-pod-error",
			podTimeout: time.Hour,
			pvcTimeout: time.Hour,
			prepare: func(pa *prepareArgs) {
				pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)
				pa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil)
				pa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(nil, someError)
				cleanupCalls(pa)
			},
		},
		{
			name:       "wait-for-pod-error",
			podTimeout: time.Hour,
			pvcTimeout: time.Hour,
			prepare: func(pa *prepareArgs) {
				pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)
				pa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil)
				pa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(createPod(pa.b), nil)
				pa.mockAppCreator.EXPECT().WaitForPodReady(gomock.Any(), pa.b.args.Namespace, pa.b.podName).Return(someError)
				cleanupCalls(pa)
			},
		},
		{
			name:       "success-no-cleanup",
			podTimeout: time.Hour,
			pvcTimeout: time.Hour,
			noCleanup:  true,
			prepare: func(pa *prepareArgs) {
				pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)
				pa.b.args.PVCSize = blockModeCheckerPVCDefaultSize
				pa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil)
				pa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(createPod(pa.b), nil)
				pa.mockAppCreator.EXPECT().WaitForPodReady(gomock.Any(), pa.b.args.Namespace, pa.b.podName).Return(nil)
			},
			result: &BlockMountCheckerResult{
				StorageClass: sc,
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			c := qt.New(t)
			ctx := context.Background()
			kubeCli := fake.NewSimpleClientset(tc.objs...)
			dynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())
			args := BlockMountCheckerArgs{
				KubeCli:      kubeCli,
				DynCli:       dynCli,
				StorageClass: scName,
				Namespace:    namespace,
				Cleanup:      !tc.noCleanup,
			}
			bmt, err := NewBlockMountChecker(args)
			c.Assert(err, qt.IsNil)
			c.Assert(bmt, qt.IsNotNil)
			b, ok := bmt.(*blockMountChecker)
			c.Assert(ok, qt.IsTrue)
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			pa := &prepareArgs{
				b:              b,
				mockCleaner:    mocks.NewMockCleaner(ctrl),
				mockValidator:  mocks.NewMockArgumentValidator(ctrl),
				mockAppCreator: mocks.NewMockApplicationCreator(ctrl),
			}
			tc.prepare(pa)
			// Replace the real collaborators and shorten the wait limits.
			b.validator = pa.mockValidator
			b.cleaner = pa.mockCleaner
			b.appCreator = pa.mockAppCreator
			b.podCleanupTimeout = tc.podTimeout
			b.pvcCleanupTimeout = tc.pvcTimeout
			result, err := b.Mount(ctx)
			if tc.result != nil {
				c.Assert(result, qt.DeepEquals, tc.result)
				c.Assert(err, qt.IsNil)
			} else {
				c.Assert(result, qt.IsNil)
				c.Assert(err, qt.IsNotNil)
			}
		})
	}
}

View file

@ -1,5 +1,7 @@
package csi
// This file contains general Kubernetes operations, not just CSI related operations.
import (
"context"
"fmt"
@ -52,6 +54,13 @@ type validateOperations struct {
dynCli dynamic.Interface
}
// NewArgumentValidator returns an ArgumentValidator backed by the given
// Kubernetes and dynamic clients. Either client may be nil; the individual
// validation methods report an error when the client they need is unset.
func NewArgumentValidator(kubeCli kubernetes.Interface, dynCli dynamic.Interface) ArgumentValidator {
	return &validateOperations{
		kubeCli: kubeCli,
		dynCli:  dynCli,
	}
}
func (o *validateOperations) ValidatePVC(ctx context.Context, pvcName, namespace string) (*v1.PersistentVolumeClaim, error) {
if o.kubeCli == nil {
return nil, fmt.Errorf("kubeCli not initialized")
@ -102,6 +111,13 @@ type applicationCreate struct {
k8sObjectReadyTimeout time.Duration
}
// NewApplicationCreator returns an ApplicationCreator backed by the given
// Kubernetes client. k8sObjectReadyTimeout bounds how long the creator waits
// for created objects (e.g. pods) to become ready.
func NewApplicationCreator(kubeCli kubernetes.Interface, k8sObjectReadyTimeout time.Duration) ApplicationCreator {
	return &applicationCreate{
		kubeCli:               kubeCli,
		k8sObjectReadyTimeout: k8sObjectReadyTimeout,
	}
}
func (c *applicationCreate) CreatePVC(ctx context.Context, args *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error) {
if c.kubeCli == nil {
return nil, fmt.Errorf("kubeCli not initialized")
@ -111,6 +127,7 @@ func (c *applicationCreate) CreatePVC(ctx context.Context, args *types.CreatePVC
}
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: args.Name,
GenerateName: args.GenerateName,
Namespace: args.Namespace,
Labels: map[string]string{
@ -120,6 +137,7 @@ func (c *applicationCreate) CreatePVC(ctx context.Context, args *types.CreatePVC
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &args.StorageClass,
VolumeMode: args.VolumeMode,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
@ -155,8 +173,14 @@ func (c *applicationCreate) CreatePod(ctx context.Context, args *types.CreatePod
args.ContainerImage = common.DefaultPodImage
}
volumeNameInPod := "persistent-storage"
containerName := args.Name
if containerName == "" {
containerName = args.GenerateName
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: args.Name,
GenerateName: args.GenerateName,
Namespace: args.Namespace,
Labels: map[string]string{
@ -165,17 +189,13 @@ func (c *applicationCreate) CreatePod(ctx context.Context, args *types.CreatePod
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: args.GenerateName,
Name: containerName,
Image: args.ContainerImage,
Command: args.Command,
Args: args.ContainerArgs,
VolumeMounts: []v1.VolumeMount{{
Name: "persistent-storage",
MountPath: args.MountPath,
}},
}},
Volumes: []v1.Volume{{
Name: "persistent-storage",
Name: volumeNameInPod,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: args.PVCName,
@ -185,6 +205,18 @@ func (c *applicationCreate) CreatePod(ctx context.Context, args *types.CreatePod
},
}
if args.MountPath != "" {
pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
Name: volumeNameInPod,
MountPath: args.MountPath,
}}
} else { // args.DevicePath
pod.Spec.Containers[0].VolumeDevices = []v1.VolumeDevice{{
Name: volumeNameInPod,
DevicePath: args.DevicePath,
}}
}
if args.RunAsUser > 0 {
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &args.RunAsUser,
@ -384,6 +416,13 @@ type cleanse struct {
dynCli dynamic.Interface
}
// NewCleaner returns a Cleaner backed by the given Kubernetes and dynamic
// clients, used to delete the objects created during a check.
func NewCleaner(kubeCli kubernetes.Interface, dynCli dynamic.Interface) Cleaner {
	return &cleanse{
		kubeCli: kubeCli,
		dynCli:  dynCli,
	}
}
func (c *cleanse) DeletePVC(ctx context.Context, pvcName string, namespace string) error {
if c.kubeCli == nil {
return fmt.Errorf("kubeCli not initialized")

View file

@ -183,27 +183,66 @@ func (s *CSITestSuite) TestGetCSISnapshotGroupVersion(c *C) {
}
}
func (s *CSITestSuite) TestValidatePVC(c *C) {
ctx := context.Background()
ops := NewArgumentValidator(fake.NewSimpleClientset(), nil)
pvc, err := ops.ValidatePVC(ctx, "pvc", "ns")
c.Check(err, NotNil)
c.Check(pvc, IsNil)
ops = NewArgumentValidator(fake.NewSimpleClientset(&v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns",
Name: "pvc",
},
}), nil)
pvc, err = ops.ValidatePVC(ctx, "pvc", "ns")
c.Check(err, IsNil)
c.Check(pvc, NotNil)
ops = NewArgumentValidator(nil, nil)
pvc, err = ops.ValidatePVC(ctx, "pvc", "ns")
c.Check(err, NotNil)
c.Check(pvc, IsNil)
}
// TestFetchPV exercises FetchPV against a missing PV, an existing PV,
// and an uninitialized Kubernetes client.
func (s *CSITestSuite) TestFetchPV(c *C) {
	ctx := context.Background()

	// The requested PV does not exist in the fake cluster.
	validator := NewArgumentValidator(fake.NewSimpleClientset(), nil)
	volume, err := validator.FetchPV(ctx, "pv")
	c.Check(err, NotNil)
	c.Check(volume, IsNil)

	// The PV exists and should be returned without error.
	validator = NewArgumentValidator(fake.NewSimpleClientset(&v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pv",
		},
	}), nil)
	volume, err = validator.FetchPV(ctx, "pv")
	c.Check(err, IsNil)
	c.Check(volume, NotNil)

	// No Kubernetes client configured: the call must fail.
	validator = NewArgumentValidator(nil, nil)
	volume, err = validator.FetchPV(ctx, "pv")
	c.Check(err, NotNil)
	c.Check(volume, IsNil)
}
func (s *CSITestSuite) TestValidateNamespace(c *C) {
ctx := context.Background()
ops := &validateOperations{
kubeCli: fake.NewSimpleClientset(),
}
ops := NewArgumentValidator(fake.NewSimpleClientset(), nil)
err := ops.ValidateNamespace(ctx, "ns")
c.Check(err, NotNil)
ops = &validateOperations{
kubeCli: fake.NewSimpleClientset(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
}),
}
ops = NewArgumentValidator(fake.NewSimpleClientset(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
}), nil)
err = ops.ValidateNamespace(ctx, "ns")
c.Check(err, IsNil)
ops = &validateOperations{
kubeCli: nil,
}
ops = NewArgumentValidator(nil, nil)
err = ops.ValidateNamespace(ctx, "ns")
c.Check(err, NotNil)
}
@ -427,7 +466,8 @@ func (s *CSITestSuite) TestCreatePVC(c *C) {
pvcChecker: IsNil,
},
} {
creator := &applicationCreate{kubeCli: tc.cli}
appCreator := NewApplicationCreator(tc.cli, 0)
creator := appCreator.(*applicationCreate)
if tc.failCreates {
creator.kubeCli.(*fake.Clientset).Fake.PrependReactor("create", "persistentvolumeclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("Error creating object")
@ -481,6 +521,7 @@ func (s *CSITestSuite) TestCreatePod(c *C) {
Command: []string{"somecommand"},
RunAsUser: 1000,
ContainerImage: "containerimage",
MountPath: "/mnt/fs",
},
errChecker: IsNil,
podChecker: NotNil,
@ -493,19 +534,61 @@ func (s *CSITestSuite) TestCreatePod(c *C) {
PVCName: "pvcname",
Namespace: "ns",
Command: []string{"somecommand"},
MountPath: "/mnt/fs",
},
failCreates: true,
errChecker: NotNil,
podChecker: NotNil,
},
{
description: "Pod generate name arg not set",
description: "Neither Name nor GenerateName set",
cli: fake.NewSimpleClientset(),
args: &types.CreatePodArgs{
GenerateName: "",
PVCName: "pvcname",
Namespace: "ns",
Command: []string{"somecommand"},
MountPath: "/mnt/fs",
},
errChecker: NotNil,
podChecker: IsNil,
},
{
description: "Both Name and GenerateName set",
cli: fake.NewSimpleClientset(),
args: &types.CreatePodArgs{
GenerateName: "name",
Name: "name",
PVCName: "pvcname",
Namespace: "ns",
Command: []string{"somecommand"},
MountPath: "/mnt/fs",
},
errChecker: NotNil,
podChecker: IsNil,
},
{
description: "Neither MountPath nor DevicePath set error",
cli: fake.NewSimpleClientset(),
args: &types.CreatePodArgs{
GenerateName: "name",
PVCName: "",
Namespace: "ns",
Command: []string{"somecommand"},
},
errChecker: NotNil,
podChecker: IsNil,
},
{
description: "Both MountPath and DevicePath set error",
cli: fake.NewSimpleClientset(),
args: &types.CreatePodArgs{
GenerateName: "name",
PVCName: "",
Namespace: "ns",
Command: []string{"somecommand"},
MountPath: "/mnt/fs",
DevicePath: "/mnt/dev",
},
errChecker: NotNil,
podChecker: IsNil,
@ -518,6 +601,7 @@ func (s *CSITestSuite) TestCreatePod(c *C) {
PVCName: "",
Namespace: "ns",
Command: []string{"somecommand"},
MountPath: "/mnt/fs",
},
errChecker: NotNil,
podChecker: IsNil,
@ -530,18 +614,33 @@ func (s *CSITestSuite) TestCreatePod(c *C) {
PVCName: "pvcname",
Namespace: "",
Command: []string{"somecommand"},
MountPath: "/mnt/fs",
},
errChecker: NotNil,
podChecker: IsNil,
},
{
description: "ns namespace pod is created",
description: "ns namespace pod is created (GenerateName/MountPath)",
cli: fake.NewSimpleClientset(),
args: &types.CreatePodArgs{
GenerateName: "name",
PVCName: "pvcname",
Namespace: "ns",
Command: []string{"somecommand"},
MountPath: "/mnt/fs",
},
errChecker: IsNil,
podChecker: NotNil,
},
{
description: "ns namespace pod is created (Name/DevicePath)",
cli: fake.NewSimpleClientset(),
args: &types.CreatePodArgs{
Name: "name",
PVCName: "pvcname",
Namespace: "ns",
Command: []string{"somecommand"},
DevicePath: "/mnt/dev",
},
errChecker: IsNil,
podChecker: NotNil,
@ -567,16 +666,30 @@ func (s *CSITestSuite) TestCreatePod(c *C) {
if pod != nil && err == nil {
_, ok := pod.Labels[createdByLabel]
c.Assert(ok, Equals, true)
c.Assert(pod.GenerateName, Equals, tc.args.GenerateName)
if tc.args.GenerateName != "" {
c.Assert(pod.GenerateName, Equals, tc.args.GenerateName)
c.Assert(pod.Spec.Containers[0].Name, Equals, tc.args.GenerateName)
} else {
c.Assert(pod.Name, Equals, tc.args.Name)
c.Assert(pod.Spec.Containers[0].Name, Equals, tc.args.Name)
}
c.Assert(pod.Namespace, Equals, tc.args.Namespace)
c.Assert(len(pod.Spec.Containers), Equals, 1)
c.Assert(pod.Spec.Containers[0].Name, Equals, tc.args.GenerateName)
c.Assert(pod.Spec.Containers[0].Command, DeepEquals, tc.args.Command)
c.Assert(pod.Spec.Containers[0].Args, DeepEquals, tc.args.ContainerArgs)
c.Assert(pod.Spec.Containers[0].VolumeMounts, DeepEquals, []v1.VolumeMount{{
Name: "persistent-storage",
MountPath: tc.args.MountPath,
}})
if tc.args.MountPath != "" {
c.Assert(pod.Spec.Containers[0].VolumeMounts, DeepEquals, []v1.VolumeMount{{
Name: "persistent-storage",
MountPath: tc.args.MountPath,
}})
c.Assert(pod.Spec.Containers[0].VolumeDevices, IsNil)
} else {
c.Assert(pod.Spec.Containers[0].VolumeDevices, DeepEquals, []v1.VolumeDevice{{
Name: "persistent-storage",
DevicePath: tc.args.DevicePath,
}})
c.Assert(pod.Spec.Containers[0].VolumeMounts, IsNil)
}
c.Assert(pod.Spec.Volumes, DeepEquals, []v1.Volume{{
Name: "persistent-storage",
VolumeSource: v1.VolumeSource{
@ -948,9 +1061,7 @@ func (s *CSITestSuite) TestDeletePVC(c *C) {
errChecker: NotNil,
},
} {
cleaner := &cleanse{
kubeCli: tc.cli,
}
cleaner := NewCleaner(tc.cli, nil)
err := cleaner.DeletePVC(ctx, tc.pvcName, tc.namespace)
c.Check(err, tc.errChecker)
}
@ -1152,9 +1263,7 @@ func (s *CSITestSuite) TestDeleteSnapshot(c *C) {
errChecker: NotNil,
},
} {
cleaner := &cleanse{
dynCli: tc.cli,
}
cleaner := NewCleaner(nil, tc.cli)
err := cleaner.DeleteSnapshot(ctx, tc.snapshotName, tc.namespace, tc.groupVersion)
c.Check(err, tc.errChecker)
}
@ -1247,3 +1356,72 @@ func (s *CSITestSuite) getPVC(ns, pvc string, phase v1.PersistentVolumeClaimPhas
},
}
}
// TestWaitForPodReady verifies WaitForPodReady behavior for: a pod that
// is already Running, a pod that does not exist, a pod whose namespace
// has warning events, and an uninitialized Kubernetes client.
func (s *CSITestSuite) TestWaitForPodReady(c *C) {
	ctx := context.Background()
	const ns = "ns"
	const podName = "pod"
	// A pod already in the Running phase, so the wait should succeed.
	readyPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      podName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "container-0"},
			},
		},
		Status: v1.PodStatus{
			Phase: v1.PodRunning,
		},
	}
	// A warning event used to simulate pod creation problems.
	warningEvent := v1.Event{
		Type:    v1.EventTypeWarning,
		Message: "warning event",
	}
	for _, tc := range []struct {
		description string
		cli         kubernetes.Interface
		eventsList  []v1.Event // events returned by the injected "list events" reactor
		errChecker  Checker
		errString   string // expected substring of the returned error, if any
	}{
		{
			description: "Happy path",
			cli:         fake.NewSimpleClientset(readyPod),
			errChecker:  IsNil,
		},
		{
			description: "Not found",
			cli:         fake.NewSimpleClientset(),
			errChecker:  NotNil,
			errString:   "not found",
		},
		{
			description: "Pod events",
			cli:         fake.NewSimpleClientset(),
			errChecker:  NotNil,
			errString:   "had issues creating Pod",
			eventsList:  []v1.Event{warningEvent},
		},
		{
			description: "No CLI",
			errChecker:  NotNil,
			errString:   "kubeCli not initialized",
		},
	} {
		fmt.Println("TestWaitForPodReady:", tc.description)
		creator := &applicationCreate{kubeCli: tc.cli}
		if len(tc.eventsList) > 0 {
			// Make event listing return the canned warning events so the
			// waiter reports them as a pod creation failure.
			creator.kubeCli.(*fake.Clientset).PrependReactor("list", "events", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
				return true, &v1.EventList{Items: tc.eventsList}, nil
			})
		}
		err := creator.WaitForPodReady(ctx, ns, podName)
		c.Check(err, tc.errChecker)
		if err != nil {
			// The error text must contain the expected substring.
			c.Assert(strings.Contains(err.Error(), tc.errString), Equals, true)
		}
	}
}

View file

@ -12,13 +12,13 @@ import (
)
type CSISnapshotRestoreArgs struct {
StorageClass string
VolumeSnapshotClass string
Namespace string
RunAsUser int64
ContainerImage string
Cleanup bool
SkipCFSCheck bool
StorageClass string
VolumeSnapshotClass string
Namespace string
RunAsUser int64
ContainerImage string
Cleanup bool
SkipCFSCheck bool
K8sObjectReadyTimeout time.Duration
}
@ -38,34 +38,44 @@ type CSISnapshotRestoreResults struct {
}
type CreatePVCArgs struct {
GenerateName string
Name string // Only one of Name or
GenerateName string // GenerateName should be specified.
StorageClass string
Namespace string
DataSource *v1.TypedLocalObjectReference
RestoreSize *resource.Quantity
VolumeMode *v1.PersistentVolumeMode // missing implies v1.PersistentVolumeFilesystem
}
func (c *CreatePVCArgs) Validate() error {
if c.GenerateName == "" || c.StorageClass == "" || c.Namespace == "" {
return fmt.Errorf("Invalid CreatePVCArgs (%v)", c)
if (c.GenerateName == "" && c.Name == "") ||
(c.GenerateName != "" && c.Name != "") ||
c.StorageClass == "" || c.Namespace == "" {
return fmt.Errorf("Invalid CreatePVCArgs (%#v)", c)
}
return nil
}
type CreatePodArgs struct {
GenerateName string
Name string // Only one of Name or
GenerateName string // GenerateName should be specified.
PVCName string
Namespace string
RunAsUser int64
ContainerImage string
Command []string
ContainerArgs []string
MountPath string
MountPath string // Only one of MountPath or
DevicePath string // DevicePath should be specified.
}
func (c *CreatePodArgs) Validate() error {
if c.GenerateName == "" || c.PVCName == "" || c.Namespace == "" {
return fmt.Errorf("Invalid CreatePodArgs (%v)", c)
if (c.GenerateName == "" && c.Name == "") ||
(c.GenerateName != "" && c.Name != "") ||
(c.MountPath == "" && c.DevicePath == "") ||
(c.MountPath != "" && c.DevicePath != "") ||
c.PVCName == "" || c.Namespace == "" {
return fmt.Errorf("Invalid CreatePodArgs (%#v)", c)
}
return nil
}

View file

@ -144,6 +144,9 @@ func (v *Provisioner) Print() {
fmt.Println()
fmt.Println(" To perform a FIO test, run-")
fmt.Println(" ./kubestr fio -s <storage class>")
fmt.Println()
fmt.Println(" To perform a check for block device support, run-")
fmt.Println(" ./kubestr blockmount -s <storage class>")
switch {
case len(v.VolumeSnapshotClasses) == 0 && v.CSIDriver != nil && v.CSIDriver.SupportsSnapshots():
fmt.Println()