1
0
Fork 0
mirror of https://github.com/kastenhq/kubestr.git synced 2024-12-14 11:57:56 +00:00
This commit is contained in:
Sirish Bathina 2021-02-05 15:26:10 -10:00
commit e5ef839a62
54 changed files with 9554 additions and 0 deletions

53
.github/workflows/docker-publish.yml vendored Normal file
View file

@ -0,0 +1,53 @@
name: Docker
on:
push:
# Publish `master` as Docker `latest` image.
branches:
- master
# Publish `v1.2.3` tags as releases.
tags:
- v*
env:
IMAGE_NAME: kubestr
jobs:
push:
runs-on: ubuntu-latest
if: github.event_name == 'push'
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Build image
run: docker build . --file Dockerfile --tag $IMAGE_NAME
- name: Log into GitHub Container Registry
# TODO: Create a PAT with `read:packages` and `write:packages` scopes and save it as an Actions secret `CR_PAT`
run: echo "${{ secrets.CR_PAT }}" | docker login https://ghcr.io -u ${{ github.actor }} --password-stdin
- name: Push image to GitHub Container Registry
run: |
IMAGE_ID=ghcr.io/${{ github.repository_owner }}/$IMAGE_NAME
# Change all uppercase to lowercase
IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]')
# Strip git ref prefix from version
VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
# Strip "v" prefix from tag name
[[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//')
# Use Docker `latest` tag convention
[ "$VERSION" == "master" ] && VERSION=latest
echo IMAGE_ID=$IMAGE_ID
echo VERSION=$VERSION
docker tag $IMAGE_NAME $IMAGE_ID:$VERSION
docker push $IMAGE_ID:$VERSION

29
.github/workflows/go.yml vendored Normal file
View file

@ -0,0 +1,29 @@
name: Go
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.14
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Build
run: go build -v .
- name: Test
run: go test -v ./...

21
.github/workflows/golangci-lint.yml vendored Normal file
View file

@ -0,0 +1,21 @@
name: golangci-lint
on:
push:
tags:
- v*
branches:
- master
- main
pull_request:
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.29
args: --timeout=5m

24
.github/workflows/release.yaml vendored Normal file
View file

@ -0,0 +1,24 @@
name: Release
on:
release:
types: [created, edited]
jobs:
releases-matrix:
name: Release Go Binary
runs-on: ubuntu-latest
strategy:
matrix:
# build and publish in parallel: linux/amd64, windows/amd64, darwin/amd64
goos: [linux, windows, darwin]
goarch: [amd64]
steps:
- uses: actions/checkout@v2
- uses: wangyoucao577/go-release-action@v1.11
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
binary_name: "kubestr"
extra_files: LICENSE README.md

1
.gitignore vendored Normal file
View file

@ -0,0 +1 @@
.gitignore

26
Dockerfile Normal file
View file

@ -0,0 +1,26 @@
FROM golang:alpine3.12 AS builder
ENV GO111MODULE=on \
CGO_ENABLED=0 \
GOOS=linux \
GOARCH=amd64 \
GOBIN=/dist
WORKDIR /app
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY . .
RUN go get -ldflags="-w -s" .
FROM alpine:3.12
RUN apk --no-cache add fio
COPY --from=builder /dist/kubestr /
ENTRYPOINT ["/kubestr"]

7
FIO.md Normal file
View file

@ -0,0 +1,7 @@
layout: page
title: FIO
permalink: /fio/
# FIO
<script id="asciicast-D9EFwlEUVx787hayFapdHljBW" src="https://asciinema.org/a/D9EFwlEUVx787hayFapdHljBW.js" async></script>

201
LICENSE Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

34
README.md Normal file
View file

@ -0,0 +1,34 @@
# Kubestr
## What is it?
Kubestr is a collection of tools to discover, validate and evaluate your kubernetes storage options.
As adoption of Kubernetes grows, so have the persistent storage offerings that are available to users. The introduction of [CSI](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/) (Container Storage Interface) has enabled storage providers to develop drivers with ease. In fact, there are around 100 different CSI drivers available today. Along with the existing in-tree providers, these options can make choosing the right storage difficult.
Kubestr can assist in the following ways-
- Identify the various storage options present in a cluster.
- Validate if the storage options are configured correctly.
- Evaluate the storage using common benchmarking tools like FIO.
<script id="asciicast-7iJTbWKwdhPHNWYV00LIgx7gn" src="https://asciinema.org/a/7iJTbWKwdhPHNWYV00LIgx7gn.js" async></script>
## Using Kubestr
### To install the tool -
- Ensure that the kubernetes context is set and the cluster is accessible through your terminal. (Does [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) work?)
- Download the latest release [here](https://github.com/kastenhq/kubestr/releases/latest).
- Unpack the tool and make it an executable `chmod +x kubestr`.
### To discover available storage options -
- Run `./kubestr`
### To run an FIO test -
- Run `./kubestr fio -s <storage class>`
- Additional options like `--size` and `--fiofile` can be specified.
- For more information visit our [fio](https://github.com/kastenhq/kubestr/blob/master/FIO.md) page.
### To check a CSI driver's snapshot and restore capabilities -
- Run `./kubestr csicheck -s <storage class> -v <volume snapshot class>`
## Roadmap
- In the future we plan to allow users to post their FIO results and compare to others.

3
_config.yml Normal file
View file

@ -0,0 +1,3 @@
theme: jekyll-theme-cayman
title: Kubestr
description: Explore your kubernetes storage options

View file

@ -0,0 +1,6 @@
layout: post
title: "Faster Storage"
date: 2021-02-07
categories: fio storage
Some content

227
cmd/rootCmd.go Normal file
View file

@ -0,0 +1,227 @@
// Copyright 2020 Kubestr Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/kastenhq/kubestr/pkg/csi"
csitypes "github.com/kastenhq/kubestr/pkg/csi/types"
"github.com/kastenhq/kubestr/pkg/fio"
"github.com/kastenhq/kubestr/pkg/kubestr"
"github.com/spf13/cobra"
)
// Command-line flags and cobra command definitions for kubestr.
var (
	// output selects the output format; only "json" is recognized,
	// anything else falls through to human-readable text.
	output string

	// rootCmd runs the baseline storage checks when kubestr is invoked
	// without a subcommand.
	rootCmd = &cobra.Command{
		Use:   "kubestr",
		Short: "A tool to validate kubernetes storage",
		// Fixed grammar of the user-visible long help text.
		Long: `kubestr is a tool that will scan your k8s cluster
and validate the storage systems in place, as well as run
performance tests.`,
		Run: func(cmd *cobra.Command, args []string) {
			// Bound every check to 5 minutes so a hung cluster call
			// cannot stall the CLI indefinitely.
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
			defer cancel()
			Baseline(ctx, output)
		},
	}

	storageClass       string
	namespace          string
	containerImage     string
	fioCheckerSize     string
	fioCheckerFilePath string
	fioCheckerTestName string

	// fioCmd benchmarks a StorageClass by running FIO in a pod.
	fioCmd = &cobra.Command{
		Use:   "fio",
		Short: "Runs an fio test",
		Long:  `Run an fio test`,
		Run: func(cmd *cobra.Command, args []string) {
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
			defer cancel()
			Fio(ctx, output, storageClass, fioCheckerSize, namespace, fioCheckerTestName, fioCheckerFilePath, containerImage)
		},
	}

	csiCheckVolumeSnapshotClass string
	csiCheckRunAsUser           int64
	csiCheckCleanup             bool
	csiCheckSkipCFSCheck        bool

	// csiCheckCmd validates a CSI provisioner's snapshot and restore support.
	csiCheckCmd = &cobra.Command{
		Use:   "csicheck",
		Short: "Runs the CSI snapshot restore check",
		// Fixed possessive in the user-visible help text ("provisioners" -> "provisioner's").
		Long:  "Validates a CSI provisioner's ability to take a snapshot of an application and restore it",
		Run: func(cmd *cobra.Command, args []string) {
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
			defer cancel()
			CSICheck(ctx, output, namespace, storageClass, csiCheckVolumeSnapshotClass, csiCheckRunAsUser, containerImage, csiCheckCleanup, csiCheckSkipCFSCheck)
		},
	}
)
// init wires up the kubestr command tree: the global --output flag on the
// root command, plus the fio and csicheck subcommands and their flags.
func init() {
	rootCmd.PersistentFlags().StringVarP(&output, "output", "o", "", "Options(json)")

	rootCmd.AddCommand(fioCmd)
	fioCmd.Flags().StringVarP(&storageClass, "storageclass", "s", "", "The name of a Storageclass. (Required)")
	_ = fioCmd.MarkFlagRequired("storageclass")
	fioCmd.Flags().StringVarP(&fioCheckerSize, "size", "z", fio.DefaultPVCSize, "The size of the volume used to run FIO.")
	fioCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace used to run FIO.")
	// Fixed help-text typo: "a an fio config file" -> "an fio config file".
	fioCmd.Flags().StringVarP(&fioCheckerFilePath, "fiofile", "f", "", "The path to an fio config file.")
	fioCmd.Flags().StringVarP(&fioCheckerTestName, "testname", "t", "", "The Name of a predefined kubestr fio test. Options(default-fio)")
	fioCmd.Flags().StringVarP(&containerImage, "image", "i", "", "The container image used to create a pod.")

	rootCmd.AddCommand(csiCheckCmd)
	csiCheckCmd.Flags().StringVarP(&storageClass, "storageclass", "s", "", "The name of a Storageclass. (Required)")
	_ = csiCheckCmd.MarkFlagRequired("storageclass")
	csiCheckCmd.Flags().StringVarP(&csiCheckVolumeSnapshotClass, "volumesnapshotclass", "v", "", "The name of a VolumeSnapshotClass. (Required)")
	_ = csiCheckCmd.MarkFlagRequired("volumesnapshotclass")
	csiCheckCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace used to run the check.")
	csiCheckCmd.Flags().StringVarP(&containerImage, "image", "i", "", "The container image used to create a pod.")
	csiCheckCmd.Flags().BoolVarP(&csiCheckCleanup, "cleanup", "c", true, "Clean up the objects created by tool")
	csiCheckCmd.Flags().Int64VarP(&csiCheckRunAsUser, "runAsUser", "u", 0, "Runs the CSI check using pods as a user (int)")
	csiCheckCmd.Flags().BoolVarP(&csiCheckSkipCFSCheck, "skipCFScheck", "k", false, "Use this flag to skip validating the ability to clone a snapshot.")
}
// Execute runs the kubestr root command (dispatching to any selected
// subcommand) and returns whatever error command execution produced.
func Execute() error {
return rootCmd.Execute()
}
// Baseline executes the baseline check. It runs the Kubernetes cluster
// checks and then discovers and validates the storage provisioners in
// the cluster. When output == "json", only the cluster-check results are
// emitted (as JSON) before returning; otherwise everything is printed in
// human-readable form with short pauses between items for readability.
func Baseline(ctx context.Context, output string) {
	p, err := kubestr.NewKubestr()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Print(kubestr.Logo)
	result := p.KubernetesChecks()
	if output == "json" {
		jsonRes, _ := json.MarshalIndent(result, "", " ")
		fmt.Println(string(jsonRes))
		return
	}
	for _, retval := range result {
		retval.Print()
		fmt.Println()
		time.Sleep(500 * time.Millisecond)
	}
	provisionerList, err := p.ValidateProvisioners(ctx)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	// A second `output == "json"` branch used to sit here, but it was
	// unreachable (the JSON path returns above) and it serialized the
	// wrong value (the cluster checks rather than provisionerList), so
	// it has been removed; observable behavior is unchanged.
	fmt.Println("Available Storage Provisioners:")
	fmt.Println()
	time.Sleep(500 * time.Millisecond) // Added to introduce lag.
	for _, provisioner := range provisionerList {
		provisioner.Print()
		fmt.Println()
		time.Sleep(500 * time.Millisecond)
	}
}
// Fio executes the FIO test against the given StorageClass and reports
// the outcome, either as JSON (output == "json") or human-readable text.
func Fio(ctx context.Context, output, storageclass, size, namespace, jobName, fioFilePath string, containerImage string) {
	cli, err := kubestr.LoadKubeCli()
	if err != nil {
		fmt.Println(err.Error())
		return
	}

	runner := &fio.FIOrunner{Cli: cli}
	args := &fio.RunFIOArgs{
		StorageClass:   storageclass,
		Size:           size,
		Namespace:      namespace,
		FIOJobName:     jobName,
		FIOJobFilepath: fioFilePath,
		Image:          containerImage,
	}

	const testName = "FIO test results"
	fioResult, runErr := runner.RunFio(ctx, args)

	// Wrap the raw FIO outcome in a TestOutput so both success and
	// failure render through the same reporting path.
	var result *kubestr.TestOutput
	if runErr != nil {
		result = kubestr.MakeTestOutput(testName, kubestr.StatusError, runErr.Error(), fioResult)
	} else {
		result = kubestr.MakeTestOutput(testName, kubestr.StatusOK, fmt.Sprintf("\n%s", fioResult.Result.Print()), fioResult)
	}

	if output == "json" {
		jsonRes, _ := json.MarshalIndent(result, "", " ")
		fmt.Println(string(jsonRes))
		return
	}
	result.Print()
}
// CSICheck runs the CSI snapshot/restore validation: it provisions an
// application on the given StorageClass, snapshots it with the given
// VolumeSnapshotClass, restores the snapshot, and reports the outcome
// either as JSON (output == "json") or human-readable text.
func CSICheck(ctx context.Context, output,
	namespace string,
	storageclass string,
	volumesnapshotclass string,
	runAsUser int64,
	containerImage string,
	cleanup bool,
	skipCFScheck bool,
) {
	testName := "CSI checker test"
	kubecli, err := kubestr.LoadKubeCli()
	if err != nil {
		fmt.Printf("Failed to load kubeCLi (%s)", err.Error())
		return
	}
	dyncli, err := kubestr.LoadDynCli()
	if err != nil {
		// Fixed copy-paste bug: this previously printed the same
		// "kubeCLi" message, hiding which client failed to load.
		fmt.Printf("Failed to load DynCli (%s)", err.Error())
		return
	}
	csiCheckRunner := &csi.SnapshotRestoreRunner{
		KubeCli: kubecli,
		DynCli:  dyncli,
	}
	var result *kubestr.TestOutput
	csiCheckResult, err := csiCheckRunner.RunSnapshotRestore(ctx, &csitypes.CSISnapshotRestoreArgs{
		StorageClass:        storageclass,
		VolumeSnapshotClass: volumesnapshotclass,
		Namespace:           namespace,
		RunAsUser:           runAsUser,
		ContainerImage:      containerImage,
		Cleanup:             cleanup,
		SkipCFSCheck:        skipCFScheck,
	})
	if err != nil {
		result = kubestr.MakeTestOutput(testName, kubestr.StatusError, err.Error(), csiCheckResult)
	} else {
		result = kubestr.MakeTestOutput(testName, kubestr.StatusOK, "CSI application successfully snapshotted and restored.", csiCheckResult)
	}
	if output == "json" {
		jsonRes, _ := json.MarshalIndent(result, "", " ")
		fmt.Println(string(jsonRes))
		return
	}
	result.Print()
}

7
docs/README.md Normal file
View file

@ -0,0 +1,7 @@
# Kubestr
Kubestr is a tool that qualifies the storage options present in a cluster.
In upcoming releases we plan to support running an FIO test on the storage as well as testing the snapshotting capabilities of a storage provisioner.
To run the tool -
`curl https://kastenhq.github.io/kubestr/run_kubestr.sh | bash`

1
docs/_config.yml Normal file
View file

@ -0,0 +1 @@
theme: jekyll-theme-hacker

25
docs/kubestr/404.html Normal file
View file

@ -0,0 +1,25 @@
---
permalink: /404.html
layout: default
---
<style type="text/css" media="screen">
.container {
margin: 10px auto;
max-width: 600px;
text-align: center;
}
h1 {
margin: 30px 0;
font-size: 4em;
line-height: 1;
letter-spacing: -1px;
}
</style>
<div class="container">
<h1>404</h1>
<p><strong>Page not found :(</strong></p>
<p>The requested page could not be found.</p>
</div>

30
docs/kubestr/Gemfile Normal file
View file

@ -0,0 +1,30 @@
source "https://rubygems.org"
# Hello! This is where you manage which Jekyll version is used to run.
# When you want to use a different version, change it below, save the
# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
#
# bundle exec jekyll serve
#
# This will help ensure the proper Jekyll version is running.
# Happy Jekylling!
gem "jekyll", "~> 4.2.0"
# This is the default theme for new Jekyll sites. You may change this to anything you like.
gem "minima", "~> 2.5"
# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
# uncomment the line below. To upgrade, run `bundle update github-pages`.
# gem "github-pages", group: :jekyll_plugins
# If you have any plugins, put them here!
group :jekyll_plugins do
gem "jekyll-feed", "~> 0.12"
end
# Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem
# and associated library.
platforms :mingw, :x64_mingw, :mswin, :jruby do
gem "tzinfo", "~> 1.2"
gem "tzinfo-data"
end
# Performance-booster for watching directories on Windows
gem "wdm", "~> 0.1.1", :platforms => [:mingw, :x64_mingw, :mswin]

80
docs/kubestr/Gemfile.lock Normal file
View file

@ -0,0 +1,80 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.7.0)
public_suffix (>= 2.0.2, < 5.0)
colorator (1.1.0)
concurrent-ruby (1.1.8)
em-websocket (0.5.2)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
eventmachine (1.2.7)
ffi (1.14.2)
forwardable-extended (2.6.0)
http_parser.rb (0.6.0)
i18n (1.8.8)
concurrent-ruby (~> 1.0)
jekyll (4.2.0)
addressable (~> 2.4)
colorator (~> 1.0)
em-websocket (~> 0.5)
i18n (~> 1.0)
jekyll-sass-converter (~> 2.0)
jekyll-watch (~> 2.0)
kramdown (~> 2.3)
kramdown-parser-gfm (~> 1.0)
liquid (~> 4.0)
mercenary (~> 0.4.0)
pathutil (~> 0.9)
rouge (~> 3.0)
safe_yaml (~> 1.0)
terminal-table (~> 2.0)
jekyll-feed (0.15.1)
jekyll (>= 3.7, < 5.0)
jekyll-sass-converter (2.1.0)
sassc (> 2.0.1, < 3.0)
jekyll-seo-tag (2.7.1)
jekyll (>= 3.8, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
kramdown (2.3.0)
rexml
kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0)
liquid (4.0.3)
listen (3.4.1)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.4.0)
minima (2.5.1)
jekyll (>= 3.5, < 5.0)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (4.0.6)
rb-fsevent (0.10.4)
rb-inotify (0.10.1)
ffi (~> 1.0)
rexml (3.2.4)
rouge (3.26.0)
safe_yaml (1.0.5)
sassc (2.4.0)
ffi (~> 1.9)
terminal-table (2.0.0)
unicode-display_width (~> 1.1, >= 1.1.1)
unicode-display_width (1.7.0)
PLATFORMS
x86_64-darwin-20
DEPENDENCIES
jekyll (~> 4.2.0)
jekyll-feed (~> 0.12)
minima (~> 2.5)
tzinfo (~> 1.2)
tzinfo-data
wdm (~> 0.1.1)
BUNDLED WITH
2.2.8

55
docs/kubestr/_config.yml Normal file
View file

@ -0,0 +1,55 @@
# Welcome to Jekyll!
#
# This config file is meant for settings that affect your whole blog, values
# which you are expected to set up once and rarely edit after that. If you find
# yourself editing this file very often, consider using Jekyll's data files
# feature for the data you need to update frequently.
#
# For technical reasons, this file is *NOT* reloaded automatically when you use
# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
#
# If you need help with YAML syntax, here are some quick references for you:
# https://learn-the-web.algonquindesign.ca/topics/markdown-yaml-cheat-sheet/#yaml
# https://learnxinyminutes.com/docs/yaml/
#
# Site settings
# These are used to personalize your new site. If you look in the HTML files,
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: Your awesome title
email: your-email@example.com
description: >- # this means to ignore newlines until "baseurl:"
Write an awesome description for your new site here. You can edit this
line in _config.yml. It will appear in your document head meta (for
Google search results) and in your feed.xml site description.
baseurl: "" # the subpath of your site, e.g. /blog
url: "" # the base hostname & protocol for your site, e.g. http://example.com
twitter_username: jekyllrb
github_username: jekyll
# Build settings
theme: minima
plugins:
- jekyll-feed
# Exclude from processing.
# The following items will not be processed, by default.
# Any item listed under the `exclude:` key here will be automatically added to
# the internal "default list".
#
# Excluded items can be processed by explicitly listing the directories or
# their entries' file path in the `include:` list.
#
# exclude:
# - .sass-cache/
# - .jekyll-cache/
# - gemfiles/
# - Gemfile
# - Gemfile.lock
# - node_modules/
# - vendor/bundle/
# - vendor/cache/
# - vendor/gems/
# - vendor/ruby/

View file

@ -0,0 +1,29 @@
---
layout: post
title: "Welcome to Jekyll!"
date: 2021-02-05 15:20:32 -1000
categories: jekyll update
---
You'll find this post in your `_posts` directory. Go ahead and edit it and re-build the site to see your changes. You can rebuild the site in many different ways, but the most common way is to run `jekyll serve`, which launches a web server and auto-regenerates your site when a file is updated.
Jekyll requires blog post files to be named according to the following format:
`YEAR-MONTH-DAY-title.MARKUP`
Where `YEAR` is a four-digit number, `MONTH` and `DAY` are both two-digit numbers, and `MARKUP` is the file extension representing the format used in the file. After that, include the necessary front matter. Take a look at the source for this post to get an idea about how it works.
Jekyll also offers powerful support for code snippets:
{% highlight ruby %}
def print_hi(name)
puts "Hi, #{name}"
end
print_hi('Tom')
#=> prints 'Hi, Tom' to STDOUT.
{% endhighlight %}
Check out the [Jekyll docs][jekyll-docs] for more info on how to get the most out of Jekyll. File all bugs/feature requests at [Jekyll's GitHub repo][jekyll-gh]. If you have questions, you can ask them on [Jekyll Talk][jekyll-talk].
[jekyll-docs]: https://jekyllrb.com/docs/home
[jekyll-gh]: https://github.com/jekyll/jekyll
[jekyll-talk]: https://talk.jekyllrb.com/

View file

@ -0,0 +1,18 @@
---
layout: page
title: About
permalink: /about/
---
This is the base Jekyll theme. You can find out more info about customizing your Jekyll theme, as well as basic Jekyll usage documentation at [jekyllrb.com](https://jekyllrb.com/)
You can find the source code for Minima at GitHub:
[jekyll][jekyll-organization] /
[minima](https://github.com/jekyll/minima)
You can find the source code for Jekyll at GitHub:
[jekyll][jekyll-organization] /
[jekyll](https://github.com/jekyll/jekyll)
[jekyll-organization]: https://github.com/jekyll

View file

@ -0,0 +1,6 @@
---
# Feel free to add content and custom Front Matter to this file.
# To modify the layout, see https://jekyllrb.com/docs/themes/#overriding-theme-defaults
layout: home
---

121
extra/csi-drivers Normal file
View file

@ -0,0 +1,121 @@
# Drivers
The following are a set of CSI driver which can be used with Kubernetes:
> NOTE: If you would like your driver to be added to this table, please open a pull request in [this repo](https://github.com/kubernetes-csi/docs/pulls) updating this file. The Other Features column may be filled in with Raw Block, Snapshot, Expansion and Cloning. If a driver does not implement any Other Features, please leave it blank.
> DISCLAIMER: Information in this table has not been validated by Kubernetes SIG-Storage. Users who want to use these CSI drivers need to contact driver maintainers for driver capabilities.
## Production Drivers
Name | CSI Driver Name | Compatible with CSI Version(s) | Description | Persistence (Beyond Pod Lifetime) | Supported Access Modes | Dynamic Provisioning | Other Features
-----|-----------------|--------------------------------|-------------|-----------------------------------|------------------------|----------------------|--------
[Alicloud Disk](https://github.com/AliyunContainerService/csi-plugin) | `diskplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Disk | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot
[Alicloud NAS](https://github.com/AliyunContainerService/csi-plugin) | `nasplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Network Attached Storage (NAS) | Persistent | Read/Write Multiple Pods | No |
[Alicloud OSS](https://github.com/AliyunContainerService/csi-plugin)| `ossplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Object Storage Service (OSS) | Persistent | Read/Write Multiple Pods | No |
[ArStor CSI](https://github.com/huayun-docs/csi-driver-arstor) | `arstor.csi.huayun.io` | v1.0 | A Container Storage Interface (CSI) Driver for Huayun Storage Service (ArStor) | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[AWS Elastic Block Storage](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) | `ebs.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS Elastic Block Storage (EBS) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion
[AWS Elastic File System](https://github.com/aws/aws-efs-csi-driver) | `efs.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS Elastic File System (EFS) | Persistent | Read/Write Multiple Pods | No |
[AWS FSx for Lustre](https://github.com/aws/aws-fsx-csi-driver) | `fsx.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS FSx for Lustre | Persistent | Read/Write Multiple Pods | Yes |
[Azure disk](https://github.com/kubernetes-sigs/azuredisk-csi-driver) | `disk.csi.azure.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Azure disk | Persistent | Read/Write Single Pod | Yes |
[Azure file](https://github.com/kubernetes-sigs/azurefile-csi-driver) | `file.csi.azure.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Azure file | Persistent | Read/Write Multiple Pods | Yes |
[Bigtera VirtualStor (block)](https://github.com/bigtera-ce/ceph-csi) | `csi.block.bigtera.com` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for Bigtera VirtualStor block storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion
[Bigtera VirtualStor (filesystem)](https://github.com/bigtera-ce/ceph-csi) | `csi.fs.bigtera.com` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for Bigtera VirtualStor filesystem | Persistent | Read/Write Multiple Pods | Yes | Expansion
[CephFS](https://github.com/ceph/ceph-csi) | `cephfs.csi.ceph.com` | v0.3, v1.0.0, v1.1.0, v1.2.0 | A Container Storage Interface (CSI) Driver for CephFS | Persistent | Read/Write Multiple Pods | Yes | Expansion, Snapshot, Clone
[Ceph RBD](https://github.com/ceph/ceph-csi) | `rbd.csi.ceph.com` | v0.3, v1.0.0, v1.1.0, v1.2.0 | A Container Storage Interface (CSI) Driver for Ceph RBD | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology, Cloning
[ChubaoFS](https://github.com/chubaofs/chubaofs-csi) | `csi.chubaofs.com` | v1.0.0 | A Container Storage Interface (CSI) Driver for ChubaoFS Storage | Persistent | Read/Write Multiple Pods | Yes |
[Cinder](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/cinder) | `cinder.csi.openstack.org` | v0.3, v1.0, v1.1 | A Container Storage Interface (CSI) Driver for OpenStack Cinder | Persistent and Ephemeral | Depends on the storage backend used | Yes, if storage backend supports it | Raw Block, Snapshot, Expansion
[cloudscale.ch](https://github.com/cloudscale-ch/csi-cloudscale) | `csi.cloudscale.ch` | v1.0 | A Container Storage Interface (CSI) Driver for the [cloudscale.ch](https://www.cloudscale.ch/) IaaS platform | Persistent | Read/Write Single Pod | Yes |Snapshot
[Datatom-InfinityCSI](https://github.com/datatom-infinity/infinity-csi) | `csi-infiblock-plugin` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for DATATOM Infinity storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology
[Datatom-InfinityCSI (filesystem)](https://github.com/datatom-infinity/infinity-csi) | `csi-infifs-plugin` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for DATATOM Infinity filesystem storage | Persistent | Read/Write Multiple Pods | Yes | Expansion
[Datera](https://github.com/Datera/datera-csi) | `dsp.csi.daterainc.io` | v1.0 | A Container Storage Interface (CSI) Driver for Datera Data Services Platform (DSP) | Persistent | Read/Write Single Pod | Yes |Snapshot
[DDN EXAScaler](https://github.com/DDNStorage/exa-csi-driver) | `exa.csi.ddn.com` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for DDN EXAScaler filesystems | Persistent | Read/Write Multiple Pods | Yes | Expansion
[Dell EMC PowerScale](https://github.com/dell/csi-powerscale) | `csi-isilon.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerScale](https://www.delltechnologies.com/en-us/storage/powerscale.htm) | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning
[Dell EMC PowerMax](https://github.com/dell/csi-powermax) | `csi-powermax.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerMax](https://www.delltechnologies.com/en-us/storage/powermax.htm) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[Dell EMC PowerStore](https://github.com/dell/csi-powerstore) | `csi-powerstore.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerStore](https://www.delltechnologies.com/en-us/storage/powerstore-storage-appliance.htm) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[Dell EMC Unity](https://github.com/dell/csi-unity) | `csi-unity.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC Unity](https://www.delltechnologies.com/en-us/storage/unity.htm) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[Dell EMC VxFlexOS](https://github.com/dell/csi-vxflexos) | `csi-vxflexos.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC VxFlexOS](https://www.delltechnologies.com/en-us/hyperconverged-infrastructure/vxflex.htm) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology
[democratic-csi](https://github.com/democratic-csi/democratic-csi) | `org.democratic-csi.[X]` | v1.0,v1.1,v1.2 | Generic CSI plugin supporting zfs based solutions ([FreeNAS](https://www.freenas.org/) / [TrueNAS](https://www.truenas.com/) and [ZoL](https://zfsonlinux.org/) solutions such as [Ubuntu](https://ubuntu.com/)) | Persistent and Ephemeral | Read/Write Single Pod (Block Volume) <br/><br/> Read/Write Multiple Pods (File Volume) | Yes | Raw Block, Snapshot, Expansion, Cloning
[Diamanti-CSI](https://diamanti.com/use-cases/io-acceleration/#csi) | `dcx.csi.diamanti.com` | v1.0 | A Container Storage Interface (CSI) Driver for Diamanti DCX Platform | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion
[DigitalOcean Block Storage](https://github.com/digitalocean/csi-digitalocean) | `dobs.csi.digitalocean.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for DigitalOcean Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion
[DriveScale](https://github.com/DriveScale/k8s-plugins) | `csi.drivescale.com` | v1.0 |A Container Storage Interface (CSI) Driver for DriveScale software composable infrastructure solution | Persistent | Read/Write Single Pod | Yes |
[Ember CSI](https://ember-csi.io) | `[x].ember-csi.io` | v0.2, v0.3, v1.0 | Multi-vendor CSI plugin supporting over 80 Drivers to provide block and mount storage to Container Orchestration systems. | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot
[Excelero NVMesh](https://github.com/Excelero/nvmesh-csi-driver) | `nvmesh-csi.excelero.com` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for Excelero NVMesh | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Expansion
[GCE Persistent Disk](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) | `pd.csi.storage.gke.io` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Google Compute Engine Persistent Disk (GCE PD) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology
[Google Cloud Filestore](https://github.com/kubernetes-sigs/gcp-filestore-csi-driver) | `com.google.csi.filestore` | v0.3 | A Container Storage Interface (CSI) Driver for Google Cloud Filestore | Persistent | Read/Write Multiple Pods | Yes |
[Google Cloud Storage](https://github.com/ofek/csi-gcs) | `gcs.csi.ofek.dev` | v1.0 | A Container Storage Interface (CSI) Driver for Google Cloud Storage | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Expansion
[GlusterFS](https://github.com/gluster/gluster-csi-driver) | `org.gluster.glusterfs` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for GlusterFS | Persistent | Read/Write Multiple Pods | Yes | Snapshot
[Gluster VirtBlock](https://github.com/gluster/gluster-csi-driver) | `org.gluster.glustervirtblock` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Gluster Virtual Block volumes | Persistent | Read/Write Single Pod | Yes |
[Hammerspace CSI](https://github.com/hammer-space/csi-plugin) | `com.hammerspace.csi` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Hammerspace Storage | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot
[Hedvig](https://documentation.commvault.com/commvault/hedvig/others/pdf/Hedvig_CSI_User_Guide.pdf) | `io.hedvig.csi` | v1.0 | A Container Storage Interface (CSI) Driver for Hedvig | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion
[Hetzner Cloud Volumes CSI](https://github.com/hetznercloud/csi-driver) | `csi.hetzner.cloud` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Hetzner Cloud Volumes | Persistent | Read/Write Single Pod | Yes | Raw Block, Expansion
[Hitachi Vantara](https://knowledge.hitachivantara.com/Documents/Adapters_and_Drivers/Storage_Adapters_and_Drivers/Containers) | `hspc.csi.hitachi.com` | v1.2 | A Container Storage Interface (CSI) Driver for VSP series Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[HPE](https://github.com/hpe-storage/csi-driver) | `csi.hpe.com` | v1.0, v1.1, v1.2 | A [multi-platform](https://scod.hpedev.io/csi_driver) Container Storage Interface (CSI) driver. Supports [HPE Nimble Storage](https://hpe.com/storage/nimble), [HPE Primera](https://hpe.com/storage/primera) and [HPE 3PAR](https://hpe.com/storage/3par) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning
[Huawei Storage CSI](https://github.com/Huawei/eSDK_K8S_Plugin) | `csi.huawei.com` | v1.0 | A Container Storage Interface (CSI) Driver for FusionStorage, OceanStor 100D, OceanStor Dorado V3, OceanStor Dorado V6, OceanStor V3, OceanStor V5 | Persistent | Read/Write Multiple Pod | Yes | Snapshot, Expansion, Cloning
[HyperV CSI](https://github.com/Zetanova/hyperv-csi-driver) | `eu.zetanova.csi.hyperv` | v1.0, v1.1 | A Container Storage Interface (CSI) driver to manage hyperv hosts | Persistent | Read/Write Multiple Pods | Yes |
[IBM Block Storage](https://github.com/ibm/ibm-block-csi-driver) | `block.csi.ibm.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) [Driver](https://www.ibm.com/support/knowledgecenter/SSRQ8T) for IBM Spectrum Virtualize Family, IBM FlashSystem A9000 and A9000R, IBM DS8880 and DS8900. | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[IBM Spectrum Scale](https://github.com/IBM/ibm-spectrum-scale-csi) | `spectrumscale.csi.ibm.com` | v1.0, v1.1 | A Container Storage Interface (CSI) [Driver](https://www.ibm.com/support/knowledgecenter/STXKQY_CSI_SHR) for the IBM Spectrum Scale File System | Persistent | Read/Write Multiple Pod | Yes |
[IBM Cloud Block Storage VPC CSI Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) | `vpc.block.csi.ibm.io` | v1.0 | A Container Storage Interface (CSI) [Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) for IBM Cloud Kubernetes Service and Red Hat OpenShift on IBM Cloud | Persistent | Read/Write Single Pod | Yes | Raw Block |
[Infinidat](https://github.com/Infinidat/infinibox-csi-driver) | `infinibox-csi-driver` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for Infinidat [InfiniBox](https://infinidat.com/en/products-technology/infinibox) | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning
[Inspur InStorage CSI](https://github.com/OpenInspur/instorage-k8s) | `csi-instorage` | v1.0 | A Container Storage Interface (CSI) Driver for inspur AS/HF/CS/CF Series Primary Storage, inspur AS13000 Series SDS Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[Intel PMEM-CSI](https://github.com/intel/pmem-csi) | `pmem-csi.intel.com` | v1.0 | A Container Storage Interface (CSI) driver for [PMEM](https://pmem.io/) from Intel | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block
[JuiceFS](https://github.com/juicedata/juicefs-csi-driver) | `csi.juicefs.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for JuiceFS File System | Persistent | Read/Write Multiple Pod | Yes |
[kaDalu](https://github.com/kadalu/kadalu) | `org.kadalu.gluster` | v0.3 | A CSI Driver (and operator) for GlusterFS | Persistent | Read/Write Multiple Pods | Yes |
[KumoScale Block Storage](https://github.com/KioxiaAmerica/kumoscale-csi) | `kumoscale.kioxia.com` | v1.0 | A Container Storage Interface (CSI) Driver for KumoScale Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology
[Linode Block Storage](https://github.com/linode/linode-blockstorage-csi-driver) | `linodebs.csi.linode.com` | v1.0 | A Container Storage Interface (CSI) Driver for Linode Block Storage | Persistent | Read/Write Single Pod | Yes |
[LINSTOR](https://github.com/LINBIT/linstor-csi) | `io.drbd.linstor-csi` | v1.1 | A Container Storage Interface (CSI) Driver for [LINSTOR](https://www.linbit.com/en/linstor/) volumes | Persistent | Read/Write Single Pod | Yes | Snapshot
[Longhorn](https://github.com/longhorn/longhorn) | `driver.longhorn.io` | v1.1 | A Container Storage Interface (CSI) Driver for [Longhorn](https://longhorn.io/) volumes | Persistent | Read/Write Single Node | Yes | Raw Block
[MacroSAN](https://github.com/macrosan-csi/macrosan-csi-driver) | `csi-macrosan` | v1.0 | A Container Storage Interface (CSI) Driver for MacroSAN Block Storage | Persistent | Read/Write Single Pod | Yes |
[Manila](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/manila) | `manila.csi.openstack.org` | v1.1, v1.2 | A Container Storage Interface (CSI) Driver for OpenStack Shared File System Service (Manila) | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Topology
[MapR](https://github.com/mapr/mapr-csi) | `com.mapr.csi-kdf` | v1.0 | A Container Storage Interface (CSI) Driver for MapR Data Platform | Persistent | Read/Write Multiple Pods | Yes | Snapshot
[MooseFS](https://github.com/moosefs/moosefs-csi) | `com.tuxera.csi.moosefs` | v1.0 | A Container Storage Interface (CSI) Driver for [MooseFS](https://moosefs.com/) clusters. | Persistent | Read/Write Multiple Pods | Yes |
[NetApp](https://github.com/NetApp/trident) | `csi.trident.netapp.io` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for NetApp's [Trident](https://netapp-trident.readthedocs.io/) container storage orchestrator | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology
[NexentaStor File Storage](https://github.com/Nexenta/nexentastor-csi-driver) | `nexentastor-csi-driver.nexenta.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for NexentaStor File Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology
[NexentaStor Block Storage](https://github.com/Nexenta/nexentastor-csi-driver-block) | `nexentastor-block-csi-driver.nexenta.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for NexentaStor over iSCSI protocol | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology, Raw block
[Nutanix](https://github.com/nutanix/csi-plugin) | `com.nutanix.csi` | v0.3, v1.0, v1.2 | A Container Storage Interface (CSI) Driver for Nutanix | Persistent | "Read/Write Single Pod" with Nutanix Volumes and "Read/Write Multiple Pods" with Nutanix Files | Yes | Raw Block, Snapshot, Expansion, Cloning
[OpenEBS](https://github.com/openebs/csi)| `cstor.csi.openebs.io` | v1.0 | A Container Storage Interface (CSI) Driver for [OpenEBS](https://www.openebs.io/)| Persistent | Read/Write Single Pod | Yes | Expansion, Snapshot, Cloning
[OpenSDS](https://github.com/opensds/nbp/tree/master/csi) | `csi-opensdsplugin` | v1.0 | A Container Storage Interface (CSI) Driver for [OpenSDS](https://www.opensds.io/) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot
[Open-E](https://github.com/open-e/JovianDSS-KubernetesCSI) | `com.open-e.joviandss.csi` | v1.0 | A Container Storage Interface (CSI) Driver for Open-E JovianDSS Storage | Persistent | Read/Write Single Pod | Yes | Snapshot, Cloning
[oVirt](https://github.com/openshift/ovirt-csi-driver) | `csi.ovirt.org` | v1.0 | A Container Storage Interface (CSI) Driver for [oVirt](https://ovirt.org) | Persistent | Read/Write Single Pod | Yes | Block, File Storage
[Portworx](https://github.com/libopenstorage/openstorage/tree/master/csi) | `pxd.openstorage.org` | v0.3, v1.1 | A Container Storage Interface (CSI) Driver for [Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/) | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion
[Pure Storage CSI](https://github.com/purestorage/pso-csi)| `pure-csi` | v1.0, v1.1, v1.2, v1.3 | A Container Storage Interface (CSI) Driver for Pure Storage's [Pure Service Orchestrator](https://purestorage.com/containers) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Snapshot, Cloning, Raw Block, Topology, Expansion
[QingCloud CSI](https://github.com/yunify/qingcloud-csi)| `disk.csi.qingcloud.com` | v1.1 | A Container Storage Interface (CSI) Driver for QingCloud Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[QingStor CSI](https://github.com/yunify/qingstor-csi) | `neonsan.csi.qingstor.com` | v0.3, v1.1 | A Container Storage Interface (CSI) Driver for NeonSAN storage system | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning
[Quobyte](https://github.com/quobyte/quobyte-csi) | `quobyte-csi` | v0.2 | A Container Storage Interface (CSI) Driver for Quobyte | Persistent | Read/Write Multiple Pods | Yes |
[ROBIN](https://get.robin.io/) | `robin` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [ROBIN](https://docs.robin.io) | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning
[SandStone](https://github.com/sandstone-storage/sandstone-csi-driver) | `csi-sandstone-plugin` | v1.0 | A Container Storage Interface (CSI) Driver for SandStone USP | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning
[Sangfor-EDS-File-Storage](https://github.com/evan37717/sangfor-eds-csi) | `eds.csi.file.sangfor.com` | v1.0 | A Container Storage Interface (CSI) Driver for Sangfor Distributed File Storage(EDS) | Persistent | Read/Write Multiple Pods | Yes |
[Sangfor-EDS-Block-Storage](https://github.com/eds-wzc/sangfor-eds-csi) | `eds.csi.block.sangfor.com` | v1.0 | A Container Storage Interface (CSI) Driver for Sangfor Block Storage(EDS) | Persistent | Read/Write Single Pod | Yes |
[SeaweedFS](https://github.com/seaweedfs/seaweedfs-csi-driver) | `seaweedfs-csi-driver` | v1.0 | A Container Storage Interface (CSI) Driver for [SeaweedFS](https://github.com/chrislusf/seaweedfs) | Persistent | Read/Write Multiple Pods | Yes |
[Secrets Store CSI Driver](https://github.com/kubernetes-sigs/secrets-store-csi-driver) | `secrets-store.csi.k8s.io` | v0.0.10 | A Container Storage Interface (CSI) Driver for mounting secrets, keys, and certs stored in enterprise-grade external secrets stores as volumes. | Ephemeral | N/A | N/A |
[SmartX](http://www.smartx.com/?locale=en) | `csi-smtx-plugin` | v1.0 | A Container Storage Interface (CSI) Driver for SmartX ZBS Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion
[SPDK-CSI](https://github.com/spdk/spdk-csi) | `csi.spdk.io` | v1.1 | A Container Storage Interface (CSI) Driver for [SPDK](https://spdk.io/) | Persistent and Ephemeral | Read/Write Single Pod | Yes |
[StorageOS](https://docs.storageos.com/docs/platforms/kubernetes/install/) | `storageos` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [StorageOS](https://storageos.com/) | Persistent | Read/Write Multiple Pods | Yes |
[Storidge](https://docs.storidge.com/kubernetes_storage/overview.html) | `csi.cio.storidge.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [Storidge CIO](https://storidge.com/) | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion
[StorPool](https://kb.storpool.com/storpool_integrations/github/kubernetes.html) | `csi-driver.storpool.com` | v1.0 | A Container Storage Interface (CSI) Driver for [StorPool](https://storpool.com/) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Expansion
[Tencent Cloud Block Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cbs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud Block Storage | Persistent | Read/Write Single Pod | Yes | Snapshot
[Tencent Cloud File Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cfs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud File Storage | Persistent | Read/Write Multiple Pods | Yes |
[Tencent Cloud Object Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cosfs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud Object Storage | Persistent | Read/Write Multiple Pods | No |
[TopoLVM](https://github.com/cybozu-go/topolvm)| `topolvm.cybozu.com` | v1.1 | A Container Storage Interface (CSI) Driver for LVM | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Expansion, Topology Aware
[VAST Data](https://github.com/vast-data/vast-csi) | `csi.vastdata.com` | v1.0 | A Container Storage Interface (CSI) Driver for VAST Data | Persistent | Read/Write Multiple Pods | Yes |
[XSKY-EBS](https://xsky-storage.github.io/xsky-csi-driver/csi-block.html) | `csi.block.xsky.com` | v1.0 | A Container Storage Interface (CSI) Driver for XSKY Distributed Block Storage (X-EBS) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning
[XSKY-EUS](https://xsky-storage.github.io/xsky-csi-driver/csi-fs.html) | `csi.fs.xsky.com` | v1.0 | A Container Storage Interface (CSI) Driver for XSKY Distributed File Storage (X-EUS) | Persistent | Read/Write Multiple Pods | Yes |
[Vault](https://github.com/kubevault/csi-driver) | `secrets.csi.kubevault.com` | v1.0 | A Container Storage Interface (CSI) Driver for mounting HashiCorp Vault secrets as volumes. | Ephemeral | N/A | N/A |
[Veritas InfoScale Volumes](https://www.veritas.com/solution/virtualization/containers.html) | `org.veritas.infoscale` | v1.2 | A Container Storage Interface (CSI) Driver for Veritas InfoScale volumes | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning
[vSphere](https://github.com/kubernetes-sigs/vsphere-csi-driver) | `csi.vsphere.vmware.com` | v1.0 | A Container Storage Interface (CSI) Driver for VMware vSphere | Persistent | Read/Write Single Pod (Block Volume) <br/><br/> Read/Write Multiple Pods (File Volume) | Yes | Raw Block,<br/><br/>Expansion (Block Volume),<br/><br/>Topology Aware (Block Volume)
[Vultr Block Storage](https://github.com/vultr/vultr-csi) | `block.csi.vultr.com` | v1.2 | A Container Storage Interface (CSI) Driver for Vultr Block Storage | Persistent | Read/Write Single Pod | Yes |
[WekaIO](https://github.com/weka/csi-wekafs) | `csi.weka.io` | v1.0 | A Container Storage Interface (CSI) Driver for mounting WekaIO WekaFS filesystem as volumes | Persistent | Read/Write Multiple Pods | Yes |
[Yandex.Cloud](https://github.com/flant/yandex-csi-driver) | `yandex.csi.flant.com` | v1.2 | A Container Storage Interface (CSI) plugin for Yandex.Cloud Compute Disks | Persistent | Read/Write Single Pod | Yes |
[YanRongYun](http://www.yanrongyun.com/) | ? | v1.0 | A Container Storage Interface (CSI) Driver for YanRong YRCloudFile Storage | Persistent | Read/Write Multiple Pods | Yes |
[Zadara-CSI](https://github.com/zadarastorage/zadara-csi) | `csi.zadara.com` | v1.0, v1.1 | A Container Storage Interface (CSI) plugin for Zadara VPSA Storage Array & VPSA All-Flash | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning
## Sample Drivers
Name | Status | More Information
-----|--------|-------
[Flexvolume](https://github.com/kubernetes-csi/csi-driver-flex) | Sample |
[HostPath](https://github.com/kubernetes-csi/csi-driver-host-path) | v1.2.0 | Only use for single-node tests. See the [Example](example.html) page for Kubernetes-specific instructions.
[ImagePopulator](https://github.com/kubernetes-csi/csi-driver-image-populator) | Prototype | Driver that lets you use a container image as an ephemeral volume.
[In-memory Sample Mock Driver](https://github.com/kubernetes-csi/csi-test/tree/master/mock/service) | v0.3.0 | The sample mock driver used for [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity)
[NFS](https://github.com/kubernetes-csi/csi-driver-nfs) | Sample |
[Synology NAS](https://github.com/jparklab/synology-csi) | v1.0.0 | An unofficial (and unsupported) Container Storage Interface Driver for Synology NAS.
[VFS Driver](https://github.com/thecodeteam/csi-vfs) | Released | A CSI plugin that provides a virtual file system.

35
go.mod Normal file
View file

@ -0,0 +1,35 @@
module github.com/kastenhq/kubestr
go 1.14
replace github.com/graymeta/stow => github.com/kastenhq/stow v0.1.2-kasten
require (
github.com/Masterminds/semver v1.5.0 // indirect
github.com/aws/aws-sdk-go v1.31.6 // indirect
github.com/briandowns/spinner v1.12.0
github.com/golang/mock v1.4.4
github.com/google/go-cmp v0.4.1 // indirect
github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/imdario/mergo v0.3.9 // indirect
github.com/jarcoal/httpmock v1.0.5 // indirect
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 // indirect
github.com/kanisterio/kanister v0.0.0-20210112095936-bf04d6102c72
github.com/kr/text v0.2.0 // indirect
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0
github.com/mitchellh/mapstructure v1.3.1 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/onsi/ginkgo v1.12.0 // indirect
github.com/onsi/gomega v1.9.0 // indirect
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0 // indirect
github.com/spf13/cobra v1.0.0
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d // indirect
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
gopkg.in/yaml.v2 v2.3.0 // indirect
honnef.co/go/tools v0.0.1-2020.1.5 // indirect
k8s.io/api v0.19.5
k8s.io/apimachinery v0.19.5
k8s.io/client-go v0.19.5
)

1223
go.sum Normal file

File diff suppressed because it is too large Load diff

30
main.go Normal file
View file

@ -0,0 +1,30 @@
// Copyright 2020 Kubestr Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
//go:generate ./scripts/load_csi_provisioners.sh
import (
	"os"

	"github.com/kastenhq/kubestr/cmd"
)
// main is the kubestr entry point. It runs the root command and exits
// with a non-zero status when the command reports an error.
//
// The original body discarded the error (`_ = Execute()`), which made
// the process exit 0 even when the command failed, hiding failures
// from shells and CI pipelines.
func main() {
	if err := Execute(); err != nil {
		// NOTE(review): cmd.Execute presumably prints its own error
		// (standard cobra behavior) — confirm; here we only propagate
		// a failing exit code.
		os.Exit(1)
	}
}
// Execute dispatches to the root kubestr command defined in the cmd
// package and reports whatever error that command returns.
func Execute() error {
	err := cmd.Execute()
	return err
}

24
pkg/common/common.go Normal file
View file

@ -0,0 +1,24 @@
package common
const (
	// VolSnapClassAlphaDriverKey is the VolumeSnapshotClass field that
	// names the CSI driver in the v1alpha1 snapshot API.
	VolSnapClassAlphaDriverKey = "snapshotter"
	// VolSnapClassBetaDriverKey is the VolumeSnapshotClass field that
	// names the CSI driver in the v1beta1 snapshot API.
	VolSnapClassBetaDriverKey = "driver"
	// VolSnapClassStableDriverKey is the VolumeSnapshotClass field that
	// names the CSI driver in the stable (v1) snapshot API. Same key as
	// the beta API; kept separate so callers can pick by API version.
	VolSnapClassStableDriverKey = "driver"
	// DefaultPodImage is the default container image used by kubestr pods.
	DefaultPodImage = "ghcr.io/kastenhq/kubestr:latest"
	// SnapGroupName is the API group of the snapshot resources.
	SnapGroupName = "snapshot.storage.k8s.io"
	// VolumeSnapshotClassResourcePlural is the plural resource name for
	// volume snapshot classes.
	VolumeSnapshotClassResourcePlural = "volumesnapshotclasses"
	// VolumeSnapshotResourcePlural is the plural resource name for
	// volume snapshots.
	VolumeSnapshotResourcePlural = "volumesnapshots"
	// SnapshotAlphaVersion is the group/version of the alpha release.
	SnapshotAlphaVersion = "snapshot.storage.k8s.io/v1alpha1"
	// SnapshotBetaVersion is the group/version of the beta release.
	SnapshotBetaVersion = "snapshot.storage.k8s.io/v1beta1"
	// SnapshotStableVersion is the group/version of the stable release.
	SnapshotStableVersion = "snapshot.storage.k8s.io/v1"
)

11
pkg/csi/csi.go Normal file
View file

@ -0,0 +1,11 @@
package csi
import (
"context"
"github.com/kastenhq/kubestr/pkg/csi/types"
)
// CSI is the set of checks this package can run against a CSI driver.
type CSI interface {
	// RunSnapshotRestore performs a snapshot-and-restore check using
	// the supplied arguments and returns its results, or an error if
	// the check could not be completed.
	RunSnapshotRestore(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error)
}

View file

@ -0,0 +1,49 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ApiVersionFetcher)
// Package mocks is a generated GoMock package.
package mocks
import (
gomock "github.com/golang/mock/gomock"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
reflect "reflect"
)
// MockApiVersionFetcher is a gomock-generated mock of the
// ApiVersionFetcher interface. (Generated code — do not edit by hand.)
type MockApiVersionFetcher struct {
	ctrl     *gomock.Controller
	recorder *MockApiVersionFetcherMockRecorder
}
// MockApiVersionFetcherMockRecorder records expected calls for
// MockApiVersionFetcher.
type MockApiVersionFetcherMockRecorder struct {
	mock *MockApiVersionFetcher
}
// NewMockApiVersionFetcher creates a new mock instance bound to the
// given gomock controller.
func NewMockApiVersionFetcher(ctrl *gomock.Controller) *MockApiVersionFetcher {
	mock := &MockApiVersionFetcher{ctrl: ctrl}
	mock.recorder = &MockApiVersionFetcherMockRecorder{mock}
	return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockApiVersionFetcher) EXPECT() *MockApiVersionFetcherMockRecorder {
return m.recorder
}
// GetCSISnapshotGroupVersion mocks base method
func (m *MockApiVersionFetcher) GetCSISnapshotGroupVersion() (*v1.GroupVersionForDiscovery, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetCSISnapshotGroupVersion")
ret0, _ := ret[0].(*v1.GroupVersionForDiscovery)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetCSISnapshotGroupVersion indicates an expected call of GetCSISnapshotGroupVersion
func (mr *MockApiVersionFetcherMockRecorder) GetCSISnapshotGroupVersion() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCSISnapshotGroupVersion", reflect.TypeOf((*MockApiVersionFetcher)(nil).GetCSISnapshotGroupVersion))
}

View file

@ -0,0 +1,80 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ApplicationCreator)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	gomock "github.com/golang/mock/gomock"
	types "github.com/kastenhq/kubestr/pkg/csi/types"
	v1 "k8s.io/api/core/v1"
	reflect "reflect"
)

// MockApplicationCreator is a mock of the ApplicationCreator interface.
type MockApplicationCreator struct {
	ctrl     *gomock.Controller
	recorder *MockApplicationCreatorMockRecorder
}

// MockApplicationCreatorMockRecorder is the mock recorder for MockApplicationCreator.
type MockApplicationCreatorMockRecorder struct {
	mock *MockApplicationCreator
}

// NewMockApplicationCreator creates a new mock instance.
func NewMockApplicationCreator(ctrl *gomock.Controller) *MockApplicationCreator {
	mock := &MockApplicationCreator{ctrl: ctrl}
	mock.recorder = &MockApplicationCreatorMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockApplicationCreator) EXPECT() *MockApplicationCreatorMockRecorder {
	return m.recorder
}

// CreatePVC mocks base method.
func (m *MockApplicationCreator) CreatePVC(arg0 context.Context, arg1 *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreatePVC", arg0, arg1)
	ret0, _ := ret[0].(*v1.PersistentVolumeClaim)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CreatePVC indicates an expected call of CreatePVC.
func (mr *MockApplicationCreatorMockRecorder) CreatePVC(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePVC", reflect.TypeOf((*MockApplicationCreator)(nil).CreatePVC), arg0, arg1)
}

// CreatePod mocks base method.
func (m *MockApplicationCreator) CreatePod(arg0 context.Context, arg1 *types.CreatePodArgs) (*v1.Pod, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreatePod", arg0, arg1)
	ret0, _ := ret[0].(*v1.Pod)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CreatePod indicates an expected call of CreatePod.
func (mr *MockApplicationCreatorMockRecorder) CreatePod(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePod", reflect.TypeOf((*MockApplicationCreator)(nil).CreatePod), arg0, arg1)
}

// WaitForPodReady mocks base method.
func (m *MockApplicationCreator) WaitForPodReady(arg0 context.Context, arg1, arg2 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "WaitForPodReady", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// WaitForPodReady indicates an expected call of WaitForPodReady.
func (mr *MockApplicationCreatorMockRecorder) WaitForPodReady(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForPodReady", reflect.TypeOf((*MockApplicationCreator)(nil).WaitForPodReady), arg0, arg1, arg2)
}

View file

@ -0,0 +1,81 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ArgumentValidator)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	gomock "github.com/golang/mock/gomock"
	v1 "k8s.io/api/storage/v1"
	v10 "k8s.io/apimachinery/pkg/apis/meta/v1"
	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	reflect "reflect"
)

// MockArgumentValidator is a mock of the ArgumentValidator interface.
type MockArgumentValidator struct {
	ctrl     *gomock.Controller
	recorder *MockArgumentValidatorMockRecorder
}

// MockArgumentValidatorMockRecorder is the mock recorder for MockArgumentValidator.
type MockArgumentValidatorMockRecorder struct {
	mock *MockArgumentValidator
}

// NewMockArgumentValidator creates a new mock instance.
func NewMockArgumentValidator(ctrl *gomock.Controller) *MockArgumentValidator {
	mock := &MockArgumentValidator{ctrl: ctrl}
	mock.recorder = &MockArgumentValidatorMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockArgumentValidator) EXPECT() *MockArgumentValidatorMockRecorder {
	return m.recorder
}

// ValidateNamespace mocks base method.
func (m *MockArgumentValidator) ValidateNamespace(arg0 context.Context, arg1 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateNamespace", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// ValidateNamespace indicates an expected call of ValidateNamespace.
func (mr *MockArgumentValidatorMockRecorder) ValidateNamespace(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNamespace", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateNamespace), arg0, arg1)
}

// ValidateStorageClass mocks base method.
func (m *MockArgumentValidator) ValidateStorageClass(arg0 context.Context, arg1 string) (*v1.StorageClass, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateStorageClass", arg0, arg1)
	ret0, _ := ret[0].(*v1.StorageClass)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ValidateStorageClass indicates an expected call of ValidateStorageClass.
func (mr *MockArgumentValidatorMockRecorder) ValidateStorageClass(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateStorageClass", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateStorageClass), arg0, arg1)
}

// ValidateVolumeSnapshotClass mocks base method.
func (m *MockArgumentValidator) ValidateVolumeSnapshotClass(arg0 context.Context, arg1 string, arg2 *v10.GroupVersionForDiscovery) (*unstructured.Unstructured, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateVolumeSnapshotClass", arg0, arg1, arg2)
	ret0, _ := ret[0].(*unstructured.Unstructured)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ValidateVolumeSnapshotClass indicates an expected call of ValidateVolumeSnapshotClass.
func (mr *MockArgumentValidatorMockRecorder) ValidateVolumeSnapshotClass(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeSnapshotClass", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateVolumeSnapshotClass), arg0, arg1, arg2)
}

View file

@ -0,0 +1,77 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: Cleaner)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	gomock "github.com/golang/mock/gomock"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	reflect "reflect"
)

// MockCleaner is a mock of the Cleaner interface.
type MockCleaner struct {
	ctrl     *gomock.Controller
	recorder *MockCleanerMockRecorder
}

// MockCleanerMockRecorder is the mock recorder for MockCleaner.
type MockCleanerMockRecorder struct {
	mock *MockCleaner
}

// NewMockCleaner creates a new mock instance.
func NewMockCleaner(ctrl *gomock.Controller) *MockCleaner {
	mock := &MockCleaner{ctrl: ctrl}
	mock.recorder = &MockCleanerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCleaner) EXPECT() *MockCleanerMockRecorder {
	return m.recorder
}

// DeletePVC mocks base method.
func (m *MockCleaner) DeletePVC(arg0 context.Context, arg1, arg2 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeletePVC", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeletePVC indicates an expected call of DeletePVC.
func (mr *MockCleanerMockRecorder) DeletePVC(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePVC", reflect.TypeOf((*MockCleaner)(nil).DeletePVC), arg0, arg1, arg2)
}

// DeletePod mocks base method.
func (m *MockCleaner) DeletePod(arg0 context.Context, arg1, arg2 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeletePod", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeletePod indicates an expected call of DeletePod.
func (mr *MockCleanerMockRecorder) DeletePod(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePod", reflect.TypeOf((*MockCleaner)(nil).DeletePod), arg0, arg1, arg2)
}

// DeleteSnapshot mocks base method.
func (m *MockCleaner) DeleteSnapshot(arg0 context.Context, arg1, arg2 string, arg3 *v1.GroupVersionForDiscovery) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteSnapshot indicates an expected call of DeleteSnapshot.
func (mr *MockCleanerMockRecorder) DeleteSnapshot(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockCleaner)(nil).DeleteSnapshot), arg0, arg1, arg2, arg3)
}

View file

@ -0,0 +1,48 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: DataValidator)

// Package mocks is a generated GoMock package.
package mocks

import (
	gomock "github.com/golang/mock/gomock"
	reflect "reflect"
)

// MockDataValidator is a mock of the DataValidator interface.
type MockDataValidator struct {
	ctrl     *gomock.Controller
	recorder *MockDataValidatorMockRecorder
}

// MockDataValidatorMockRecorder is the mock recorder for MockDataValidator.
type MockDataValidatorMockRecorder struct {
	mock *MockDataValidator
}

// NewMockDataValidator creates a new mock instance.
func NewMockDataValidator(ctrl *gomock.Controller) *MockDataValidator {
	mock := &MockDataValidator{ctrl: ctrl}
	mock.recorder = &MockDataValidatorMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDataValidator) EXPECT() *MockDataValidatorMockRecorder {
	return m.recorder
}

// FetchPodData mocks base method.
func (m *MockDataValidator) FetchPodData(arg0, arg1 string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FetchPodData", arg0, arg1)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FetchPodData indicates an expected call of FetchPodData.
func (mr *MockDataValidatorMockRecorder) FetchPodData(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchPodData", reflect.TypeOf((*MockDataValidator)(nil).FetchPodData), arg0, arg1)
}

View file

@ -0,0 +1,82 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: SnapshotCreator)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	gomock "github.com/golang/mock/gomock"
	snapshot "github.com/kanisterio/kanister/pkg/kube/snapshot"
	types "github.com/kastenhq/kubestr/pkg/csi/types"
	snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	reflect "reflect"
)

// MockSnapshotCreator is a mock of the SnapshotCreator interface.
type MockSnapshotCreator struct {
	ctrl     *gomock.Controller
	recorder *MockSnapshotCreatorMockRecorder
}

// MockSnapshotCreatorMockRecorder is the mock recorder for MockSnapshotCreator.
type MockSnapshotCreatorMockRecorder struct {
	mock *MockSnapshotCreator
}

// NewMockSnapshotCreator creates a new mock instance.
func NewMockSnapshotCreator(ctrl *gomock.Controller) *MockSnapshotCreator {
	mock := &MockSnapshotCreator{ctrl: ctrl}
	mock.recorder = &MockSnapshotCreatorMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSnapshotCreator) EXPECT() *MockSnapshotCreatorMockRecorder {
	return m.recorder
}

// CreateFromSourceCheck mocks base method.
func (m *MockSnapshotCreator) CreateFromSourceCheck(arg0 context.Context, arg1 snapshot.Snapshotter, arg2 *types.CreateFromSourceCheckArgs, arg3 *v1.GroupVersionForDiscovery) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateFromSourceCheck", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

// CreateFromSourceCheck indicates an expected call of CreateFromSourceCheck.
func (mr *MockSnapshotCreatorMockRecorder) CreateFromSourceCheck(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFromSourceCheck", reflect.TypeOf((*MockSnapshotCreator)(nil).CreateFromSourceCheck), arg0, arg1, arg2, arg3)
}

// CreateSnapshot mocks base method.
func (m *MockSnapshotCreator) CreateSnapshot(arg0 context.Context, arg1 snapshot.Snapshotter, arg2 *types.CreateSnapshotArgs) (*snapv1.VolumeSnapshot, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1, arg2)
	ret0, _ := ret[0].(*snapv1.VolumeSnapshot)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CreateSnapshot indicates an expected call of CreateSnapshot.
func (mr *MockSnapshotCreatorMockRecorder) CreateSnapshot(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockSnapshotCreator)(nil).CreateSnapshot), arg0, arg1, arg2)
}

// NewSnapshotter mocks base method.
func (m *MockSnapshotCreator) NewSnapshotter() (snapshot.Snapshotter, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NewSnapshotter")
	ret0, _ := ret[0].(snapshot.Snapshotter)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// NewSnapshotter indicates an expected call of NewSnapshotter.
func (mr *MockSnapshotCreatorMockRecorder) NewSnapshotter() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSnapshotter", reflect.TypeOf((*MockSnapshotCreator)(nil).NewSnapshotter))
}

View file

@ -0,0 +1,124 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: SnapshotRestoreStepper)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	gomock "github.com/golang/mock/gomock"
	types "github.com/kastenhq/kubestr/pkg/csi/types"
	snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
	v1 "k8s.io/api/core/v1"
	reflect "reflect"
)

// MockSnapshotRestoreStepper is a mock of the SnapshotRestoreStepper interface.
type MockSnapshotRestoreStepper struct {
	ctrl     *gomock.Controller
	recorder *MockSnapshotRestoreStepperMockRecorder
}

// MockSnapshotRestoreStepperMockRecorder is the mock recorder for MockSnapshotRestoreStepper.
type MockSnapshotRestoreStepperMockRecorder struct {
	mock *MockSnapshotRestoreStepper
}

// NewMockSnapshotRestoreStepper creates a new mock instance.
func NewMockSnapshotRestoreStepper(ctrl *gomock.Controller) *MockSnapshotRestoreStepper {
	mock := &MockSnapshotRestoreStepper{ctrl: ctrl}
	mock.recorder = &MockSnapshotRestoreStepperMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSnapshotRestoreStepper) EXPECT() *MockSnapshotRestoreStepperMockRecorder {
	return m.recorder
}

// Cleanup mocks base method.
func (m *MockSnapshotRestoreStepper) Cleanup(arg0 context.Context, arg1 *types.CSISnapshotRestoreResults) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "Cleanup", arg0, arg1)
}

// Cleanup indicates an expected call of Cleanup.
func (mr *MockSnapshotRestoreStepperMockRecorder) Cleanup(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).Cleanup), arg0, arg1)
}

// CreateApplication mocks base method.
func (m *MockSnapshotRestoreStepper) CreateApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 string) (*v1.Pod, *v1.PersistentVolumeClaim, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateApplication", arg0, arg1, arg2)
	ret0, _ := ret[0].(*v1.Pod)
	ret1, _ := ret[1].(*v1.PersistentVolumeClaim)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// CreateApplication indicates an expected call of CreateApplication.
func (mr *MockSnapshotRestoreStepperMockRecorder) CreateApplication(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateApplication", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).CreateApplication), arg0, arg1, arg2)
}

// RestoreApplication mocks base method.
func (m *MockSnapshotRestoreStepper) RestoreApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 *snapv1.VolumeSnapshot) (*v1.Pod, *v1.PersistentVolumeClaim, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RestoreApplication", arg0, arg1, arg2)
	ret0, _ := ret[0].(*v1.Pod)
	ret1, _ := ret[1].(*v1.PersistentVolumeClaim)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// RestoreApplication indicates an expected call of RestoreApplication.
func (mr *MockSnapshotRestoreStepperMockRecorder) RestoreApplication(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreApplication", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).RestoreApplication), arg0, arg1, arg2)
}

// SnapshotApplication mocks base method.
func (m *MockSnapshotRestoreStepper) SnapshotApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 *v1.PersistentVolumeClaim, arg3 string) (*snapv1.VolumeSnapshot, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SnapshotApplication", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*snapv1.VolumeSnapshot)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SnapshotApplication indicates an expected call of SnapshotApplication.
func (mr *MockSnapshotRestoreStepperMockRecorder) SnapshotApplication(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SnapshotApplication", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).SnapshotApplication), arg0, arg1, arg2, arg3)
}

// ValidateArgs mocks base method.
func (m *MockSnapshotRestoreStepper) ValidateArgs(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateArgs", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// ValidateArgs indicates an expected call of ValidateArgs.
func (mr *MockSnapshotRestoreStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateArgs", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).ValidateArgs), arg0, arg1)
}

// ValidateData mocks base method.
func (m *MockSnapshotRestoreStepper) ValidateData(arg0 context.Context, arg1 *v1.Pod, arg2 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateData", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// ValidateData indicates an expected call of ValidateData.
func (mr *MockSnapshotRestoreStepperMockRecorder) ValidateData(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateData", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).ValidateData), arg0, arg1, arg2)
}

649
pkg/csi/snapshot_restore.go Normal file
View file

@ -0,0 +1,649 @@
package csi
import (
"context"
"fmt"
"time"
kankube "github.com/kanisterio/kanister/pkg/kube"
kansnapshot "github.com/kanisterio/kanister/pkg/kube/snapshot"
"github.com/kastenhq/kubestr/pkg/common"
"github.com/kastenhq/kubestr/pkg/csi/types"
snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
sv1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
const (
	// originalPVCGenerateName is the generateName prefix for the source PVC.
	originalPVCGenerateName = "kubestr-csi-original-pvc"
	// originalPodGenerateName is the generateName prefix for the pod that writes the test data.
	originalPodGenerateName = "kubestr-csi-original-pod"
	// clonedPVCGenerateName is the generateName prefix for the PVC restored from the snapshot.
	clonedPVCGenerateName = "kubestr-csi-cloned-pvc"
	// clonedPodGenerateName is the generateName prefix for the pod that mounts the restored PVC.
	clonedPodGenerateName = "kubestr-csi-cloned-pod"
	// createdByLabel marks resources created by this check (used for identification/cleanup).
	createdByLabel = "created-by-kubestr-csi"
	// clonePrefix is the name prefix for clone resources (used by callers outside this view).
	clonePrefix = "kubestr-clone-"
	// snapshotPrefix prefixes the generated VolumeSnapshot name (suffixed with a timestamp).
	snapshotPrefix = "kubestr-snapshot-"
)
// SnapshotRestoreRunner runs the CSI snapshot/restore check end to end.
type SnapshotRestoreRunner struct {
	KubeCli kubernetes.Interface // typed Kubernetes client
	DynCli  dynamic.Interface    // dynamic client, used for snapshot custom resources
	// srSteps holds the individual steps of the check; wired up in
	// RunSnapshotRestore and replaceable with mocks in tests.
	srSteps SnapshotRestoreStepper
}
// RunSnapshotRestore wires the default step implementations (backed by the
// runner's Kubernetes clients) into srSteps and executes the check.
func (r *SnapshotRestoreRunner) RunSnapshotRestore(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error) {
	r.srSteps = &snapshotRestoreSteps{
		validateOps: &validateOperations{
			kubeCli: r.KubeCli,
			dynCli:  r.DynCli,
		},
		versionFetchOps: &apiVersionFetch{
			kubeCli: r.KubeCli,
		},
		createAppOps: &applicationCreate{
			kubeCli: r.KubeCli,
		},
		dataValidatorOps: &validateData{
			kubeCli: r.KubeCli,
		},
		snapshotCreateOps: &snapshotCreate{
			kubeCli: r.KubeCli,
			dynCli:  r.DynCli,
		},
		cleanerOps: &cleanse{
			kubeCli: r.KubeCli,
			dynCli:  r.DynCli,
		},
	}
	return r.RunSnapshotRestoreHelper(ctx, args)
}
// RunSnapshotRestoreHelper executes the snapshot/restore check using the
// previously injected srSteps. The steps are chained on err == nil so a
// failure skips the remaining steps but still reaches the optional cleanup;
// partial results are returned alongside the error so callers can inspect
// (or clean up) whatever was created.
func (r *SnapshotRestoreRunner) RunSnapshotRestoreHelper(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error) {
	results := &types.CSISnapshotRestoreResults{}
	var err error
	if r.KubeCli == nil || r.DynCli == nil {
		return results, fmt.Errorf("cli uninitialized")
	}
	if err := r.srSteps.ValidateArgs(ctx, args); err != nil {
		return results, errors.Wrap(err, "Failed to validate arguments.")
	}
	// The timestamp doubles as the unique payload written to the volume and
	// as the suffix of the snapshot name.
	data := time.Now().Format("20060102150405")
	fmt.Println("Creating application")
	results.OriginalPod, results.OriginalPVC, err = r.srSteps.CreateApplication(ctx, args, data)
	if err == nil {
		if results.OriginalPod != nil && results.OriginalPVC != nil {
			fmt.Printf("  -> Created pod (%s) and pvc (%s)\n", results.OriginalPod.Name, results.OriginalPVC.Name)
		}
		err = r.srSteps.ValidateData(ctx, results.OriginalPod, data)
	}
	snapName := snapshotPrefix + data
	if err == nil {
		fmt.Println("Taking a snapshot")
		results.Snapshot, err = r.srSteps.SnapshotApplication(ctx, args, results.OriginalPVC, snapName)
	}
	if err == nil {
		if results.Snapshot != nil {
			fmt.Printf("  -> Created snapshot (%s)\n", results.Snapshot.Name)
		}
		fmt.Println("Restoring application")
		results.ClonedPod, results.ClonedPVC, err = r.srSteps.RestoreApplication(ctx, args, results.Snapshot)
	}
	if err == nil {
		if results.ClonedPod != nil && results.ClonedPVC != nil {
			fmt.Printf("  -> Restored pod (%s) and pvc (%s)\n", results.ClonedPod.Name, results.ClonedPVC.Name)
		}
		// The restored volume must contain the data written before the snapshot.
		err = r.srSteps.ValidateData(ctx, results.ClonedPod, data)
	}
	if args.Cleanup {
		fmt.Println("Cleaning up resources")
		r.srSteps.Cleanup(ctx, results)
	}
	return results, err
}
//go:generate mockgen -destination=mocks/mock_snapshot_restore_stepper.go -package=mocks . SnapshotRestoreStepper

// SnapshotRestoreStepper defines the individual steps of the snapshot/restore check.
type SnapshotRestoreStepper interface {
	// ValidateArgs checks the user-supplied arguments against the cluster.
	ValidateArgs(ctx context.Context, args *types.CSISnapshotRestoreArgs) error
	// CreateApplication provisions the source PVC and a pod that writes data to it.
	CreateApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, data string) (*v1.Pod, *v1.PersistentVolumeClaim, error)
	// ValidateData verifies that the pod's volume holds the expected data.
	ValidateData(ctx context.Context, pod *v1.Pod, data string) error
	// SnapshotApplication takes a VolumeSnapshot of the given PVC.
	SnapshotApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, pvc *v1.PersistentVolumeClaim, snapshotName string) (*snapv1.VolumeSnapshot, error)
	// RestoreApplication creates a PVC/pod pair from the snapshot.
	RestoreApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, snapshot *snapv1.VolumeSnapshot) (*v1.Pod, *v1.PersistentVolumeClaim, error)
	// Cleanup best-effort deletes all resources recorded in results.
	Cleanup(ctx context.Context, results *types.CSISnapshotRestoreResults)
}
// snapshotRestoreSteps is the default SnapshotRestoreStepper implementation.
// Each concern is delegated to a small interface so it can be mocked in tests.
type snapshotRestoreSteps struct {
	validateOps       ArgumentValidator
	versionFetchOps   ApiVersionFetcher
	createAppOps      ApplicationCreator
	dataValidatorOps  DataValidator
	snapshotCreateOps SnapshotCreator
	cleanerOps        Cleaner
	// SnapshotGroupVersion caches the discovered snapshot API group version;
	// set by ValidateArgs and reused by later steps.
	SnapshotGroupVersion *metav1.GroupVersionForDiscovery
}
// ValidateArgs validates the input arguments and the cluster objects they
// reference (namespace, StorageClass, VolumeSnapshotClass), caches the
// discovered snapshot API group version on s, and ensures the StorageClass
// provisioner matches the VolumeSnapshotClass driver so a snapshot of the
// provisioned volume can actually be taken.
func (s *snapshotRestoreSteps) ValidateArgs(ctx context.Context, args *types.CSISnapshotRestoreArgs) error {
	if err := args.Validate(); err != nil {
		return errors.Wrap(err, "Failed to validate input arguments")
	}
	if err := s.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil {
		return errors.Wrap(err, "Failed to validate Namespace")
	}
	sc, err := s.validateOps.ValidateStorageClass(ctx, args.StorageClass)
	if err != nil {
		return errors.Wrap(err, "Failed to validate Storageclass")
	}
	groupVersion, err := s.versionFetchOps.GetCSISnapshotGroupVersion()
	if err != nil {
		return errors.Wrap(err, "Failed to fetch groupVersion")
	}
	// Cache for the snapshot and cleanup steps.
	s.SnapshotGroupVersion = groupVersion
	uVSC, err := s.validateOps.ValidateVolumeSnapshotClass(ctx, args.VolumeSnapshotClass, groupVersion)
	if err != nil {
		return errors.Wrap(err, "Failed to validate VolumeSnapshotClass")
	}
	vscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion)
	if sc.Provisioner != vscDriver {
		// Error strings should not end with punctuation (staticcheck ST1005).
		return fmt.Errorf("StorageClass provisioner (%s) and VolumeSnapshotClass driver (%s) are different", sc.Provisioner, vscDriver)
	}
	return nil
}
// CreateApplication provisions a fresh PVC and a pod that writes genString to
// /data/out.txt on it, then blocks until the pod reports ready. On failure it
// returns whatever resources were already created so the caller can clean up.
func (s *snapshotRestoreSteps) CreateApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, genString string) (*v1.Pod, *v1.PersistentVolumeClaim, error) {
	pvc, err := s.createAppOps.CreatePVC(ctx, &types.CreatePVCArgs{
		GenerateName: originalPVCGenerateName,
		StorageClass: args.StorageClass,
		Namespace:    args.Namespace,
	})
	if err != nil {
		return nil, nil, errors.Wrap(err, "Failed to create PVC")
	}
	// The pod writes the marker string once, syncs, then idles so it stays Running.
	pod, err := s.createAppOps.CreatePod(ctx, &types.CreatePodArgs{
		GenerateName:   originalPodGenerateName,
		PVCName:        pvc.Name,
		Namespace:      args.Namespace,
		Cmd:            fmt.Sprintf("echo '%s' >> /data/out.txt; sync; tail -f /dev/null", genString),
		RunAsUser:      args.RunAsUser,
		ContainerImage: args.ContainerImage,
	})
	if err != nil {
		return nil, pvc, errors.Wrap(err, "Failed to create POD")
	}
	if err := s.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil {
		return pod, pvc, errors.Wrap(err, "Pod failed to become ready")
	}
	return pod, pvc, nil
}
// ValidateData reads /data/out.txt from the pod and confirms it equals data.
func (s *snapshotRestoreSteps) ValidateData(ctx context.Context, pod *v1.Pod, data string) error {
	stored, err := s.dataValidatorOps.FetchPodData(pod.Name, pod.Namespace)
	if err != nil {
		return errors.Wrap(err, "Failed to fetch data from pod. Failure may be due to permissions issues. Try again with runAsUser=1000 option.")
	}
	if stored == data {
		return nil
	}
	return fmt.Errorf("string didn't match (%s , %s)", stored, data)
}
// SnapshotApplication takes a VolumeSnapshot of the given PVC and, unless
// args.SkipCFSCheck is set, additionally verifies that a second snapshot can
// be created from the first (the "create from source" check).
func (s *snapshotRestoreSteps) SnapshotApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, pvc *v1.PersistentVolumeClaim, snapshotName string) (*snapv1.VolumeSnapshot, error) {
	snapshotter, err := s.snapshotCreateOps.NewSnapshotter()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to load snapshotter")
	}
	snapshot, err := s.snapshotCreateOps.CreateSnapshot(ctx, snapshotter, &types.CreateSnapshotArgs{
		Namespace:           args.Namespace,
		PVCName:             pvc.Name,
		VolumeSnapshotClass: args.VolumeSnapshotClass,
		SnapshotName:        snapshotName,
	})
	if err != nil {
		return nil, errors.Wrap(err, "Failed to create Snapshot")
	}
	if args.SkipCFSCheck {
		return snapshot, nil
	}
	cfsArgs := &types.CreateFromSourceCheckArgs{
		VolumeSnapshotClass: args.VolumeSnapshotClass,
		SnapshotName:        snapshot.Name,
		Namespace:           args.Namespace,
	}
	if err := s.snapshotCreateOps.CreateFromSourceCheck(ctx, snapshotter, cfsArgs, s.SnapshotGroupVersion); err != nil {
		return snapshot, errors.Wrap(err, "Failed to create duplicate snapshot from source. To skip check use '--skipcfs=true' option.")
	}
	return snapshot, nil
}
// RestoreApplication creates a PVC sourced from the given VolumeSnapshot and a
// pod that mounts it, then blocks until the pod reports ready. On failure it
// returns whatever resources were already created so the caller can clean up.
func (s *snapshotRestoreSteps) RestoreApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, snapshot *snapv1.VolumeSnapshot) (*v1.Pod, *v1.PersistentVolumeClaim, error) {
	apiGroup := "snapshot.storage.k8s.io"
	pvcArgs := &types.CreatePVCArgs{
		GenerateName: clonedPVCGenerateName,
		StorageClass: args.StorageClass,
		Namespace:    args.Namespace,
		DataSource: &v1.TypedLocalObjectReference{
			APIGroup: &apiGroup,
			Kind:     "VolumeSnapshot",
			Name:     snapshot.Name,
		},
		// Size the restored PVC from the snapshot's reported restore size.
		RestoreSize: snapshot.Status.RestoreSize,
	}
	pvc, err := s.createAppOps.CreatePVC(ctx, pvcArgs)
	if err != nil {
		return nil, nil, errors.Wrap(err, "Failed to restore PVC")
	}
	// The restored pod only idles; ValidateData later reads the volume contents.
	pod, err := s.createAppOps.CreatePod(ctx, &types.CreatePodArgs{
		GenerateName:   clonedPodGenerateName,
		PVCName:        pvc.Name,
		Namespace:      args.Namespace,
		Cmd:            "tail -f /dev/null",
		RunAsUser:      args.RunAsUser,
		ContainerImage: args.ContainerImage,
	})
	if err != nil {
		return nil, pvc, errors.Wrap(err, "Failed to create restored Pod")
	}
	if err := s.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil {
		return pod, pvc, errors.Wrap(err, "Pod failed to become ready")
	}
	return pod, pvc, nil
}
// Cleanup best-effort deletes every resource recorded in results. Each
// deletion failure is reported to stdout ("deleteing" typo fixed) but does
// not abort the remaining deletions.
func (s *snapshotRestoreSteps) Cleanup(ctx context.Context, results *types.CSISnapshotRestoreResults) {
	if results == nil {
		return
	}
	if results.OriginalPVC != nil {
		if err := s.cleanerOps.DeletePVC(ctx, results.OriginalPVC.Name, results.OriginalPVC.Namespace); err != nil {
			fmt.Printf("Error deleting PVC (%s) - (%v)\n", results.OriginalPVC.Name, err)
		}
	}
	if results.OriginalPod != nil {
		if err := s.cleanerOps.DeletePod(ctx, results.OriginalPod.Name, results.OriginalPod.Namespace); err != nil {
			fmt.Printf("Error deleting Pod (%s) - (%v)\n", results.OriginalPod.Name, err)
		}
	}
	if results.ClonedPVC != nil {
		if err := s.cleanerOps.DeletePVC(ctx, results.ClonedPVC.Name, results.ClonedPVC.Namespace); err != nil {
			fmt.Printf("Error deleting PVC (%s) - (%v)\n", results.ClonedPVC.Name, err)
		}
	}
	if results.ClonedPod != nil {
		if err := s.cleanerOps.DeletePod(ctx, results.ClonedPod.Name, results.ClonedPod.Namespace); err != nil {
			fmt.Printf("Error deleting Pod (%s) - (%v)\n", results.ClonedPod.Name, err)
		}
	}
	if results.Snapshot != nil {
		if err := s.cleanerOps.DeleteSnapshot(ctx, results.Snapshot.Name, results.Snapshot.Namespace, s.SnapshotGroupVersion); err != nil {
			fmt.Printf("Error deleting Snapshot (%s) - (%v)\n", results.Snapshot.Name, err)
		}
	}
}
//go:generate mockgen -destination=mocks/mock_argument_validator.go -package=mocks . ArgumentValidator

// ArgumentValidator checks user-supplied arguments against live cluster objects.
type ArgumentValidator interface {
	// ValidateNamespace returns an error if the namespace does not exist.
	ValidateNamespace(ctx context.Context, namespace string) error
	// ValidateStorageClass fetches the named StorageClass.
	ValidateStorageClass(ctx context.Context, storageClass string) (*sv1.StorageClass, error)
	// ValidateVolumeSnapshotClass fetches the named VolumeSnapshotClass at the
	// given snapshot API group version.
	ValidateVolumeSnapshotClass(ctx context.Context, volumeSnapshotClass string, groupVersion *metav1.GroupVersionForDiscovery) (*unstructured.Unstructured, error)
}
// validateOperations is the default ArgumentValidator backed by live clients.
type validateOperations struct {
	kubeCli kubernetes.Interface // typed client for namespaces and storage classes
	dynCli  dynamic.Interface    // dynamic client for VolumeSnapshotClass CRs
}
// ValidateNamespace returns an error if the namespace cannot be fetched.
func (o *validateOperations) ValidateNamespace(ctx context.Context, namespace string) error {
	if o.kubeCli == nil {
		return fmt.Errorf("kubeCli not initialized")
	}
	if _, err := o.kubeCli.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}); err != nil {
		return err
	}
	return nil
}
// ValidateStorageClass fetches the named StorageClass from the cluster.
func (o *validateOperations) ValidateStorageClass(ctx context.Context, storageClass string) (*sv1.StorageClass, error) {
	if o.kubeCli == nil {
		return nil, fmt.Errorf("kubeCli not initialized")
	}
	scClient := o.kubeCli.StorageV1().StorageClasses()
	return scClient.Get(ctx, storageClass, metav1.GetOptions{})
}
// ValidateVolumeSnapshotClass fetches the named VolumeSnapshotClass through
// the dynamic client, addressing it via the provided snapshot API group
// version.
func (o *validateOperations) ValidateVolumeSnapshotClass(ctx context.Context, volumeSnapshotClass string, groupVersion *metav1.GroupVersionForDiscovery) (*unstructured.Unstructured, error) {
	if o.dynCli == nil {
		return nil, fmt.Errorf("dynCli not initialized")
	}
	gvr := schema.GroupVersionResource{
		Group:    common.SnapGroupName,
		Version:  groupVersion.Version,
		Resource: common.VolumeSnapshotClassResourcePlural,
	}
	return o.dynCli.Resource(gvr).Get(ctx, volumeSnapshotClass, metav1.GetOptions{})
}
//go:generate mockgen -destination=mocks/mock_application_creator.go -package=mocks . ApplicationCreator

// ApplicationCreator provisions the test workload: a PVC, a pod that mounts
// it, and a wait for the pod to become ready.
type ApplicationCreator interface {
	// CreatePVC creates a PersistentVolumeClaim described by args.
	CreatePVC(ctx context.Context, args *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error)
	// CreatePod creates a pod described by args that mounts the PVC.
	CreatePod(ctx context.Context, args *types.CreatePodArgs) (*v1.Pod, error)
	// WaitForPodReady blocks until the named pod is ready or an error occurs.
	WaitForPodReady(ctx context.Context, namespace string, podName string) error
}
// applicationCreate is the default ApplicationCreator implementation, backed
// by a typed Kubernetes client.
type applicationCreate struct {
	kubeCli kubernetes.Interface
}
// CreatePVC creates a 1Gi ReadWriteOnce PersistentVolumeClaim in
// args.Namespace using args.StorageClass, labeled as created by this tool.
// When args.DataSource is set the claim restores from that source, and a
// non-zero args.RestoreSize overrides the requested storage size. On success
// the API server's view of the created claim is returned; on failure the
// claim result is nil.
func (c *applicationCreate) CreatePVC(ctx context.Context, args *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error) {
	if c.kubeCli == nil {
		return nil, fmt.Errorf("kubeCli not initialized")
	}
	if err := args.Validate(); err != nil {
		return nil, err
	}
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: args.GenerateName,
			Namespace:    args.Namespace,
			Labels: map[string]string{
				createdByLabel: "yes",
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			StorageClassName: &args.StorageClass,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
	if args.DataSource != nil {
		pvc.Spec.DataSource = args.DataSource
	}
	if args.RestoreSize != nil && !args.RestoreSize.IsZero() {
		pvc.Spec.Resources.Requests[v1.ResourceStorage] = *args.RestoreSize
	}
	pvcRes, err := c.kubeCli.CoreV1().PersistentVolumeClaims(args.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
	if err != nil {
		// Return nil rather than the unsubmitted local spec: with GenerateName
		// the object has no server-assigned name, so returning it on error
		// could only trigger spurious cleanup attempts by callers.
		return nil, err
	}
	return pvcRes, nil
}
// CreatePod creates a pod in args.Namespace that mounts args.PVCName at
// /data and runs args.Cmd via "/bin/sh -c". An empty args.ContainerImage
// defaults to common.DefaultPodImage; a positive args.RunAsUser sets both
// RunAsUser and FSGroup on the pod security context. On success the API
// server's view of the created pod is returned; on failure the pod result is
// nil.
func (c *applicationCreate) CreatePod(ctx context.Context, args *types.CreatePodArgs) (*v1.Pod, error) {
	if c.kubeCli == nil {
		return nil, fmt.Errorf("kubeCli not initialized")
	}
	if err := args.Validate(); err != nil {
		return nil, err
	}
	// Default the image locally instead of mutating the caller's args struct.
	image := args.ContainerImage
	if image == "" {
		image = common.DefaultPodImage
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: args.GenerateName,
			Namespace:    args.Namespace,
			Labels: map[string]string{
				createdByLabel: "yes",
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    args.GenerateName,
				Image:   image,
				Command: []string{"/bin/sh"},
				Args:    []string{"-c", args.Cmd},
				VolumeMounts: []v1.VolumeMount{{
					Name:      "persistent-storage",
					MountPath: "/data",
				}},
			}},
			Volumes: []v1.Volume{{
				Name: "persistent-storage",
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
						ClaimName: args.PVCName,
					},
				},
			}},
		},
	}
	if args.RunAsUser > 0 {
		pod.Spec.SecurityContext = &v1.PodSecurityContext{
			RunAsUser: &args.RunAsUser,
			FSGroup:   &args.RunAsUser,
		}
	}
	podRes, err := c.kubeCli.CoreV1().Pods(args.Namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		// Return nil rather than the unsubmitted local spec; with GenerateName
		// it carries no server-assigned name and is useless to callers.
		return nil, err
	}
	return podRes, nil
}
// WaitForPodReady blocks until the named pod reports ready, delegating the
// polling to the kanister kube helper.
func (c *applicationCreate) WaitForPodReady(ctx context.Context, namespace string, podName string) error {
	if c.kubeCli == nil {
		return fmt.Errorf("kubeCli not initialized")
	}
	return kankube.WaitForPodReady(ctx, c.kubeCli, namespace, podName)
}
//go:generate mockgen -destination=mocks/mock_snapshot_creator.go -package=mocks . SnapshotCreator

// SnapshotCreator handles the snapshot side of the test: constructing a
// snapshotter, taking a snapshot of a PVC, and verifying that the driver can
// create a snapshot from an existing snapshot's content.
type SnapshotCreator interface {
	// NewSnapshotter builds a kanister Snapshotter from the stored clients.
	NewSnapshotter() (kansnapshot.Snapshotter, error)
	// CreateSnapshot snapshots the PVC described by args and returns the
	// resulting VolumeSnapshot.
	CreateSnapshot(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateSnapshotArgs) (*snapv1.VolumeSnapshot, error)
	// CreateFromSourceCheck verifies the driver's create-from-source support.
	CreateFromSourceCheck(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateFromSourceCheckArgs, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error
}
// snapshotCreate is the default SnapshotCreator implementation, backed by a
// typed Kubernetes client and a dynamic client.
type snapshotCreate struct {
	kubeCli kubernetes.Interface
	dynCli  dynamic.Interface
}
// NewSnapshotter constructs a kanister Snapshotter from the stored clients,
// failing fast if either client is missing.
func (c *snapshotCreate) NewSnapshotter() (kansnapshot.Snapshotter, error) {
	switch {
	case c.kubeCli == nil:
		return nil, fmt.Errorf("kubeCli not initialized")
	case c.dynCli == nil:
		return nil, fmt.Errorf("dynCli not initialized")
	}
	return kansnapshot.NewSnapshotter(c.kubeCli, c.dynCli)
}
// CreateSnapshot snapshots args.PVCName using args.VolumeSnapshotClass,
// blocking until the snapshot is ready (waitForReady == true), and then
// fetches and returns the resulting VolumeSnapshot.
func (c *snapshotCreate) CreateSnapshot(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateSnapshotArgs) (*snapv1.VolumeSnapshot, error) {
	if snapshotter == nil || args == nil {
		return nil, fmt.Errorf("snapshotter or args are empty")
	}
	if err := args.Validate(); err != nil {
		return nil, err
	}
	err := snapshotter.Create(ctx, args.SnapshotName, args.Namespace, args.PVCName, &args.VolumeSnapshotClass, true)
	if err != nil {
		// Typo fix: error message previously read "Namspace".
		return nil, errors.Wrapf(err, "CSI Driver failed to create snapshot for PVC (%s) in Namespace (%s)", args.PVCName, args.Namespace)
	}
	snap, err := snapshotter.Get(ctx, args.SnapshotName, args.Namespace)
	if err != nil {
		return nil, errors.Wrapf(err, "Failed to get CSI snapshot (%s) in Namespace (%s)", args.SnapshotName, args.Namespace)
	}
	return snap, nil
}
// CreateFromSourceCheck verifies that the CSI driver supports creating a
// VolumeSnapshot from an existing snapshot's content. It clones the given
// VolumeSnapshotClass with a Retain deletion policy, reads the source handle
// of args.SnapshotName, and creates a clone snapshot from that handle,
// waiting for readiness. Both the cloned class and the cloned snapshot are
// cleaned up (best effort) via defers before returning.
func (c *snapshotCreate) CreateFromSourceCheck(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateFromSourceCheckArgs, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error {
	if c.dynCli == nil {
		return fmt.Errorf("dynCli not initialized")
	}
	if SnapshotGroupVersion == nil || SnapshotGroupVersion.Version == "" {
		return fmt.Errorf("snapshot group version not provided")
	}
	if snapshotter == nil || args == nil {
		return fmt.Errorf("snapshotter or args are nil")
	}
	if err := args.Validate(); err != nil {
		return err
	}
	targetSnapClassName := clonePrefix + args.VolumeSnapshotClass
	err := snapshotter.CloneVolumeSnapshotClass(args.VolumeSnapshotClass, targetSnapClassName, kansnapshot.DeletionPolicyRetain, nil)
	if err != nil {
		return errors.Wrapf(err, "Failed to create a VolumeSnapshotClass to use to restore the snapshot")
	}
	// Best-effort removal of the cloned snapshot class; failures are only logged.
	defer func() {
		VolSnapClassGVR := schema.GroupVersionResource{Group: common.SnapGroupName, Version: SnapshotGroupVersion.Version, Resource: common.VolumeSnapshotClassResourcePlural}
		err := c.dynCli.Resource(VolSnapClassGVR).Delete(ctx, targetSnapClassName, metav1.DeleteOptions{})
		if err != nil {
			fmt.Printf("Delete VSC Error (%s) - (%v)\n", targetSnapClassName, err)
		}
	}()
	snapSrc, err := snapshotter.GetSource(ctx, args.SnapshotName, args.Namespace)
	if err != nil {
		return errors.Wrapf(err, "Failed to get source snapshot source (%s)", args.SnapshotName)
	}
	snapshotCFSCloneName := clonePrefix + args.SnapshotName
	// test the CreateFromSource API
	// Best-effort removal of the cloned snapshot. Uses context.Background so
	// the delete still runs even if ctx has been canceled by then.
	defer func() {
		_, _ = snapshotter.Delete(context.Background(), snapshotCFSCloneName, args.Namespace)
	}()
	src := &kansnapshot.Source{
		Handle:                  snapSrc.Handle,
		Driver:                  snapSrc.Driver,
		VolumeSnapshotClassName: targetSnapClassName,
	}
	err = snapshotter.CreateFromSource(ctx, src, snapshotCFSCloneName, args.Namespace, true)
	if err != nil {
		return errors.Wrapf(err, "Failed to clone snapshot from source (%s)", snapshotCFSCloneName)
	}
	return nil
}
//go:generate mockgen -destination=mocks/mock_cleaner.go -package=mocks . Cleaner

// Cleaner deletes the resources created during a snapshot-restore run.
type Cleaner interface {
	// DeletePVC deletes the named PersistentVolumeClaim.
	DeletePVC(ctx context.Context, pvcName string, namespace string) error
	// DeletePod deletes the named pod.
	DeletePod(ctx context.Context, podName string, namespace string) error
	// DeleteSnapshot deletes the named VolumeSnapshot via the dynamic client,
	// addressed by the discovered snapshot API group version.
	DeleteSnapshot(ctx context.Context, snapshotName string, namespace string, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error
}
// cleanse is the default Cleaner implementation, backed by a typed
// Kubernetes client and a dynamic client.
type cleanse struct {
	kubeCli kubernetes.Interface
	dynCli  dynamic.Interface
}
// DeletePVC removes the named PersistentVolumeClaim from the namespace.
func (c *cleanse) DeletePVC(ctx context.Context, pvcName string, namespace string) error {
	if c.kubeCli == nil {
		return fmt.Errorf("kubeCli not initialized")
	}
	err := c.kubeCli.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, metav1.DeleteOptions{})
	return err
}
// DeletePod removes the named pod from the namespace.
func (c *cleanse) DeletePod(ctx context.Context, podName string, namespace string) error {
	if c.kubeCli == nil {
		return fmt.Errorf("kubeCli not initialized")
	}
	err := c.kubeCli.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
	return err
}
// DeleteSnapshot removes the named VolumeSnapshot via the dynamic client,
// addressing the resource with the provided snapshot API group version.
func (c *cleanse) DeleteSnapshot(ctx context.Context, snapshotName string, namespace string, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error {
	switch {
	case c.dynCli == nil:
		return fmt.Errorf("dynCli not initialized")
	case SnapshotGroupVersion == nil || SnapshotGroupVersion.Version == "":
		return fmt.Errorf("snapshot group version not provided")
	}
	gvr := schema.GroupVersionResource{
		Group:    common.SnapGroupName,
		Version:  SnapshotGroupVersion.Version,
		Resource: common.VolumeSnapshotResourcePlural,
	}
	return c.dynCli.Resource(gvr).Namespace(namespace).Delete(ctx, snapshotName, metav1.DeleteOptions{})
}
//go:generate mockgen -destination=mocks/mock_api_version_fetcher.go -package=mocks . ApiVersionFetcher

// ApiVersionFetcher discovers which snapshot API group version the cluster
// serves.
type ApiVersionFetcher interface {
	// GetCSISnapshotGroupVersion returns the preferred version of the CSI
	// snapshot API group, or an error if the group is not served.
	GetCSISnapshotGroupVersion() (*metav1.GroupVersionForDiscovery, error)
}
// apiVersionFetch is the default ApiVersionFetcher implementation, backed by
// a typed Kubernetes client (used for API discovery).
type apiVersionFetch struct {
	kubeCli kubernetes.Interface
}
// GetCSISnapshotGroupVersion queries API discovery for the CSI snapshot
// group (common.SnapGroupName) and returns its preferred version. An error
// is returned when the client is missing, discovery fails, or the group is
// not served by the cluster.
func (p *apiVersionFetch) GetCSISnapshotGroupVersion() (*metav1.GroupVersionForDiscovery, error) {
	if p.kubeCli == nil {
		return nil, fmt.Errorf("kubeCli not initialized")
	}
	groups, _, err := p.kubeCli.Discovery().ServerGroupsAndResources()
	if err != nil {
		return nil, err
	}
	for _, group := range groups {
		if group.Name == common.SnapGroupName {
			return &group.PreferredVersion, nil
		}
	}
	// Lowercase error string per Go convention (staticcheck ST1005).
	return nil, fmt.Errorf("snapshot API group not found")
}
//go:generate mockgen -destination=mocks/mock_data_validator.go -package=mocks . DataValidator

// DataValidator reads back the marker data written into the test pod so the
// restored volume's contents can be compared with the original.
type DataValidator interface {
	// FetchPodData returns the contents of /data/out.txt inside the pod.
	FetchPodData(podName string, podNamespace string) (string, error)
}
// validateData is the default DataValidator implementation, backed by a
// typed Kubernetes client (used to exec into the pod).
type validateData struct {
	kubeCli kubernetes.Interface
}
// FetchPodData execs into the pod and returns the contents of
// /data/out.txt (stdout of the exec'ed command).
func (p *validateData) FetchPodData(podName string, podNamespace string) (string, error) {
	if p.kubeCli == nil {
		return "", fmt.Errorf("kubeCli not initialized")
	}
	cmd := []string{"sh", "-c", "cat /data/out.txt"}
	stdout, _, err := kankube.Exec(p.kubeCli, podNamespace, podName, "", cmd, nil)
	return stdout, err
}
// getDriverNameFromUVSC extracts the CSI driver name from an unstructured
// VolumeSnapshotClass. The field key differs across snapshot API versions
// (alpha/beta/stable); an unknown version, a missing key, or a non-string
// value all yield "". The three previously duplicated switch arms are
// collapsed into a single key selection plus one type assertion.
func getDriverNameFromUVSC(vsc unstructured.Unstructured, version string) string {
	var driverKey string
	switch version {
	case common.SnapshotAlphaVersion:
		driverKey = common.VolSnapClassAlphaDriverKey
	case common.SnapshotBetaVersion:
		driverKey = common.VolSnapClassBetaDriverKey
	case common.SnapshotStableVersion:
		driverKey = common.VolSnapClassStableDriverKey
	default:
		return ""
	}
	driver, ok := vsc.Object[driverKey].(string)
	if !ok {
		return ""
	}
	return driver
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,904 @@
package csi
import (
"context"
"fmt"
"github.com/golang/mock/gomock"
"github.com/kastenhq/kubestr/pkg/common"
"github.com/kastenhq/kubestr/pkg/csi/mocks"
"github.com/kastenhq/kubestr/pkg/csi/types"
snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
. "gopkg.in/check.v1"
v1 "k8s.io/api/core/v1"
sv1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// TestValidateArgs exercises snapshotRestoreSteps.ValidateArgs with mocked
// validators and version fetchers. It covers the happy path, a provisioner/
// driver mismatch, a failure at each validation step (snapshot class, group
// version, storage class, namespace), and each missing required argument.
func (s *CSITestSuite) TestValidateArgs(c *C) {
	ctx := context.Background()
	type fields struct {
		validateOps *mocks.MockArgumentValidator
		versionOps  *mocks.MockApiVersionFetcher
	}
	for _, tc := range []struct {
		args       *types.CSISnapshotRestoreArgs
		prepare    func(f *fields)
		errChecker Checker
	}{
		{ // valid args
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "sc",
				VolumeSnapshotClass: "vsc",
				Namespace:           "ns",
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil),
					f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return(
						&sv1.StorageClass{
							Provisioner: "p1",
						}, nil),
					f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(
						&metav1.GroupVersionForDiscovery{
							GroupVersion: common.SnapshotAlphaVersion,
						}, nil),
					f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{
						GroupVersion: common.SnapshotAlphaVersion,
					}).Return(&unstructured.Unstructured{
						Object: map[string]interface{}{
							common.VolSnapClassAlphaDriverKey: "p1",
						},
					}, nil),
				)
			},
			errChecker: IsNil,
		},
		{ // driver mismatch
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "sc",
				VolumeSnapshotClass: "vsc",
				Namespace:           "ns",
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil),
					f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return(
						&sv1.StorageClass{
							Provisioner: "p1",
						}, nil),
					f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(
						&metav1.GroupVersionForDiscovery{
							GroupVersion: common.SnapshotAlphaVersion,
						}, nil),
					f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{
						GroupVersion: common.SnapshotAlphaVersion,
					}).Return(&unstructured.Unstructured{
						Object: map[string]interface{}{
							// snapshot class driver ("p2") != storage class provisioner ("p1")
							common.VolSnapClassAlphaDriverKey: "p2",
						},
					}, nil),
				)
			},
			errChecker: NotNil,
		},
		{ // vsc error
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "sc",
				VolumeSnapshotClass: "vsc",
				Namespace:           "ns",
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil),
					f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return(
						&sv1.StorageClass{
							Provisioner: "p1",
						}, nil),
					f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(
						&metav1.GroupVersionForDiscovery{
							GroupVersion: common.SnapshotAlphaVersion,
						}, nil),
					f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{
						GroupVersion: common.SnapshotAlphaVersion,
					}).Return(nil, fmt.Errorf("vsc error")),
				)
			},
			errChecker: NotNil,
		},
		{ // groupversion error
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "sc",
				VolumeSnapshotClass: "vsc",
				Namespace:           "ns",
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil),
					f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return(
						&sv1.StorageClass{
							Provisioner: "p1",
						}, nil),
					f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(
						nil, fmt.Errorf("groupversion error")),
				)
			},
			errChecker: NotNil,
		},
		{ // storage class fetch error
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "sc",
				VolumeSnapshotClass: "vsc",
				Namespace:           "ns",
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil),
					f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return(
						nil, fmt.Errorf("sc error")),
				)
			},
			errChecker: NotNil,
		},
		{ // namespace validation error
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "sc",
				VolumeSnapshotClass: "vsc",
				Namespace:           "ns",
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(fmt.Errorf("ns error")),
				)
			},
			errChecker: NotNil,
		},
		{ // missing storage class
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "",
				VolumeSnapshotClass: "vsc",
				Namespace:           "ns",
			},
			errChecker: NotNil,
		}, { // missing volume snapshot class
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "sc",
				VolumeSnapshotClass: "",
				Namespace:           "ns",
			},
			errChecker: NotNil,
		}, { // missing namespace
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:        "sc",
				VolumeSnapshotClass: "vsc",
				Namespace:           "",
			},
			errChecker: NotNil,
		},
	} {
		ctrl := gomock.NewController(c)
		// NOTE(review): defers in a loop run at function exit, not per
		// iteration, so all controllers are verified together at the end.
		defer ctrl.Finish()
		f := fields{
			validateOps: mocks.NewMockArgumentValidator(ctrl),
			versionOps:  mocks.NewMockApiVersionFetcher(ctrl),
		}
		if tc.prepare != nil {
			tc.prepare(&f)
		}
		stepper := &snapshotRestoreSteps{
			validateOps:     f.validateOps,
			versionFetchOps: f.versionOps,
		}
		err := stepper.ValidateArgs(ctx, tc.args)
		c.Check(err, tc.errChecker)
	}
}
// TestCreateApplication exercises snapshotRestoreSteps.CreateApplication
// with a mocked ApplicationCreator: the success path (PVC + pod created,
// pod ready), a pod-readiness failure, a pod-creation failure, and a
// PVC-creation failure, checking which objects are returned in each case.
func (s *CSITestSuite) TestCreateApplication(c *C) {
	ctx := context.Background()
	type fields struct {
		createAppOps *mocks.MockApplicationCreator
	}
	for _, tc := range []struct {
		args       *types.CSISnapshotRestoreArgs
		genString  string
		prepare    func(f *fields)
		errChecker Checker
		podChecker Checker
		pvcChecker Checker
	}{
		{ // success: PVC and pod created, pod becomes ready
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:   "sc",
				Namespace:      "ns",
				RunAsUser:      100,
				ContainerImage: "image",
			},
			genString: "some string",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{
						GenerateName: originalPVCGenerateName,
						StorageClass: "sc",
						Namespace:    "ns",
					}).Return(&v1.PersistentVolumeClaim{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pvc1",
						},
					}, nil),
					f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{
						GenerateName:   originalPodGenerateName,
						PVCName:        "pvc1",
						Namespace:      "ns",
						Cmd:            "echo 'some string' >> /data/out.txt; sync; tail -f /dev/null",
						RunAsUser:      100,
						ContainerImage: "image",
					}).Return(&v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pod1",
						},
					}, nil),
					f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(nil),
				)
			},
			errChecker: IsNil,
			podChecker: NotNil,
			pvcChecker: NotNil,
		},
		{ // pod never becomes ready: error, but pod and PVC are still returned
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:   "sc",
				Namespace:      "ns",
				RunAsUser:      100,
				ContainerImage: "image",
			},
			genString: "some string",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{
						GenerateName: originalPVCGenerateName,
						StorageClass: "sc",
						Namespace:    "ns",
					}).Return(&v1.PersistentVolumeClaim{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pvc1",
						},
					}, nil),
					f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{
						GenerateName:   originalPodGenerateName,
						PVCName:        "pvc1",
						Namespace:      "ns",
						Cmd:            "echo 'some string' >> /data/out.txt; sync; tail -f /dev/null",
						RunAsUser:      100,
						ContainerImage: "image",
					}).Return(&v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pod1",
						},
					}, nil),
					f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(fmt.Errorf("pod ready error")),
				)
			},
			errChecker: NotNil,
			podChecker: NotNil,
			pvcChecker: NotNil,
		},
		{ // pod creation fails: only the PVC is returned
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:   "sc",
				Namespace:      "ns",
				RunAsUser:      100,
				ContainerImage: "image",
			},
			genString: "some string",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pvc1",
						},
					}, nil),
					f.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("create pod error")),
				)
			},
			errChecker: NotNil,
			podChecker: IsNil,
			pvcChecker: NotNil,
		},
		{ // PVC creation fails: nothing is returned
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:   "sc",
				Namespace:      "ns",
				RunAsUser:      100,
				ContainerImage: "image",
			},
			genString: "some string",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("create pvc error")),
				)
			},
			errChecker: NotNil,
			podChecker: IsNil,
			pvcChecker: IsNil,
		},
	} {
		ctrl := gomock.NewController(c)
		// NOTE(review): defers in a loop run at function exit, not per iteration.
		defer ctrl.Finish()
		f := fields{
			createAppOps: mocks.NewMockApplicationCreator(ctrl),
		}
		if tc.prepare != nil {
			tc.prepare(&f)
		}
		stepper := &snapshotRestoreSteps{
			createAppOps: f.createAppOps,
		}
		pod, pvc, err := stepper.CreateApplication(ctx, tc.args, tc.genString)
		c.Check(err, tc.errChecker)
		c.Check(pod, tc.podChecker)
		c.Check(pvc, tc.pvcChecker)
	}
}
// TestSnapshotApplication exercises snapshotRestoreSteps.SnapshotApplication
// with a mocked SnapshotCreator: snapshot plus create-from-source check,
// snapshot with the CFS check skipped, a CFS-check failure, a snapshot
// creation failure, and a snapshotter construction failure.
func (s *CSITestSuite) TestSnapshotApplication(c *C) {
	ctx := context.Background()
	snapshotter := &fakeSnapshotter{name: "snapshotter"}
	groupversion := &metav1.GroupVersionForDiscovery{
		GroupVersion: "gv",
		Version:      "v",
	}
	type fields struct {
		snapshotOps *mocks.MockSnapshotCreator
	}
	for _, tc := range []struct {
		args         *types.CSISnapshotRestoreArgs
		pvc          *v1.PersistentVolumeClaim
		snapshotName string
		prepare      func(f *fields)
		errChecker   Checker
		snapChecker  Checker
	}{
		{ // success with create-from-source check
			args: &types.CSISnapshotRestoreArgs{
				Namespace:           "ns",
				VolumeSnapshotClass: "vsc",
			},
			pvc: &v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pvc1",
				},
			},
			snapshotName: "snap1",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),
					f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{
						Namespace:           "ns",
						PVCName:             "pvc1",
						VolumeSnapshotClass: "vsc",
						SnapshotName:        "snap1",
					}).Return(&snapv1.VolumeSnapshot{
						ObjectMeta: metav1.ObjectMeta{
							Name: "createdName",
						},
					}, nil),
					f.snapshotOps.EXPECT().CreateFromSourceCheck(gomock.Any(), snapshotter, &types.CreateFromSourceCheckArgs{
						VolumeSnapshotClass: "vsc",
						SnapshotName:        "createdName",
						Namespace:           "ns",
					}, groupversion).Return(nil),
				)
			},
			errChecker:  IsNil,
			snapChecker: NotNil,
		},
		{ // success with the create-from-source check skipped
			args: &types.CSISnapshotRestoreArgs{
				Namespace:           "ns",
				VolumeSnapshotClass: "vsc",
				SkipCFSCheck:        true,
			},
			pvc: &v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pvc1",
				},
			},
			snapshotName: "snap1",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),
					f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{
						Namespace:           "ns",
						PVCName:             "pvc1",
						VolumeSnapshotClass: "vsc",
						SnapshotName:        "snap1",
					}).Return(&snapv1.VolumeSnapshot{
						ObjectMeta: metav1.ObjectMeta{
							Name: "createdName",
						},
					}, nil),
				)
			},
			errChecker:  IsNil,
			snapChecker: NotNil,
		},
		{ // create-from-source check fails: error, but snapshot is returned
			args: &types.CSISnapshotRestoreArgs{
				Namespace:           "ns",
				VolumeSnapshotClass: "vsc",
			},
			pvc: &v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pvc1",
				},
			},
			snapshotName: "snap1",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),
					f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{
						Namespace:           "ns",
						PVCName:             "pvc1",
						VolumeSnapshotClass: "vsc",
						SnapshotName:        "snap1",
					}).Return(&snapv1.VolumeSnapshot{
						ObjectMeta: metav1.ObjectMeta{
							Name: "createdName",
						},
					}, nil),
					f.snapshotOps.EXPECT().CreateFromSourceCheck(gomock.Any(), snapshotter, &types.CreateFromSourceCheckArgs{
						VolumeSnapshotClass: "vsc",
						SnapshotName:        "createdName",
						Namespace:           "ns",
					}, groupversion).Return(fmt.Errorf("cfs error")),
				)
			},
			errChecker:  NotNil,
			snapChecker: NotNil,
		},
		{ // snapshot creation fails: no snapshot returned
			args: &types.CSISnapshotRestoreArgs{
				Namespace:           "ns",
				VolumeSnapshotClass: "vsc",
			},
			pvc: &v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pvc1",
				},
			},
			snapshotName: "snap1",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),
					f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{
						Namespace:           "ns",
						PVCName:             "pvc1",
						VolumeSnapshotClass: "vsc",
						SnapshotName:        "snap1",
					}).Return(nil, fmt.Errorf("create snapshot error")),
				)
			},
			errChecker:  NotNil,
			snapChecker: IsNil,
		},
		{ // snapshotter construction fails
			args: &types.CSISnapshotRestoreArgs{
				Namespace:           "ns",
				VolumeSnapshotClass: "vsc",
			},
			pvc: &v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pvc1",
				},
			},
			snapshotName: "snap1",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.snapshotOps.EXPECT().NewSnapshotter().Return(nil, fmt.Errorf("snapshotter error")),
				)
			},
			errChecker:  NotNil,
			snapChecker: IsNil,
		},
	} {
		ctrl := gomock.NewController(c)
		// NOTE(review): defers in a loop run at function exit, not per iteration.
		defer ctrl.Finish()
		f := fields{
			snapshotOps: mocks.NewMockSnapshotCreator(ctrl),
		}
		if tc.prepare != nil {
			tc.prepare(&f)
		}
		stepper := &snapshotRestoreSteps{
			snapshotCreateOps:    f.snapshotOps,
			SnapshotGroupVersion: groupversion,
		}
		snapshot, err := stepper.SnapshotApplication(ctx, tc.args, tc.pvc, tc.snapshotName)
		c.Check(err, tc.errChecker)
		c.Check(snapshot, tc.snapChecker)
	}
}
// TestRestoreApplication exercises snapshotRestoreSteps.RestoreApplication
// with a mocked ApplicationCreator: cloning a PVC from a VolumeSnapshot
// data source and starting a pod on it. Covers success, pod-readiness
// failure, pod-creation failure, and PVC-creation failure.
func (s *CSITestSuite) TestRestoreApplication(c *C) {
	ctx := context.Background()
	resourceQuantity := resource.MustParse("1Gi")
	snapshotAPIGroup := "snapshot.storage.k8s.io"
	type fields struct {
		createAppOps *mocks.MockApplicationCreator
	}
	for _, tc := range []struct {
		args       *types.CSISnapshotRestoreArgs
		snapshot   *snapv1.VolumeSnapshot
		prepare    func(f *fields)
		errChecker Checker
		podChecker Checker
		pvcChecker Checker
	}{
		{ // success: cloned PVC and pod created, pod becomes ready
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:   "sc",
				Namespace:      "ns",
				RunAsUser:      100,
				ContainerImage: "image",
			},
			snapshot: &snapv1.VolumeSnapshot{
				ObjectMeta: metav1.ObjectMeta{
					Name: "snap1",
				},
				Status: &snapv1.VolumeSnapshotStatus{
					RestoreSize: &resourceQuantity,
				},
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{
						GenerateName: clonedPVCGenerateName,
						StorageClass: "sc",
						Namespace:    "ns",
						DataSource: &v1.TypedLocalObjectReference{
							APIGroup: &snapshotAPIGroup,
							Kind:     "VolumeSnapshot",
							Name:     "snap1",
						},
						RestoreSize: &resourceQuantity,
					}).Return(&v1.PersistentVolumeClaim{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pvc1",
						},
					}, nil),
					f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{
						GenerateName:   clonedPodGenerateName,
						PVCName:        "pvc1",
						Namespace:      "ns",
						Cmd:            "tail -f /dev/null",
						RunAsUser:      100,
						ContainerImage: "image",
					}).Return(&v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pod1",
						},
					}, nil),
					f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(nil),
				)
			},
			errChecker: IsNil,
			podChecker: NotNil,
			pvcChecker: NotNil,
		},
		{ // pod never becomes ready: error, but pod and PVC are still returned
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:   "sc",
				Namespace:      "ns",
				RunAsUser:      100,
				ContainerImage: "image",
			},
			snapshot: &snapv1.VolumeSnapshot{
				ObjectMeta: metav1.ObjectMeta{
					Name: "snap1",
				},
				Status: &snapv1.VolumeSnapshotStatus{
					RestoreSize: &resourceQuantity,
				},
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{
						GenerateName: clonedPVCGenerateName,
						StorageClass: "sc",
						Namespace:    "ns",
						DataSource: &v1.TypedLocalObjectReference{
							APIGroup: &snapshotAPIGroup,
							Kind:     "VolumeSnapshot",
							Name:     "snap1",
						},
						RestoreSize: &resourceQuantity,
					}).Return(&v1.PersistentVolumeClaim{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pvc1",
						},
					}, nil),
					f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{
						GenerateName:   clonedPodGenerateName,
						PVCName:        "pvc1",
						Namespace:      "ns",
						Cmd:            "tail -f /dev/null",
						RunAsUser:      100,
						ContainerImage: "image",
					}).Return(&v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pod1",
						},
					}, nil),
					f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(fmt.Errorf("pod ready error")),
				)
			},
			errChecker: NotNil,
			podChecker: NotNil,
			pvcChecker: NotNil,
		},
		{ // pod creation fails: only the PVC is returned
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:   "sc",
				Namespace:      "ns",
				RunAsUser:      100,
				ContainerImage: "image",
			},
			snapshot: &snapv1.VolumeSnapshot{
				ObjectMeta: metav1.ObjectMeta{
					Name: "snap1",
				},
				Status: &snapv1.VolumeSnapshotStatus{
					RestoreSize: &resourceQuantity,
				},
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{
						ObjectMeta: metav1.ObjectMeta{
							Name: "pvc1",
						},
					}, nil),
					f.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("create pod error")),
				)
			},
			errChecker: NotNil,
			podChecker: IsNil,
			pvcChecker: NotNil,
		},
		{ // PVC creation fails: nothing is returned
			args: &types.CSISnapshotRestoreArgs{
				StorageClass:   "sc",
				Namespace:      "ns",
				RunAsUser:      100,
				ContainerImage: "image",
			},
			snapshot: &snapv1.VolumeSnapshot{
				ObjectMeta: metav1.ObjectMeta{
					Name: "snap1",
				},
				Status: &snapv1.VolumeSnapshotStatus{
					RestoreSize: &resourceQuantity,
				},
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("create pvc error")),
				)
			},
			errChecker: NotNil,
			podChecker: IsNil,
			pvcChecker: IsNil,
		},
	} {
		ctrl := gomock.NewController(c)
		// NOTE(review): defers in a loop run at function exit, not per iteration.
		defer ctrl.Finish()
		f := fields{
			createAppOps: mocks.NewMockApplicationCreator(ctrl),
		}
		if tc.prepare != nil {
			tc.prepare(&f)
		}
		stepper := &snapshotRestoreSteps{
			createAppOps: f.createAppOps,
		}
		pod, pvc, err := stepper.RestoreApplication(ctx, tc.args, tc.snapshot)
		c.Check(err, tc.errChecker)
		c.Check(pod, tc.podChecker)
		c.Check(pvc, tc.pvcChecker)
	}
}
// TestCleanup exercises snapshotRestoreSteps.Cleanup with a mocked Cleaner:
// nil results (no deletes expected), all deletes succeeding, and all deletes
// failing (Cleanup is best-effort, so failures must not panic or abort).
func (s *CSITestSuite) TestCleanup(c *C) {
	ctx := context.Background()
	groupversion := &metav1.GroupVersionForDiscovery{
		GroupVersion: "gv",
		Version:      "v",
	}
	type fields struct {
		cleanerOps *mocks.MockCleaner
	}
	for _, tc := range []struct {
		results *types.CSISnapshotRestoreResults
		prepare func(f *fields)
	}{
		{ // nil results: nothing to clean up, no mock calls expected
			results: nil,
		},
		{ // full results, every delete succeeds
			results: &types.CSISnapshotRestoreResults{
				OriginalPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pvc1",
						Namespace: "ns",
					},
				},
				OriginalPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod1",
						Namespace: "ns",
					},
				},
				ClonedPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pvc2",
						Namespace: "ns",
					},
				},
				ClonedPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod2",
						Namespace: "ns",
					},
				},
				Snapshot: &snapv1.VolumeSnapshot{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "snapshot",
						Namespace: "ns",
					},
				},
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc1", "ns").Return(nil),
					f.cleanerOps.EXPECT().DeletePod(ctx, "pod1", "ns").Return(nil),
					f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc2", "ns").Return(nil),
					f.cleanerOps.EXPECT().DeletePod(ctx, "pod2", "ns").Return(nil),
					f.cleanerOps.EXPECT().DeleteSnapshot(ctx, "snapshot", "ns", groupversion).Return(nil),
				)
			},
		},
		{ // full results, every delete fails: cleanup still attempts them all
			results: &types.CSISnapshotRestoreResults{
				OriginalPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pvc1",
						Namespace: "ns",
					},
				},
				OriginalPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod1",
						Namespace: "ns",
					},
				},
				ClonedPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pvc2",
						Namespace: "ns",
					},
				},
				ClonedPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod2",
						Namespace: "ns",
					},
				},
				Snapshot: &snapv1.VolumeSnapshot{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "snapshot",
						Namespace: "ns",
					},
				},
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc1", "ns").Return(fmt.Errorf("err")),
					f.cleanerOps.EXPECT().DeletePod(ctx, "pod1", "ns").Return(fmt.Errorf("err")),
					f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc2", "ns").Return(fmt.Errorf("err")),
					f.cleanerOps.EXPECT().DeletePod(ctx, "pod2", "ns").Return(fmt.Errorf("err")),
					f.cleanerOps.EXPECT().DeleteSnapshot(ctx, "snapshot", "ns", groupversion).Return(fmt.Errorf("err")),
				)
			},
		},
	} {
		ctrl := gomock.NewController(c)
		// NOTE(review): defers in a loop run at function exit, not per iteration.
		defer ctrl.Finish()
		f := fields{
			cleanerOps: mocks.NewMockCleaner(ctrl),
		}
		if tc.prepare != nil {
			tc.prepare(&f)
		}
		stepper := &snapshotRestoreSteps{
			cleanerOps:           f.cleanerOps,
			SnapshotGroupVersion: groupversion,
		}
		stepper.Cleanup(ctx, tc.results)
	}
}
// TestValidateData exercises snapshotRestoreSteps.ValidateData with a mocked
// DataValidator: the fetched pod data matching the expected string, a
// mismatch, and a fetch error.
func (s *CSITestSuite) TestValidateData(c *C) {
	ctx := context.Background()
	type fields struct {
		validatorOps *mocks.MockDataValidator
	}
	for _, tc := range []struct {
		prepare    func(f *fields)
		pod        *v1.Pod
		data       string
		errChecker Checker
	}{
		{ // fetched data matches the expected string
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pod",
					Namespace: "ns",
				},
			},
			data: "somedata",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validatorOps.EXPECT().FetchPodData("pod", "ns").Return("somedata", nil),
				)
			},
			errChecker: IsNil,
		},
		{ // fetched data does not match
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pod",
					Namespace: "ns",
				},
			},
			data: "somedata",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validatorOps.EXPECT().FetchPodData("pod", "ns").Return("someotherdata", nil),
				)
			},
			errChecker: NotNil,
		},
		{ // fetch itself fails
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pod",
					Namespace: "ns",
				},
			},
			data: "somedata",
			prepare: func(f *fields) {
				gomock.InOrder(
					f.validatorOps.EXPECT().FetchPodData("pod", "ns").Return("", fmt.Errorf("error")),
				)
			},
			errChecker: NotNil,
		},
	} {
		ctrl := gomock.NewController(c)
		// NOTE(review): defers in a loop run at function exit, not per iteration.
		defer ctrl.Finish()
		f := fields{
			validatorOps: mocks.NewMockDataValidator(ctrl),
		}
		if tc.prepare != nil {
			tc.prepare(&f)
		}
		stepper := &snapshotRestoreSteps{
			dataValidatorOps: f.validatorOps,
		}
		err := stepper.ValidateData(ctx, tc.pod, tc.data)
		c.Check(err, tc.errChecker)
	}
}

View file

@ -0,0 +1,421 @@
package csi
import (
"context"
"fmt"
"testing"
"github.com/golang/mock/gomock"
"github.com/kastenhq/kubestr/pkg/csi/mocks"
"github.com/kastenhq/kubestr/pkg/csi/types"
snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
. "gopkg.in/check.v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
fakedynamic "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
// Test hooks the gocheck test suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }
// CSITestSuite is the gocheck suite that groups the CSI snapshot-restore tests.
type CSITestSuite struct{}

// Register the suite with gocheck so its test methods are discovered.
var _ = Suite(&CSITestSuite{})
// TestRunSnapshotRestoreHelper drives SnapshotRestoreRunner.RunSnapshotRestoreHelper
// against a mocked SnapshotRestoreStepper and checks both the returned error and
// the CSISnapshotRestoreResults accumulated at each stage of the
// create -> validate -> snapshot -> restore -> validate (-> cleanup) sequence.
// Cases cover every early-exit point, including partial results returned
// alongside errors.
func (s *CSITestSuite) TestRunSnapshotRestoreHelper(c *C) {
	ctx := context.Background()
	type fields struct {
		stepperOps *mocks.MockSnapshotRestoreStepper
	}
	for _, tc := range []struct {
		kubeCli    kubernetes.Interface
		dynCli     dynamic.Interface
		args       *types.CSISnapshotRestoreArgs
		prepare    func(f *fields)
		result     *types.CSISnapshotRestoreResults
		errChecker Checker
	}{
		{ // success: full pipeline runs and Cleanup is invoked
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: true,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(
						&v1.Pod{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pod1",
								Namespace: "ns",
							},
						},
						&v1.PersistentVolumeClaim{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pvc1",
								Namespace: "ns",
							},
						},
						nil,
					),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), &v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "pod1",
							Namespace: "ns",
						},
					}, gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(),
						&v1.PersistentVolumeClaim{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pvc1",
								Namespace: "ns",
							},
						}, gomock.Any(),
					).Return(
						&snapv1.VolumeSnapshot{
							ObjectMeta: metav1.ObjectMeta{
								Name: "snapshot",
							},
						},
						nil,
					),
					f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(),
						&snapv1.VolumeSnapshot{
							ObjectMeta: metav1.ObjectMeta{
								Name: "snapshot",
							},
						},
					).Return(
						&v1.Pod{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pod2",
								Namespace: "ns",
							},
						},
						&v1.PersistentVolumeClaim{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pvc2",
								Namespace: "ns",
							},
						},
						nil,
					),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), &v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "pod2",
							Namespace: "ns",
						},
					}, gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any()).Return(),
				)
			},
			result: &types.CSISnapshotRestoreResults{
				OriginalPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pvc1",
						Namespace: "ns",
					},
				},
				OriginalPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod1",
						Namespace: "ns",
					},
				},
				ClonedPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pvc2",
						Namespace: "ns",
					},
				},
				ClonedPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod2",
						Namespace: "ns",
					},
				},
				Snapshot: &snapv1.VolumeSnapshot{
					ObjectMeta: metav1.ObjectMeta{
						Name: "snapshot",
					},
				},
			},
			errChecker: IsNil,
		},
		{ // no cleanup: Cleanup must not be called when args.Cleanup is false
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),
					f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
				)
			},
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: IsNil,
		},
		{ // restored data validation fails
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),
					f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("validation error")),
				)
			},
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: NotNil,
		},
		{ // restore error, objects still returned and recorded in results
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),
					f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(
						&v1.Pod{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pod2",
								Namespace: "ns",
							},
						},
						&v1.PersistentVolumeClaim{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pvc2",
								Namespace: "ns",
							},
						},
						fmt.Errorf("restore error"),
					),
				)
			},
			result: &types.CSISnapshotRestoreResults{
				ClonedPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pvc2",
						Namespace: "ns",
					},
				},
				ClonedPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod2",
						Namespace: "ns",
					},
				},
			},
			errChecker: NotNil,
		},
		{ // restore error, no objects returned
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),
					f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf("restore error")),
				)
			},
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: NotNil,
		},
		{ // snapshot error, object still returned and recorded in results
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
						&snapv1.VolumeSnapshot{
							ObjectMeta: metav1.ObjectMeta{
								Name: "snapshot",
							},
						},
						fmt.Errorf("snapshot error"),
					),
				)
			},
			result: &types.CSISnapshotRestoreResults{
				Snapshot: &snapv1.VolumeSnapshot{
					ObjectMeta: metav1.ObjectMeta{
						Name: "snapshot",
					},
				},
			},
			errChecker: NotNil,
		},
		{ // snapshot error, object not returned
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("snapshot error")),
				)
			},
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: NotNil,
		},
		{ // created data validation error
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),
					f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("validation error")),
				)
			},
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: NotNil,
		},
		{ // create error, objects still returned and recorded in results
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(
						&v1.Pod{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pod1",
								Namespace: "ns",
							},
						},
						&v1.PersistentVolumeClaim{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "pvc1",
								Namespace: "ns",
							},
						},
						fmt.Errorf("create error"),
					),
				)
			},
			result: &types.CSISnapshotRestoreResults{
				OriginalPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pvc1",
						Namespace: "ns",
					},
				},
				OriginalPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod1",
						Namespace: "ns",
					},
				},
			},
			errChecker: NotNil,
		},
		{ // create error, objects not returned
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),
					f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf("create error")),
				)
			},
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: NotNil,
		},
		{ // args validate error
			kubeCli: fake.NewSimpleClientset(),
			dynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			args: &types.CSISnapshotRestoreArgs{
				Cleanup: false,
			},
			prepare: func(f *fields) {
				gomock.InOrder(
					f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(fmt.Errorf("create error")),
				)
			},
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: NotNil,
		},
		{ // empty cli
			kubeCli:    nil,
			dynCli:     fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: NotNil,
		},
		{ // empty dyncli
			kubeCli:    fake.NewSimpleClientset(),
			dynCli:     nil,
			result:     &types.CSISnapshotRestoreResults{},
			errChecker: NotNil,
		},
	} {
		ctrl := gomock.NewController(c)
		// NOTE(review): defer inside a loop runs at function exit, not per
		// iteration; all controllers are finished together at the end.
		defer ctrl.Finish()
		f := fields{
			stepperOps: mocks.NewMockSnapshotRestoreStepper(ctrl),
		}
		if tc.prepare != nil {
			tc.prepare(&f)
		}
		runner := &SnapshotRestoreRunner{
			KubeCli: tc.kubeCli,
			DynCli:  tc.dynCli,
			srSteps: f.stepperOps,
		}
		result, err := runner.RunSnapshotRestoreHelper(ctx, tc.args)
		c.Check(err, tc.errChecker)
		c.Assert(result, DeepEquals, tc.result)
	}
}
// TestRunSnapshotRestoreRunner verifies that a zero-value runner (no
// Kubernetes clients configured) rejects RunSnapshotRestore with an error.
func (s *CSITestSuite) TestRunSnapshotRestoreRunner(c *C) {
	runner := &SnapshotRestoreRunner{}
	_, err := runner.RunSnapshotRestore(context.Background(), nil)
	c.Check(err, NotNil)
}

View file

@ -0,0 +1,92 @@
package types
import (
"fmt"
snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
// CSISnapshotRestoreArgs holds the user-supplied parameters for a CSI
// snapshot/restore capability check.
type CSISnapshotRestoreArgs struct {
	StorageClass        string // StorageClass used to provision the source PVC (required)
	VolumeSnapshotClass string // VolumeSnapshotClass used to snapshot it (required)
	Namespace           string // namespace in which all test objects are created (required)
	RunAsUser           int64  // optional UID the test pods run as
	ContainerImage      string // optional image override for the test pods
	Cleanup             bool   // delete the created objects when the run finishes
	SkipCFSCheck        bool   // skip the create-from-source capability check
}

// Validate returns an error when any of the required fields
// (StorageClass, VolumeSnapshotClass, Namespace) is unset.
func (a *CSISnapshotRestoreArgs) Validate() error {
	if a.StorageClass == "" || a.VolumeSnapshotClass == "" || a.Namespace == "" {
		// Fixed typo in the message: "Require" -> "Required".
		return fmt.Errorf("Required fields are missing. (StorageClass, VolumeSnapshotClass, Namespace)")
	}
	return nil
}
// CSISnapshotRestoreResults collects every Kubernetes object created during a
// snapshot/restore run so callers can report on and clean up them.
type CSISnapshotRestoreResults struct {
	OriginalPVC *v1.PersistentVolumeClaim // PVC the source application wrote to
	OriginalPod *v1.Pod                   // pod that wrote the test data
	Snapshot    *snapv1.VolumeSnapshot    // snapshot taken of OriginalPVC
	ClonedPVC   *v1.PersistentVolumeClaim // PVC restored from Snapshot
	ClonedPod   *v1.Pod                   // pod used to validate the restored data
}
// CreatePVCArgs describes a PVC to create for the check, optionally restored
// from a data source (e.g. a VolumeSnapshot).
type CreatePVCArgs struct {
	GenerateName string                       // name prefix for the generated PVC name (required)
	StorageClass string                       // StorageClass to provision from (required)
	Namespace    string                       // target namespace (required)
	DataSource   *v1.TypedLocalObjectReference // optional source to restore from
	RestoreSize  *resource.Quantity            // optional requested size when restoring
}

// Validate returns an error when any required field is unset.
func (c *CreatePVCArgs) Validate() error {
	if c.GenerateName == "" || c.StorageClass == "" || c.Namespace == "" {
		return fmt.Errorf("Invalid CreatePVCArgs (%v)", c)
	}
	return nil
}
// CreatePodArgs describes a pod to create for the snapshot/restore check.
type CreatePodArgs struct {
	GenerateName   string // name prefix for the generated pod name (required)
	PVCName        string // PVC to mount (required)
	Namespace      string // target namespace (required)
	Cmd            string // command the container runs (required)
	RunAsUser      int64  // optional UID the container runs as
	ContainerImage string // optional image override
}

// Validate returns an error unless every required field is populated.
func (c *CreatePodArgs) Validate() error {
	for _, required := range []string{c.GenerateName, c.PVCName, c.Namespace, c.Cmd} {
		if required == "" {
			return fmt.Errorf("Invalid CreatePodArgs (%v)", c)
		}
	}
	return nil
}
// CreateSnapshotArgs describes the VolumeSnapshot to take of a PVC.
type CreateSnapshotArgs struct {
	Namespace           string // namespace of the PVC and snapshot (required)
	PVCName             string // PVC to snapshot (required)
	VolumeSnapshotClass string // VolumeSnapshotClass to use (required)
	SnapshotName        string // name of the snapshot to create (required)
}

// Validate returns an error unless every field is populated.
func (c *CreateSnapshotArgs) Validate() error {
	for _, required := range []string{c.Namespace, c.PVCName, c.VolumeSnapshotClass, c.SnapshotName} {
		if required == "" {
			return fmt.Errorf("Invalid CreateSnapshotArgs (%v)", c)
		}
	}
	return nil
}
// CreateFromSourceCheckArgs describes the snapshot used to verify that the
// driver supports creating a new snapshot resource from an existing source.
type CreateFromSourceCheckArgs struct {
	VolumeSnapshotClass string // VolumeSnapshotClass to use (required)
	SnapshotName        string // source snapshot name (required)
	Namespace           string // namespace of the snapshot (required)
}

// Validate returns an error unless every field is populated.
func (c *CreateFromSourceCheckArgs) Validate() error {
	for _, required := range []string{c.VolumeSnapshotClass, c.SnapshotName, c.Namespace} {
		if required == "" {
			return fmt.Errorf("Invalid CreateFromSourceCheckArgs (%v)", c)
		}
	}
	return nil
}

1
pkg/fio/_config.yml Normal file
View file

@ -0,0 +1 @@
baseurl: "/fio"

21
pkg/fio/dbench_license Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018 LogDNA
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

382
pkg/fio/fio.go Normal file
View file

@ -0,0 +1,382 @@
package fio
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"time"
"github.com/briandowns/spinner"
kankube "github.com/kanisterio/kanister/pkg/kube"
"github.com/kastenhq/kubestr/pkg/common"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
sv1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
	// DefaultNS describes the default namespace
	DefaultNS = "default"
	// PodNamespaceEnvKey describes the pod namespace env variable
	PodNamespaceEnvKey = "POD_NAMESPACE"
	// DefaultFIOJob describes the default FIO job
	DefaultFIOJob = "default-fio"
	// KubestrFIOJobGenName describes the generate name
	KubestrFIOJobGenName = "kubestr-fio"
	// ConfigMapJobKey is the default fio job key
	ConfigMapJobKey = "fiojob"
	// DefaultPVCSize is the default PVC size
	DefaultPVCSize = "100Gi"
	// PVCGenerateName is the name to generate for the PVC
	PVCGenerateName = "kubestr-fio-pvc-"
	// PodGenerateName is the name to generate for the POD
	PodGenerateName = "kubestr-fio-pod-"
	// ContainerName is the name of the container that runs the job
	ContainerName = "kubestr-fio"
	// PodNameEnvKey is the name of the variable used to get the current pod name
	PodNameEnvKey = "HOSTNAME"
	// ConfigMapMountPath is the path where we mount the configmap
	ConfigMapMountPath = "/etc/fio-config"
	// VolumeMountPath is the path where we mount the volume
	VolumeMountPath = "/dataset"
	// CreatedByFIOLabel is the key that describes the label used to mark configmaps
	CreatedByFIOLabel = "createdbyfio"
)
// FIO is an interface that represents FIO related commands
type FIO interface {
	// RunFio provisions a PVC/pod pair, runs the configured fio job against
	// the mounted volume, and returns the parsed results.
	RunFio(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error)
}
// FIOrunner implements FIO
type FIOrunner struct {
	Cli      kubernetes.Interface // Kubernetes client used for all API calls
	fioSteps fioSteps             // individual steps; swapped for a fake in tests
}
// RunFIOArgs are the user-supplied parameters for an fio benchmark run.
type RunFIOArgs struct {
	StorageClass   string // StorageClass used to provision the scratch PVC (required)
	Size           string // size of the scratch PVC, e.g. "100Gi" (required)
	Namespace      string // namespace in which the test objects are created (required)
	FIOJobFilepath string // optional path to a custom fio job file
	FIOJobName     string // optional name of a built-in fio job (see fioJobs)
	Image          string // optional container image override
}

// Validate returns an error when any of the required fields
// (StorageClass, Size, Namespace) is unset.
func (a *RunFIOArgs) Validate() error {
	if a.StorageClass == "" || a.Size == "" || a.Namespace == "" {
		// Fixed typo in the message: "Require" -> "Required".
		return fmt.Errorf("Required fields are missing. (StorageClass, Size, Namespace)")
	}
	return nil
}
// RunFIOResult is the outcome of a fio run: the inputs that shaped the run
// plus the parsed fio JSON output.
type RunFIOResult struct {
	Size         string            `json:"size,omitempty"`         // PVC size the test ran against
	StorageClass *sv1.StorageClass `json:"storageClass,omitempty"` // resolved StorageClass object
	FioConfig    string            `json:"fioConfig,omitempty"`    // fio job file contents that were executed
	Result       FioResult         `json:"result,omitempty"`       // parsed fio output
}
// RunFio wires up the production step implementations (real cluster client,
// pod readiness waiter, and in-pod executor) and delegates to RunFioHelper.
func (f *FIOrunner) RunFio(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error) {
	f.fioSteps = &fioStepper{
		cli:          f.Cli,
		podReady:     &podReadyChecker{cli: f.Cli},
		kubeExecutor: &kubeExecutor{cli: f.Cli},
	}
	return f.RunFioHelper(ctx, args)
}
// RunFioHelper orchestrates a full fio run: validate inputs, create the job
// ConfigMap, provision a PVC, launch the runner pod, execute fio, and return
// the parsed results. The ConfigMap, PVC, and pod are deleted on exit via
// defers (using context.TODO() so cleanup still runs after ctx cancellation).
func (f *FIOrunner) RunFioHelper(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error) {
	// create a configmap with test parameters
	if f.Cli == nil { // for UT purposes
		return nil, fmt.Errorf("cli uninitialized")
	}
	if err := args.Validate(); err != nil {
		return nil, err
	}
	if err := f.fioSteps.validateNamespace(ctx, args.Namespace); err != nil {
		return nil, errors.Wrapf(err, "Unable to find namespace (%s)", args.Namespace)
	}
	sc, err := f.fioSteps.storageClassExists(ctx, args.StorageClass)
	if err != nil {
		return nil, errors.Wrap(err, "Cannot find StorageClass")
	}
	configMap, err := f.fioSteps.loadConfigMap(ctx, args)
	if err != nil {
		return nil, errors.Wrap(err, "Unable to create a ConfigMap")
	}
	defer func() {
		_ = f.fioSteps.deleteConfigMap(context.TODO(), configMap, args.Namespace)
	}()
	testFileName, err := fioTestFilename(configMap.Data)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to get test file name.")
	}
	pvc, err := f.fioSteps.createPVC(ctx, args.StorageClass, args.Size, args.Namespace)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to create PVC")
	}
	defer func() {
		_ = f.fioSteps.deletePVC(context.TODO(), pvc.Name, args.Namespace)
	}()
	fmt.Println("PVC created", pvc.Name)
	pod, err := f.fioSteps.createPod(ctx, pvc.Name, configMap.Name, testFileName, args.Namespace, args.Image)
	// The deletion defer is registered before the error check on purpose, so
	// a partially created pod is still cleaned up when createPod fails.
	defer func() {
		// BUGFIX: createPod can return (nil, err) — e.g. when waiting for pod
		// readiness fails — so guard before dereferencing pod.Name to avoid a
		// nil-pointer panic inside the deferred cleanup.
		if pod != nil {
			_ = f.fioSteps.deletePod(context.TODO(), pod.Name, args.Namespace)
		}
	}()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to create POD")
	}
	fmt.Println("Pod created", pod.Name)
	fmt.Printf("Running FIO test (%s) on StorageClass (%s) with a PVC of Size (%s)\n", testFileName, args.StorageClass, args.Size)
	fioOutput, err := f.fioSteps.runFIOCommand(ctx, pod.Name, ContainerName, testFileName, args.Namespace)
	if err != nil {
		return nil, errors.Wrap(err, "Failed while running FIO test.")
	}
	return &RunFIOResult{
		Size:         args.Size,
		StorageClass: sc,
		FioConfig:    configMap.Data[testFileName],
		Result:       fioOutput,
	}, nil
}
// fioSteps enumerates the individual operations RunFioHelper performs, so the
// orchestration can be unit-tested against a fake implementation.
type fioSteps interface {
	validateNamespace(ctx context.Context, namespace string) error
	storageClassExists(ctx context.Context, storageClass string) (*sv1.StorageClass, error)
	loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error)
	createPVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, error)
	deletePVC(ctx context.Context, pvcName, namespace string) error
	createPod(ctx context.Context, pvcName, configMapName, testFileName, namespace string, image string) (*v1.Pod, error)
	deletePod(ctx context.Context, podName, namespace string) error
	runFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (FioResult, error)
	deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) error
}
// fioStepper is the production implementation of fioSteps, backed by a real
// Kubernetes client plus pluggable readiness/exec helpers.
type fioStepper struct {
	cli          kubernetes.Interface     // Kubernetes API client
	podReady     waitForPodReadyInterface // waits for the runner pod to become Ready
	kubeExecutor kubeExecInterface        // executes fio inside the runner pod
}
// validateNamespace verifies that the target namespace exists by fetching it;
// the API error (e.g. NotFound) is returned unchanged.
func (s *fioStepper) validateNamespace(ctx context.Context, namespace string) error {
	_, err := s.cli.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
	return err
}
// storageClassExists fetches the named StorageClass, returning the API error
// (e.g. NotFound) when it does not exist.
func (s *fioStepper) storageClassExists(ctx context.Context, storageClass string) (*sv1.StorageClass, error) {
	return s.cli.StorageV1().StorageClasses().Get(ctx, storageClass, metav1.GetOptions{})
}
// loadConfigMap builds and creates the ConfigMap carrying the fio job file.
// The job body is chosen by precedence: a user-supplied file path, then a
// named built-in job, then the package default. The ConfigMap is labeled so
// deleteConfigMap later only removes maps this tool created.
func (s *fioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) {
	cm := &v1.ConfigMap{
		Data: make(map[string]string),
	}
	if args.FIOJobFilepath != "" {
		jobFile, err := ioutil.ReadFile(args.FIOJobFilepath)
		if err != nil {
			return nil, errors.Wrap(err, "File reading error")
		}
		cm.Data[filepath.Base(args.FIOJobFilepath)] = string(jobFile)
	} else if args.FIOJobName != "" {
		job, found := fioJobs[args.FIOJobName]
		if !found {
			return nil, fmt.Errorf("FIO job not found- (%s)", args.FIOJobName)
		}
		cm.Data[args.FIOJobName] = job
	} else {
		cm.Data[DefaultFIOJob] = fioJobs[DefaultFIOJob]
	}
	cm.GenerateName = KubestrFIOJobGenName
	cm.Labels = map[string]string{CreatedByFIOLabel: "true"}
	return s.cli.CoreV1().ConfigMaps(args.Namespace).Create(ctx, cm, metav1.CreateOptions{})
}
// createPVC provisions a ReadWriteOnce PVC of the requested size from the
// given StorageClass; the returned object carries the server-generated name.
func (s *fioStepper) createPVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, error) {
	sizeResource, err := resource.ParseQuantity(size)
	if err != nil {
		return nil, errors.Wrapf(err, "Unable to parse PVC size (%s)", size)
	}
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: PVCGenerateName,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &storageclass,
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): sizeResource,
				},
			},
		},
	}
	return s.cli.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{})
}
// deletePVC removes the named PVC from the namespace.
func (s *fioStepper) deletePVC(ctx context.Context, pvcName, namespace string) error {
	return s.cli.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, metav1.DeleteOptions{})
}
// createPod launches the fio runner pod with the scratch PVC mounted at
// VolumeMountPath and the job ConfigMap at ConfigMapMountPath, waits for it
// to become Ready, and returns the refreshed pod object.
// NOTE: on readiness failure it returns (nil, err) even though a pod was
// created — callers must handle a nil pod alongside the error.
func (s *fioStepper) createPod(ctx context.Context, pvcName, configMapName, testFileName, namespace string, image string) (*v1.Pod, error) {
	if pvcName == "" || configMapName == "" || testFileName == "" {
		return nil, fmt.Errorf("Create pod missing required arguments.")
	}
	if image == "" {
		image = common.DefaultPodImage
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: PodGenerateName,
			Namespace:    namespace,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: ContainerName,
				// Keep the container alive indefinitely so fio can be exec'ed
				// into it after the pod is Ready.
				Command: []string{"/bin/sh"},
				Args:    []string{"-c", "tail -f /dev/null"},
				VolumeMounts: []v1.VolumeMount{
					{Name: "persistent-storage", MountPath: VolumeMountPath},
					{Name: "config-map", MountPath: ConfigMapMountPath},
				},
				Image: image,
			}},
			Volumes: []v1.Volume{
				{
					Name: "persistent-storage",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName},
					},
				},
				{
					Name: "config-map",
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{
							LocalObjectReference: v1.LocalObjectReference{
								Name: configMapName,
							},
						},
					},
				},
			},
		},
	}
	podRes, err := s.cli.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return podRes, err
	}
	err = s.podReady.waitForPodReady(ctx, namespace, podRes.Name)
	if err != nil {
		return nil, err
	}
	// Re-fetch so the caller sees the pod's post-scheduling state.
	podRes, err = s.cli.CoreV1().Pods(namespace).Get(ctx, podRes.Name, metav1.GetOptions{})
	if err != nil {
		return podRes, err
	}
	return podRes, nil
}
// deletePod removes the named pod from the namespace.
func (s *fioStepper) deletePod(ctx context.Context, podName, namespace string) error {
	return s.cli.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
}
// runFIOCommand executes the fio job file inside the runner pod (with JSON
// output) and parses the result. A terminal spinner is displayed while the
// long-running exec is in flight.
func (s *fioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (FioResult, error) {
	jobFilePath := fmt.Sprintf("%s/%s", ConfigMapMountPath, testFileName)
	command := []string{"fio", "--directory", VolumeMountPath, jobFilePath, "--output-format=json"}
	done := make(chan bool, 1)
	var fioOut FioResult
	var stdout string
	var stderr string
	var err error
	timestart := time.Now()
	go func() {
		stdout, stderr, err = s.kubeExecutor.exec(namespace, podName, containerName, command)
		// Any stderr output is treated as a failure, even when exec itself
		// reported no error.
		if err != nil || stderr != "" {
			if err == nil {
				err = fmt.Errorf("stderr when running FIO")
			}
			err = errors.Wrapf(err, "Error running command:(%v), stderr:(%s)", command, stderr)
		}
		done <- true
	}()
	spin := spinner.New(spinner.CharSets[9], 100*time.Millisecond)
	spin.Start()
	// NOTE(review): this wait does not select on ctx.Done(), so cancellation
	// of ctx does not interrupt the exec — confirm whether that is intended.
	<-done
	spin.Stop()
	elapsed := time.Since(timestart)
	fmt.Println("Elapsed time-", elapsed)
	if err != nil {
		return fioOut, err
	}
	err = json.Unmarshal([]byte(stdout), &fioOut)
	if err != nil {
		return fioOut, errors.Wrapf(err, "Unable to parse fio output into json.")
	}
	return fioOut, nil
}
// deleteConfigMap deletes the given ConfigMap, but only when it carries the
// CreatedByFIOLabel="true" marker set by loadConfigMap — user-provided maps
// are left untouched.
func (s *fioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) error {
	// Indexing a (possibly nil) map yields "" for a missing key, which is the
	// same outcome as the original comma-ok check.
	if configMap.Labels[CreatedByFIOLabel] != "true" {
		return nil
	}
	return s.cli.CoreV1().ConfigMaps(namespace).Delete(ctx, configMap.Name, metav1.DeleteOptions{})
}
// fioTestFilename returns the single key of the ConfigMap data, which is the
// fio job file name. It errors when the map does not hold exactly one entry.
func fioTestFilename(configMap map[string]string) (string, error) {
	if len(configMap) != 1 {
		return "", fmt.Errorf("Unable to find fio file in configmap/more than one found %v", configMap)
	}
	for key := range configMap {
		return key, nil
	}
	return "", nil // unreachable: the map holds exactly one key here
}
// waitForPodReadyInterface abstracts pod-readiness waiting so it can be
// faked in tests.
type waitForPodReadyInterface interface {
	waitForPodReady(ctx context.Context, namespace string, name string) error
}

// podReadyChecker is the production waitForPodReadyInterface, delegating to
// kanister's kube helpers.
type podReadyChecker struct {
	cli kubernetes.Interface
}

// waitForPodReady delegates to kankube.WaitForPodReady for the named pod.
func (p *podReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) error {
	return kankube.WaitForPodReady(ctx, p.cli, namespace, name)
}

// kubeExecInterface abstracts in-pod command execution so it can be faked
// in tests.
type kubeExecInterface interface {
	exec(namespace, podName, containerName string, command []string) (string, string, error)
}

// kubeExecutor is the production kubeExecInterface, delegating to kanister's
// kube helpers.
type kubeExecutor struct {
	cli kubernetes.Interface
}

// exec runs command in the given container and returns (stdout, stderr, err).
func (k *kubeExecutor) exec(namespace, podName, containerName string, command []string) (string, string, error) {
	return kankube.Exec(k.cli, namespace, podName, containerName, command, nil)
}

68
pkg/fio/fio_jobs.go Normal file
View file

@ -0,0 +1,68 @@
package fio
var fioJobs = map[string]string{
DefaultFIOJob: testJob1,
"randrw": randReadWrite,
}
// testJob1 is the default fio job: four 15s (plus 2s ramp) phases measuring
// random-read IOPS, random-write IOPS (4K blocks), and random-read /
// random-write bandwidth (128K blocks) over a 2G working set.
var testJob1 = `[global]
randrepeat=0
verify=0
ioengine=libaio
direct=1
gtod_reduce=1
[job1]
name=read_iops
bs=4K
iodepth=64
size=2G
readwrite=randread
time_based
ramp_time=2s
runtime=15s
[job2]
name=write_iops
bs=4K
iodepth=64
size=2G
readwrite=randwrite
time_based
ramp_time=2s
runtime=15s
[job3]
name=read_bw
bs=128K
iodepth=64
size=2G
readwrite=randread
time_based
ramp_time=2s
runtime=15s
[job4]
name=write_bw
bs=128k
iodepth=64
size=2G
readwrite=randwrite
time_based
ramp_time=2s
runtime=15s
`
// randReadWrite is the "randrw" job: a single 15s (plus 2s ramp) mixed
// random read/write phase (75% reads, 4K blocks) over a 4G working set.
var randReadWrite = `[global]
randrepeat=0
verify=0
ioengine=libaio
direct=1
gtod_reduce=1
[job1]
name=rand_readwrite
bs=4K
iodepth=64
size=4G
readwrite=randrw
rwmixread=75
time_based
ramp_time=2s
runtime=15s
`

889
pkg/fio/fio_test.go Normal file
View file

@ -0,0 +1,889 @@
package fio
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/kastenhq/kubestr/pkg/common"
"github.com/pkg/errors"
. "gopkg.in/check.v1"
v1 "k8s.io/api/core/v1"
scv1 "k8s.io/api/storage/v1"
sv1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
)
// Test hooks the gocheck suites in this package into the standard
// "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// FIOTestSuite is the gocheck suite holding the fio package tests.
type FIOTestSuite struct{}

var _ = Suite(&FIOTestSuite{})
// TestRunner verifies that RunFio fails fast when the runner has no
// Kubernetes client configured.
func (s *FIOTestSuite) TestRunner(c *C) {
	runner := &FIOrunner{Cli: nil}
	_, err := runner.RunFio(context.Background(), nil)
	c.Check(err, NotNil)
}
// TestRunFioHelper drives FIOrunner.RunFioHelper against a fakeFioStepper and
// asserts (a) the returned error, (b) the exact ordered sequence of steps that
// ran (including cleanup steps after failures), and (c) on success, the
// arguments forwarded to createPVC/createPod.
func (s *FIOTestSuite) TestRunFioHelper(c *C) {
	ctx := context.Background()
	for i, tc := range []struct {
		cli           kubernetes.Interface
		stepper       *fakeFioStepper
		args          *RunFIOArgs
		expectedSteps []string
		checker       Checker
		expectedCM    string
		expectedSC    string
		expectedSize  string
		expectedTFN   string
		expectedPVC   string
	}{
		{ // invalid args (storageclass)
			cli:     fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{},
			args:    &RunFIOArgs{},
			checker: NotNil,
		},
		{ // invalid args (size)
			cli:     fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{},
			args: &RunFIOArgs{
				StorageClass: "sc",
			},
			checker: NotNil,
		},
		{ // invalid args (namespace)
			cli:     fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
			},
			checker: NotNil,
		},
		{ // namespace doesn't exist
			cli: fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{
				vnErr: fmt.Errorf("namespace Err"),
			},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
				Namespace:    "foo",
			},
			checker:       NotNil,
			expectedSteps: []string{"VN"},
		},
		{ // storageclass not found
			cli: fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{
				sceErr: fmt.Errorf("storageclass Err"),
			},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
				Namespace:    "foo",
			},
			checker:       NotNil,
			expectedSteps: []string{"VN", "SCE"},
		},
		{ // success: full step sequence plus deferred cleanup
			cli: fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{
				lcmConfigMap: &v1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name: "CM1",
					},
					Data: map[string]string{
						"testfile.fio": "testfiledata",
					},
				},
				cPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name: "PVC",
					},
				},
				cPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name: "Pod",
					},
				},
			},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
				Namespace:    "foo",
			},
			checker:       IsNil,
			expectedSteps: []string{"VN", "SCE", "LCM", "CPVC", "CPOD", "RFIOC", "DPOD", "DPVC", "DCM"},
			expectedSC:    "sc",
			expectedSize:  DefaultPVCSize,
			expectedTFN:   "testfile.fio",
			expectedCM:    "CM1",
			expectedPVC:   "PVC",
		},
		{ // fio test error: cleanup steps still run
			cli: fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{
				lcmConfigMap: &v1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name: "CM1",
					},
					Data: map[string]string{
						"testfile.fio": "testfiledata",
					},
				},
				cPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name: "PVC",
					},
				},
				cPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name: "Pod",
					},
				},
				rFIOErr: fmt.Errorf("run fio error"),
			},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
				Namespace:    "foo",
			},
			checker:       NotNil,
			expectedSteps: []string{"VN", "SCE", "LCM", "CPVC", "CPOD", "RFIOC", "DPOD", "DPVC", "DCM"},
		},
		{ // create pod error: pod deletion still attempted
			cli: fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{
				lcmConfigMap: &v1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name: "CM1",
					},
					Data: map[string]string{
						"testfile.fio": "testfiledata",
					},
				},
				cPVC: &v1.PersistentVolumeClaim{
					ObjectMeta: metav1.ObjectMeta{
						Name: "PVC",
					},
				},
				cPod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name: "Pod",
					},
				},
				cPodErr: fmt.Errorf("pod create error"),
			},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
				Namespace:    "foo",
			},
			checker:       NotNil,
			expectedSteps: []string{"VN", "SCE", "LCM", "CPVC", "CPOD", "DPOD", "DPVC", "DCM"},
		},
		{ // create PVC error
			cli: fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{
				lcmConfigMap: &v1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name: "CM1",
					},
					Data: map[string]string{
						"testfile.fio": "testfiledata",
					},
				},
				cPVCErr: fmt.Errorf("pvc create error"),
			},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
				Namespace:    "foo",
			},
			checker:       NotNil,
			expectedSteps: []string{"VN", "SCE", "LCM", "CPVC", "DCM"},
		},
		{ // testfilename retrieval error, more than one provided
			cli: fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{
				lcmConfigMap: &v1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name: "CM1",
					},
					Data: map[string]string{
						"testfile.fio":  "testfiledata",
						"testfile.fio2": "testfiledata",
					},
				},
			},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
				Namespace:    "foo",
			},
			checker:       NotNil,
			expectedSteps: []string{"VN", "SCE", "LCM", "DCM"},
		},
		{ // load configmap error
			cli: fake.NewSimpleClientset(),
			stepper: &fakeFioStepper{
				lcmErr: fmt.Errorf("failed to load configmap"),
			},
			args: &RunFIOArgs{
				StorageClass: "sc",
				Size:         "100Gi",
				Namespace:    "foo",
			},
			checker:       NotNil,
			expectedSteps: []string{"VN", "SCE", "LCM"},
		},
	} {
		c.Log(i)
		fio := &FIOrunner{
			Cli:      tc.cli,
			fioSteps: tc.stepper,
		}
		_, err := fio.RunFioHelper(ctx, tc.args)
		c.Check(err, tc.checker)
		c.Assert(tc.stepper.steps, DeepEquals, tc.expectedSteps)
		if err == nil {
			c.Assert(tc.expectedSC, Equals, tc.stepper.cPVCExpSC)
			c.Assert(tc.expectedSize, Equals, tc.stepper.cPVCExpSize)
			c.Assert(tc.expectedTFN, Equals, tc.stepper.cPodExpFN)
			c.Assert(tc.expectedCM, Equals, tc.stepper.cPodExpCM)
			c.Assert(tc.expectedPVC, Equals, tc.stepper.cPodExpPVC)
		}
	}
}
// fakeFioStepper is a test double for the fio step interface. Each step
// method appends a short tag ("VN", "SCE", "LCM", ...) to steps so tests can
// assert the order of operations, records selected inputs in the
// cPVCExp*/cPodExp* fields, and returns the canned values/errors below.
type fakeFioStepper struct {
	steps []string // ordered tags of the steps that were invoked

	vnErr error // validateNamespace result

	sceSC  *sv1.StorageClass // storageClassExists return value
	sceErr error             // storageClassExists error

	lcmConfigMap *v1.ConfigMap // loadConfigMap return value
	lcmErr       error         // loadConfigMap error

	cPVCExpSC   string                    // records storageclass passed to createPVC
	cPVCExpSize string                    // records size passed to createPVC
	cPVC        *v1.PersistentVolumeClaim // createPVC return value
	cPVCErr     error                     // createPVC error

	dPVCErr error // deletePVC error

	cPodExpFN  string  // records testFileName passed to createPod
	cPodExpCM  string  // records configMapName passed to createPod
	cPodExpPVC string  // records pvcName passed to createPod
	cPod       *v1.Pod // createPod return value
	cPodErr    error   // createPod error

	dPodErr error // deletePod error

	rFIOout FioResult // runFIOCommand return value
	rFIOErr error     // runFIOCommand error
}
// validateNamespace logs the "VN" step and returns the preconfigured error.
func (f *fakeFioStepper) validateNamespace(ctx context.Context, namespace string) (err error) {
	f.steps = append(f.steps, "VN")
	return f.vnErr
}
// storageClassExists logs the "SCE" step and returns the canned storage
// class and error.
func (f *fakeFioStepper) storageClassExists(ctx context.Context, storageClass string) (sc *sv1.StorageClass, err error) {
	f.steps = append(f.steps, "SCE")
	sc, err = f.sceSC, f.sceErr
	return sc, err
}
// loadConfigMap logs the "LCM" step and returns the canned configmap and
// error.
func (f *fakeFioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (cm *v1.ConfigMap, err error) {
	f.steps = append(f.steps, "LCM")
	cm, err = f.lcmConfigMap, f.lcmErr
	return cm, err
}
// createPVC logs the "CPVC" step, captures the requested storage class and
// size for later assertions, and returns the canned PVC and error.
func (f *fakeFioStepper) createPVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, error) {
	f.cPVCExpSC, f.cPVCExpSize = storageclass, size
	f.steps = append(f.steps, "CPVC")
	return f.cPVC, f.cPVCErr
}
// deletePVC logs the "DPVC" step and returns the preconfigured error.
func (f *fakeFioStepper) deletePVC(ctx context.Context, pvcName, namespace string) (err error) {
	f.steps = append(f.steps, "DPVC")
	return f.dPVCErr
}
// createPod logs the "CPOD" step, captures the PVC/configmap/test-file
// inputs for later assertions, and returns the canned pod and error.
func (f *fakeFioStepper) createPod(ctx context.Context, pvcName, configMapName, testFileName, namespace string, image string) (*v1.Pod, error) {
	f.cPodExpPVC = pvcName
	f.cPodExpCM = configMapName
	f.cPodExpFN = testFileName
	f.steps = append(f.steps, "CPOD")
	return f.cPod, f.cPodErr
}
// deletePod logs the "DPOD" step and returns the preconfigured error.
func (f *fakeFioStepper) deletePod(ctx context.Context, podName, namespace string) (err error) {
	f.steps = append(f.steps, "DPOD")
	return f.dPodErr
}
// runFIOCommand logs the "RFIOC" step and returns the canned fio result and
// error.
func (f *fakeFioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (res FioResult, err error) {
	f.steps = append(f.steps, "RFIOC")
	return f.rFIOout, f.rFIOErr
}
// deleteConfigMap logs the "DCM" step; the fake never fails this step.
func (f *fakeFioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) (err error) {
	f.steps = append(f.steps, "DCM")
	return
}
// TestStorageClassExists verifies that storageClassExists errors when the
// storage class is absent and succeeds when it is present.
func (s *FIOTestSuite) TestStorageClassExists(c *C) {
	ctx := context.Background()
	testCases := []struct {
		cli          kubernetes.Interface
		storageClass string
		checker      Checker
	}{
		{ // storage class not present -> error
			cli:          fake.NewSimpleClientset(),
			storageClass: "sc",
			checker:      NotNil,
		},
		{ // storage class present -> success
			cli:          fake.NewSimpleClientset(&scv1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "sc"}}),
			storageClass: "sc",
			checker:      IsNil,
		},
	}
	for _, testCase := range testCases {
		stepper := &fioStepper{cli: testCase.cli}
		_, err := stepper.storageClassExists(ctx, testCase.storageClass)
		c.Check(err, testCase.checker)
	}
}
// TestValidateNamespace verifies that validateNamespace fails for a missing
// namespace and succeeds once the namespace exists.
func (s *FIOTestSuite) TestValidateNamespace(c *C) {
	ctx := context.Background()
	// Namespace "ns" does not exist -> error.
	missing := &fioStepper{cli: fake.NewSimpleClientset()}
	c.Assert(missing.validateNamespace(ctx, "ns"), NotNil)
	// Namespace "ns" exists -> no error.
	present := &fioStepper{cli: fake.NewSimpleClientset(&v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ns",
		},
	})}
	c.Assert(present.validateNamespace(ctx, "ns"), IsNil)
}
// TestLoadConfigMap exercises fioStepper.loadConfigMap across its job
// sources — an explicit FIO job file path, a named built-in job, and the
// default job — along with the corresponding failure modes.
func (s *FIOTestSuite) TestLoadConfigMap(c *C) {
	ctx := context.Background()
	// Temporary file stands in for a user-supplied FIO job file.
	file, err := ioutil.TempFile("", "tempTLCfile")
	c.Check(err, IsNil)
	defer os.Remove(file.Name())
	for i, tc := range []struct {
		cli           kubernetes.Interface
		configMapName string
		jobName       string
		args          *RunFIOArgs
		cmChecker     Checker
		errChecker    Checker
		failCreates   bool // inject a reactor that fails configmap creation
		hasLabel      bool
	}{
		{ // provided file name not found
			cli: fake.NewSimpleClientset(),
			args: &RunFIOArgs{
				FIOJobFilepath: "nonexistantfile",
			},
			cmChecker:  IsNil,
			errChecker: NotNil,
		},
		{ // specified config map found
			cli: fake.NewSimpleClientset(),
			args: &RunFIOArgs{
				FIOJobFilepath: file.Name(),
				FIOJobName:     "random", // won't use this case
			},
			cmChecker:  NotNil,
			errChecker: IsNil,
		},
		{ // specified job name, not found
			cli: fake.NewSimpleClientset(),
			args: &RunFIOArgs{
				FIOJobName: "random",
			},
			cmChecker:  IsNil,
			errChecker: NotNil,
		},
		{ // specified job name, found
			cli: fake.NewSimpleClientset(),
			args: &RunFIOArgs{
				FIOJobName: DefaultFIOJob,
			},
			cmChecker:  NotNil,
			errChecker: IsNil,
		},
		{ // use default job
			cli:        fake.NewSimpleClientset(),
			args:       &RunFIOArgs{},
			cmChecker:  NotNil,
			errChecker: IsNil,
		},
		{ // Fails to create configMap
			cli:         fake.NewSimpleClientset(),
			cmChecker:   IsNil,
			errChecker:  NotNil,
			args:        &RunFIOArgs{},
			failCreates: true,
		},
	} {
		c.Log(i)
		stepper := &fioStepper{cli: tc.cli}
		if tc.failCreates {
			// Force the fake client to reject configmap creation.
			stepper.cli.(*fake.Clientset).Fake.PrependReactor("create", "configmaps", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
				return true, nil, errors.New("Error creating object")
			})
		}
		cm, err := stepper.loadConfigMap(ctx, tc.args)
		c.Check(err, tc.errChecker)
		c.Check(cm, tc.cmChecker)
		if cm != nil {
			// Every configmap created here must carry the CreatedByFIOLabel
			// marker so cleanup can later identify it.
			_, ok := cm.Labels[CreatedByFIOLabel]
			c.Assert(ok, Equals, true)
		}
	}
}
// TestCreatePVC verifies fioStepper.createPVC: a valid request yields a PVC
// with the expected generate-name, storage class, and size; creation
// failures and unparsable size strings surface as errors.
func (s *FIOTestSuite) TestCreatePVC(c *C) {
	ctx := context.Background()
	for _, tc := range []struct {
		cli          kubernetes.Interface
		storageclass string
		size         string
		errChecker   Checker
		pvcChecker   Checker
		failCreates  bool // inject a reactor that fails all creates
	}{
		{ // happy path
			cli:          fake.NewSimpleClientset(),
			storageclass: "fakesc",
			size:         "20Gi",
			errChecker:   IsNil,
			pvcChecker:   NotNil,
		},
		{ // Fails to create pvc
			cli:          fake.NewSimpleClientset(),
			storageclass: "fakesc",
			size:         "10Gi",
			pvcChecker:   IsNil,
			errChecker:   NotNil,
			failCreates:  true,
		},
		{ // parse error
			cli:          fake.NewSimpleClientset(),
			storageclass: "fakesc",
			size:         "Not a quantity",
			pvcChecker:   IsNil,
			errChecker:   NotNil,
		},
	} {
		stepper := &fioStepper{cli: tc.cli}
		if tc.failCreates {
			stepper.cli.(*fake.Clientset).Fake.PrependReactor("create", "*", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
				return true, nil, errors.New("Error creating object")
			})
		}
		pvc, err := stepper.createPVC(ctx, tc.storageclass, tc.size, DefaultNS)
		c.Check(err, tc.errChecker)
		c.Check(pvc, tc.pvcChecker)
		if pvc != nil {
			c.Assert(pvc.GenerateName, Equals, PVCGenerateName)
			c.Assert(*pvc.Spec.StorageClassName, Equals, tc.storageclass)
			value, ok := pvc.Spec.Resources.Requests.Storage().AsInt64()
			c.Assert(ok, Equals, true)
			// 20Gi == 20 * 1024^3 == 21474836480 bytes.
			c.Assert(value, Equals, int64(21474836480))
		}
	}
}
// TestDeletePVC verifies that deletePVC removes an existing PVC and errors
// when the PVC is already gone.
func (s *FIOTestSuite) TestDeletePVC(c *C) {
	ctx := context.Background()
	cli := fake.NewSimpleClientset(&v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pvc",
			Namespace: DefaultNS,
		}})
	stepper := &fioStepper{cli: cli}
	// First delete removes the existing PVC.
	c.Assert(stepper.deletePVC(ctx, "pvc", DefaultNS), IsNil)
	// Second delete fails: the PVC no longer exists.
	c.Assert(stepper.deletePVC(ctx, "pvc", DefaultNS), NotNil)
}
// TestCreatPod exercises fioStepper.createPod: the happy path (pod shape,
// volumes, container command, image selection), API/readiness failures, and
// argument validation for empty inputs.
// NOTE(review): the name is missing an "e" (TestCreatePod); gocheck still
// discovers it, but consider renaming for consistency.
func (s *FIOTestSuite) TestCreatPod(c *C) {
	ctx := context.Background()
	for _, tc := range []struct {
		pvcName       string
		configMapName string
		testFileName  string
		image         string
		reactor       []k8stesting.Reactor // replaces the fake client's reaction chain when set
		podReadyErr   error                // error returned by the fake pod-ready checker
		errChecker    Checker
	}{
		{ // happy path
			pvcName:       "pvc",
			configMapName: "cm",
			testFileName:  "testfile",
			errChecker:    IsNil,
		},
		{ // pod is created but the follow-up get fails
			pvcName:       "pvc",
			configMapName: "cm",
			testFileName:  "testfile",
			errChecker:    NotNil,
			reactor: []k8stesting.Reactor{
				&k8stesting.SimpleReactor{
					Verb:     "create",
					Resource: "*",
					Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
						return true, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}}, nil
					},
				},
				&k8stesting.SimpleReactor{
					Verb:     "get",
					Resource: "*",
					Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
						return true, nil, errors.New("Error getting object")
					},
				},
			},
		},
		{ // pod is created but never becomes ready
			pvcName:       "pvc",
			configMapName: "cm",
			testFileName:  "testfile",
			errChecker:    NotNil,
			reactor: []k8stesting.Reactor{
				&k8stesting.SimpleReactor{
					Verb:     "create",
					Resource: "*",
					Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
						return true, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}}, nil
					},
				},
			},
			podReadyErr: fmt.Errorf("pod ready error"),
		},
		{ // pod creation itself fails
			pvcName:       "pvc",
			configMapName: "cm",
			testFileName:  "testfile",
			errChecker:    NotNil,
			reactor: []k8stesting.Reactor{
				&k8stesting.SimpleReactor{
					Verb:     "create",
					Resource: "*",
					Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
						return true, nil, fmt.Errorf("pod create error")
					},
				},
			},
		},
		{ // empty test file name is rejected
			pvcName:       "pvc",
			configMapName: "cm",
			testFileName:  "",
			image:         "someotherimage",
			errChecker:    NotNil,
		},
		{ // empty PVC name is rejected
			pvcName:       "",
			configMapName: "cm",
			testFileName:  "asdf",
			errChecker:    NotNil,
		},
		{ // empty configmap name is rejected
			pvcName:       "pvc",
			configMapName: "",
			testFileName:  "asd",
			errChecker:    NotNil,
		},
	} {
		stepper := &fioStepper{
			cli:      fake.NewSimpleClientset(),
			podReady: &fakePodReadyChecker{prcErr: tc.podReadyErr},
		}
		if tc.reactor != nil {
			stepper.cli.(*fake.Clientset).Fake.ReactionChain = tc.reactor
		}
		pod, err := stepper.createPod(ctx, tc.pvcName, tc.configMapName, tc.testFileName, DefaultNS, tc.image)
		c.Check(err, tc.errChecker)
		if err == nil {
			// On success, assert the full pod shape: volumes, container
			// command/args, mounts, and image selection.
			c.Assert(pod.GenerateName, Equals, PodGenerateName)
			c.Assert(len(pod.Spec.Volumes), Equals, 2)
			for _, vol := range pod.Spec.Volumes {
				switch vol.Name {
				case "persistent-storage":
					c.Assert(vol.VolumeSource.PersistentVolumeClaim.ClaimName, Equals, tc.pvcName)
				case "config-map":
					c.Assert(vol.VolumeSource.ConfigMap.Name, Equals, tc.configMapName)
				}
			}
			c.Assert(len(pod.Spec.Containers), Equals, 1)
			c.Assert(pod.Spec.Containers[0].Name, Equals, ContainerName)
			c.Assert(pod.Spec.Containers[0].Command, DeepEquals, []string{"/bin/sh"})
			c.Assert(pod.Spec.Containers[0].Args, DeepEquals, []string{"-c", "tail -f /dev/null"})
			c.Assert(pod.Spec.Containers[0].VolumeMounts, DeepEquals, []v1.VolumeMount{
				{Name: "persistent-storage", MountPath: VolumeMountPath},
				{Name: "config-map", MountPath: ConfigMapMountPath},
			})
			// The default image is used only when none is specified.
			if tc.image == "" {
				c.Assert(pod.Spec.Containers[0].Image, Equals, common.DefaultPodImage)
			} else {
				c.Assert(pod.Spec.Containers[0].Image, Equals, tc.image)
			}
		}
	}
}
// TestDeletePod verifies that deletePod removes an existing pod and errors
// when the pod is already gone.
func (s *FIOTestSuite) TestDeletePod(c *C) {
	ctx := context.Background()
	cli := fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod",
			Namespace: DefaultNS,
		}})
	stepper := &fioStepper{cli: cli}
	// First delete removes the existing pod.
	c.Assert(stepper.deletePod(ctx, "pod", DefaultNS), IsNil)
	// Second delete fails: the pod no longer exists.
	c.Assert(stepper.deletePod(ctx, "pod", DefaultNS), NotNil)
}
// TestFioTestFileName verifies that fioTestFilename returns the single key
// of a one-entry configmap and errors when multiple entries are present.
func (s *FIOTestSuite) TestFioTestFileName(c *C) {
	testCases := []struct {
		configMap  map[string]string
		retVal     string
		errChecker Checker
	}{
		{ // exactly one entry -> its key is the test file name
			configMap: map[string]string{
				"testfile.fio": "some test data",
			},
			retVal:     "testfile.fio",
			errChecker: IsNil,
		},
		{ // multiple entries -> ambiguous, so an error is returned
			configMap: map[string]string{
				"ConfigMapSCKey":   "storageclass",
				"ConfigMapSizeKey": "10Gi",
				"testfile.fio":     "some test data",
			},
			retVal:     "",
			errChecker: NotNil,
		},
	}
	for _, testCase := range testCases {
		name, err := fioTestFilename(testCase.configMap)
		c.Check(err, testCase.errChecker)
		c.Assert(name, Equals, testCase.retVal)
	}
}
// TestRunFioCommand verifies fioStepper.runFIOCommand: parsable fio JSON
// output is decoded into a FioResult, while exec errors, stderr output, and
// unparsable stdout all yield errors. It also asserts the command line that
// is exec'd inside the pod.
func (s *FIOTestSuite) TestRunFioCommand(c *C) {
	// Expected result: the fixture decoded with the same unmarshaler.
	var parsedout FioResult
	err := json.Unmarshal([]byte(parsableFioOutput), &parsedout)
	c.Assert(err, IsNil)
	ctx := context.Background()
	for _, tc := range []struct {
		executor      *fakeKubeExecutor
		errChecker    Checker
		podName       string
		containerName string
		testFileName  string
		out           FioResult
	}{
		{ // clean exec with parsable output
			executor: &fakeKubeExecutor{
				keErr:    nil,
				keStrErr: "",
				keStdOut: parsableFioOutput,
			},
			errChecker:    IsNil,
			podName:       "pod",
			containerName: "container",
			testFileName:  "tfName",
			out:           parsedout,
		},
		{ // clean exec but stdout is not valid JSON
			executor: &fakeKubeExecutor{
				keErr:    nil,
				keStrErr: "",
				keStdOut: "unparsable string",
			},
			errChecker:    NotNil,
			podName:       "pod",
			containerName: "container",
			testFileName:  "tfName",
			out:           FioResult{},
		},
		{ // exec itself fails
			executor: &fakeKubeExecutor{
				keErr:    fmt.Errorf("kubeexec err"),
				keStrErr: "",
				keStdOut: "unparsable string",
			},
			errChecker:    NotNil,
			podName:       "pod",
			containerName: "container",
			testFileName:  "tfName",
			out:           FioResult{},
		},
		{ // exec succeeds but writes to stderr
			executor: &fakeKubeExecutor{
				keErr:    nil,
				keStrErr: "execution error",
				keStdOut: "unparsable string",
			},
			errChecker:    NotNil,
			podName:       "pod",
			containerName: "container",
			testFileName:  "tfName",
			out:           FioResult{},
		},
	} {
		stepper := &fioStepper{
			kubeExecutor: tc.executor,
		}
		out, err := stepper.runFIOCommand(ctx, tc.podName, tc.containerName, tc.testFileName, DefaultNS)
		c.Check(err, tc.errChecker)
		c.Assert(out, DeepEquals, tc.out)
		c.Assert(tc.executor.keInPodName, Equals, tc.podName)
		c.Assert(tc.executor.keInContainerName, Equals, tc.containerName)
		// NOTE(review): the command length is asserted as 5 but only
		// elements 0-3 are checked — the final argument (index 4) is never
		// verified.
		c.Assert(len(tc.executor.keInCommand), Equals, 5)
		c.Assert(tc.executor.keInCommand[0], Equals, "fio")
		c.Assert(tc.executor.keInCommand[1], Equals, "--directory")
		c.Assert(tc.executor.keInCommand[2], Equals, VolumeMountPath)
		jobFilePath := fmt.Sprintf("%s/%s", ConfigMapMountPath, tc.testFileName)
		c.Assert(tc.executor.keInCommand[3], Equals, jobFilePath)
	}
}
// TestDeleteConfigMap verifies fioStepper.deleteConfigMap: configmaps are
// deleted only when they carry the CreatedByFIOLabel marker, and deleting a
// nonexistent configmap is an error.
func (s *FIOTestSuite) TestDeleteConfigMap(c *C) {
	ctx := context.Background()
	defaultNS := "default"
	// Simulate running inside a pod in the "default" namespace.
	os.Setenv(PodNamespaceEnvKey, defaultNS)
	for _, tc := range []struct {
		cli        kubernetes.Interface
		cm         *v1.ConfigMap
		errChecker Checker
		lenCMList  int // configmaps remaining after the call
	}{
		{ // Don't delete it unless it has the label
			cli: fake.NewSimpleClientset(&v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "cm",
					Namespace: defaultNS,
				},
			}),
			cm: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "cm",
					Namespace: defaultNS,
				},
			},
			errChecker: IsNil,
			lenCMList:  1,
		},
		{ // Has label delete
			cli: fake.NewSimpleClientset(&v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "cm",
					Namespace: defaultNS,
				},
			}),
			cm: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "cm",
					Namespace: defaultNS,
					Labels: map[string]string{
						CreatedByFIOLabel: "true",
					},
				},
			},
			errChecker: IsNil,
			lenCMList:  0,
		},
		{ // No cm exists
			cli: fake.NewSimpleClientset(),
			cm: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "cm",
					Namespace: defaultNS,
					Labels: map[string]string{
						CreatedByFIOLabel: "true",
					},
				},
			},
			errChecker: NotNil,
		},
	} {
		stepper := &fioStepper{cli: tc.cli}
		// NOTE(review): delete uses DefaultNS while the list below uses the
		// local defaultNS ("default") — presumably DefaultNS == "default";
		// confirm they match.
		err := stepper.deleteConfigMap(ctx, tc.cm, DefaultNS)
		c.Check(err, tc.errChecker)
		if err == nil {
			list, err := stepper.cli.CoreV1().ConfigMaps(defaultNS).List(ctx, metav1.ListOptions{})
			c.Check(err, IsNil)
			c.Assert(len(list.Items), Equals, tc.lenCMList)
		}
	}
	os.Unsetenv(PodNamespaceEnvKey)
}
// TestWaitForPodReady checks that podReadyChecker.waitForPodReady fails when
// the pod does not exist.
// NOTE(review): the second clientset (seeded with a Running pod) is
// constructed but waitForPodReady is never called against it, so the success
// path is never asserted — this looks like an unfinished test.
func (s *FIOTestSuite) TestWaitForPodReady(c *C) {
	ctx := context.Background()
	prChecker := &podReadyChecker{
		cli: fake.NewSimpleClientset(),
	}
	// Pod does not exist -> error.
	err := prChecker.waitForPodReady(ctx, "somens", "somePod")
	c.Check(err, NotNil)
	// Dead setup: never used (see NOTE above).
	prChecker.cli = fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "somePod",
			Namespace: "somens",
		},
		Status: v1.PodStatus{
			Phase: v1.PodRunning,
		},
	})
}
// fakePodReadyChecker is a test double for the pod-ready checker; it always
// returns the configured error (nil for "ready").
type fakePodReadyChecker struct {
	prcErr error // returned by waitForPodReady
}
// waitForPodReady returns the preconfigured readiness error without waiting.
func (f *fakePodReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) (err error) {
	return f.prcErr
}
// fakeKubeExecutor is a test double for the in-pod command executor. It
// returns the canned stdout/stderr/error and records the inputs it was
// called with so tests can assert them.
type fakeKubeExecutor struct {
	keErr    error  // error returned by exec
	keStdOut string // stdout returned by exec
	keStrErr string // stderr returned by exec

	keInNS            string   // records the namespace passed in
	keInPodName       string   // records the pod name passed in
	keInContainerName string   // records the container name passed in
	keInCommand       []string // records the command passed in
}
// exec records every input for later assertions and returns the canned
// stdout, stderr, and error.
func (fk *fakeKubeExecutor) exec(namespace, podName, containerName string, command []string) (string, string, error) {
	fk.keInCommand = command
	fk.keInContainerName = containerName
	fk.keInPodName = podName
	fk.keInNS = namespace
	return fk.keStdOut, fk.keStrErr, fk.keErr
}

186
pkg/fio/fio_types.go Normal file
View file

@ -0,0 +1,186 @@
package fio
import "fmt"
// FioResult mirrors the top-level structure of fio's JSON output (note the
// space-containing keys such as "fio version" and "global options").
type FioResult struct {
	FioVersion    string           `json:"fio version,omitempty"`
	Timestamp     int64            `json:"timestamp,omitempty"`
	TimestampMS   int64            `json:"timestamp_ms,omitempty"`
	Time          string           `json:"time,omitempty"`
	GlobalOptions FioGlobalOptions `json:"global options,omitempty"`
	Jobs          []FioJobs        `json:"jobs,omitempty"`
	DiskUtil      []FioDiskUtil    `json:"disk_util,omitempty"`
}
// Print renders the result as a human-readable multi-line summary: version,
// global options, each job, then per-disk utilization stats.
func (f FioResult) Print() string {
	out := fmt.Sprintf("FIO version - %s\n", f.FioVersion)
	out += fmt.Sprintf("Global options - %s\n\n", f.GlobalOptions.Print())
	for _, j := range f.Jobs {
		out += j.Print() + "\n"
	}
	out += "Disk stats (read/write):\n"
	for _, d := range f.DiskUtil {
		out += d.Print() + "\n"
	}
	return out
}
// FioGlobalOptions holds the "global options" section of fio's JSON output.
// All values are strings because fio reports them as strings.
type FioGlobalOptions struct {
	Directory  string `json:"directory,omitempty"`
	RandRepeat string `json:"randrepeat,omitempty"`
	Verify     string `json:"verify,omitempty"`
	IOEngine   string `json:"ioengine,omitempty"`
	Direct     string `json:"direct,omitempty"`
	GtodReduce string `json:"gtod_reduce,omitempty"`
}
// Print renders the global options as a single "key=value" line. Directory
// and RandRepeat are intentionally not included in the output.
func (g FioGlobalOptions) Print() string {
	return fmt.Sprintf(
		"ioengine=%s verify=%s direct=%s gtod_reduce=%s",
		g.IOEngine, g.Verify, g.Direct, g.GtodReduce,
	)
}
// FioJobs holds one entry of the "jobs" array in fio's JSON output: the job
// options plus per-direction stats (read/write/trim/sync), CPU usage, and
// I/O-depth and latency histograms.
type FioJobs struct {
	JobName           string        `json:"jobname,omitempty"`
	GroupID           int           `json:"groupid,omitempty"`
	Error             int           `json:"error,omitempty"`
	Eta               int           `json:"eta,omitempty"`
	Elapsed           int           `json:"elapsed,omitempty"`
	JobOptions        FioJobOptions `json:"job options,omitempty"`
	Read              FioStats      `json:"read,omitempty"`
	Write             FioStats      `json:"write,omitempty"`
	Trim              FioStats      `json:"trim,omitempty"`
	Sync              FioStats      `json:"sync,omitempty"`
	JobRuntime        int32         `json:"job_runtime,omitempty"`
	UsrCpu            float32       `json:"usr_cpu,omitempty"`
	SysCpu            float32       `json:"sys_cpu,omitempty"`
	Ctx               int32         `json:"ctx,omitempty"`
	MajF              int32         `json:"majf,omitempty"`
	MinF              int32         `json:"minf,omitempty"`
	IoDepthLevel      FioDepth      `json:"iodepth_level,omitempty"`
	IoDepthSubmit     FioDepth      `json:"iodepth_submit,omitempty"`
	IoDepthComplete   FioDepth      `json:"iodepth_complete,omitempty"`
	LatencyNs         FioLatency    `json:"latency_ns,omitempty"`
	LatencyUs         FioLatency    `json:"latency_us,omitempty"`
	LatencyMs         FioLatency    `json:"latency_ms,omitempty"`
	LatencyDepth      int32         `json:"latency_depth,omitempty"`
	LatencyTarget     int32         `json:"latency_target,omitempty"`
	LatencyPercentile float32       `json:"latency_percentile,omitempty"`
	LatencyWindow     int32         `json:"latency_window,omitempty"`
}
// Print renders the job options followed by read and/or write stats. A
// direction is included only when it saw activity (nonzero IOPS or
// bandwidth), so read-only jobs print no write section and vice versa.
func (j FioJobs) Print() string {
	out := j.JobOptions.Print() + "\n"
	if j.Read.Iops != 0 || j.Read.BW != 0 {
		out += "read:\n" + j.Read.Print() + "\n"
	}
	if j.Write.Iops != 0 || j.Write.BW != 0 {
		out += "write:\n" + j.Write.Print() + "\n"
	}
	return out
}
// FioJobOptions holds the "job options" section of a fio job. Values are
// strings exactly as fio reports them (e.g. "4K", "64", "2G").
type FioJobOptions struct {
	Name     string `json:"name,omitempty"`
	BS       string `json:"bs,omitempty"`
	IoDepth  string `json:"iodepth,omitempty"`
	Size     string `json:"size,omitempty"`
	RW       string `json:"rw,omitempty"`
	RampTime string `json:"ramp_time,omitempty"`
	RunTime  string `json:"runtime,omitempty"`
}
// Print renders the job name and its key parameters on two lines. RampTime
// and RunTime are intentionally omitted.
func (o FioJobOptions) Print() string {
	return fmt.Sprintf(
		"JobName: %s\n  blocksize=%s filesize=%s iodepth=%s rw=%s",
		o.Name, o.BS, o.Size, o.IoDepth, o.RW,
	)
}
// FioStats holds per-direction (read/write/trim/sync) statistics from fio's
// JSON output: totals, bandwidth and IOPS summaries, and latency breakdowns.
type FioStats struct {
	IOBytes     int64   `json:"io_bytes,omitempty"`
	IOKBytes    int64   `json:"io_kbytes,omitempty"`
	BWBytes     int64   `json:"bw_bytes,omitempty"`
	BW          int64   `json:"bw,omitempty"`
	Iops        float32 `json:"iops,omitempty"`
	Runtime     int64   `json:"runtime,omitempty"`
	TotalIos    int64   `json:"total_ios,omitempty"`
	ShortIos    int64   `json:"short_ios,omitempty"`
	DropIos     int64   `json:"drop_ios,omitempty"`
	SlatNs      FioNS   `json:"slat_ns,omitempty"`
	ClatNs      FioNS   `json:"clat_ns,omitempty"`
	LatNs       FioNS   `json:"lat_ns,omitempty"`
	BwMin       int64   `json:"bw_min,omitempty"`
	BwMax       int64   `json:"bw_max,omitempty"`
	BwAgg       float32 `json:"bw_agg,omitempty"`
	BwMean      float32 `json:"bw_mean,omitempty"`
	BwDev       float32 `json:"bw_dev,omitempty"`
	BwSamples   int32   `json:"bw_samples,omitempty"`
	IopsMin     int32   `json:"iops_min,omitempty"`
	IopsMax     int32   `json:"iops_max,omitempty"`
	IopsMean    float32 `json:"iops_mean,omitempty"`
	IopsStdDev  float32 `json:"iops_stddev,omitempty"`
	IopsSamples int32   `json:"iops_samples,omitempty"`
}
// Print renders the headline IOPS/bandwidth plus their min/max/avg on three
// lines (no trailing newline).
func (s FioStats) Print() string {
	lines := fmt.Sprintf(" IOPS=%f BW(KiB/s)=%d\n", s.Iops, s.BW)
	lines += fmt.Sprintf(" iops: min=%d max=%d avg=%f\n", s.IopsMin, s.IopsMax, s.IopsMean)
	lines += fmt.Sprintf(" bw(KiB/s): min=%d max=%d avg=%f", s.BwMin, s.BwMax, s.BwMean)
	return lines
}
// FioNS holds a latency distribution summary (slat_ns/clat_ns/lat_ns) from
// fio's JSON output: min/max/mean/stddev over N samples.
type FioNS struct {
	Min    int64   `json:"min,omitempty"`
	Max    int64   `json:"max,omitempty"`
	Mean   float32 `json:"mean,omitempty"`
	StdDev float32 `json:"stddev,omitempty"`
	N      int64   `json:"N,omitempty"`
}
// FioDepth holds an I/O-depth histogram from fio's JSON output. Each field
// is the percentage of I/Os at that depth bucket; keys are the bucket names.
type FioDepth struct {
	FioDepth0    float32 `json:"0,omitempty"`
	FioDepth1    float32 `json:"1,omitempty"`
	FioDepth2    float32 `json:"2,omitempty"`
	FioDepth4    float32 `json:"4,omitempty"`
	FioDepth8    float32 `json:"8,omitempty"`
	FioDepth16   float32 `json:"16,omitempty"`
	FioDepth32   float32 `json:"32,omitempty"`
	FioDepth64   float32 `json:"64,omitempty"`
	FioDepthGE64 float32 `json:">=64,omitempty"`
}
// FioLatency holds a latency histogram (latency_ns/latency_us/latency_ms)
// from fio's JSON output. Each field is the percentage of I/Os that fell in
// that bucket; the unit depends on which section it was decoded from.
type FioLatency struct {
	FioLat2      float32 `json:"2,omitempty"`
	FioLat4      float32 `json:"4,omitempty"`
	FioLat10     float32 `json:"10,omitempty"`
	FioLat20     float32 `json:"20,omitempty"`
	FioLat50     float32 `json:"50,omitempty"`
	FioLat100    float32 `json:"100,omitempty"`
	FioLat250    float32 `json:"250,omitempty"`
	FioLat500    float32 `json:"500,omitempty"`
	FioLat750    float32 `json:"750,omitempty"`
	FioLat1000   float32 `json:"1000,omitempty"`
	FioLat2000   float32 `json:"2000,omitempty"`
	FioLatGE2000 float32 `json:">=2000,omitempty"`
}
// FioDiskUtil holds one entry of the "disk_util" array in fio's JSON
// output: per-device I/O counters and overall utilization percentage.
type FioDiskUtil struct {
	Name        string  `json:"name,omitempty"`
	ReadIos     int64   `json:"read_ios,omitempty"`
	WriteIos    int64   `json:"write_ios,omitempty"`
	ReadMerges  int64   `json:"read_merges,omitempty"`
	WriteMerges int64   `json:"write_merges,omitempty"`
	ReadTicks   int64   `json:"read_ticks,omitempty"`
	WriteTicks  int64   `json:"write_ticks,omitempty"`
	InQueue     int64   `json:"in_queue,omitempty"`
	Util        float32 `json:"util,omitempty"`
}
// Print renders one device's stats as a single line, e.g.:
//
//	rbd4: ios=30022/11982, merge=0/313, ticks=1028675/1022768, in_queue=2063740, util=99.67%
//
// Values are read/write pairs.
func (d FioDiskUtil) Print() string {
	return fmt.Sprintf(" %s: ios=%d/%d merge=%d/%d ticks=%d/%d in_queue=%d, util=%f%%",
		d.Name, d.ReadIos, d.WriteIos, d.ReadMerges, d.WriteMerges,
		d.ReadTicks, d.WriteTicks, d.InQueue, d.Util)
}

View file

@ -0,0 +1,491 @@
package fio
const parsableFioOutput = `{
"fio version" : "fio-3.20",
"timestamp" : 1611952282,
"timestamp_ms" : 1611952282240,
"time" : "Fri Jan 29 20:31:22 2021",
"global options" : {
"directory" : "/dataset",
"randrepeat" : "0",
"verify" : "0",
"ioengine" : "libaio",
"direct" : "1",
"gtod_reduce" : "1"
},
"jobs" : [
{
"jobname" : "read_iops",
"groupid" : 0,
"error" : 0,
"eta" : 0,
"elapsed" : 18,
"job options" : {
"name" : "read_iops",
"bs" : "4K",
"iodepth" : "64",
"size" : "2G",
"rw" : "randread",
"ramp_time" : "2s",
"runtime" : "15s"
},
"read" : {
"io_bytes" : 61886464,
"io_kbytes" : 60436,
"bw_bytes" : 4039322,
"bw" : 3944,
"iops" : 982.050780,
"runtime" : 15321,
"total_ios" : 15046,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"clat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"bw_min" : 1919,
"bw_max" : 7664,
"bw_agg" : 100.000000,
"bw_mean" : 3995.000000,
"bw_dev" : 1200.820783,
"bw_samples" : 30,
"iops_min" : 479,
"iops_max" : 1916,
"iops_mean" : 998.566667,
"iops_stddev" : 300.247677,
"iops_samples" : 30
},
"write" : {
"io_bytes" : 0,
"io_kbytes" : 0,
"bw_bytes" : 0,
"bw" : 0,
"iops" : 0.000000,
"runtime" : 0,
"total_ios" : 0,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"clat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"bw_min" : 0,
"bw_max" : 0,
"bw_agg" : 0.000000,
"bw_mean" : 0.000000,
"bw_dev" : 0.000000,
"bw_samples" : 0,
"iops_min" : 0,
"iops_max" : 0,
"iops_mean" : 0.000000,
"iops_stddev" : 0.000000,
"iops_samples" : 0
},
"trim" : {
"io_bytes" : 0,
"io_kbytes" : 0,
"bw_bytes" : 0,
"bw" : 0,
"iops" : 0.000000,
"runtime" : 0,
"total_ios" : 0,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"clat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"bw_min" : 0,
"bw_max" : 0,
"bw_agg" : 0.000000,
"bw_mean" : 0.000000,
"bw_dev" : 0.000000,
"bw_samples" : 0,
"iops_min" : 0,
"iops_max" : 0,
"iops_mean" : 0.000000,
"iops_stddev" : 0.000000,
"iops_samples" : 0
},
"sync" : {
"total_ios" : 0,
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
}
},
"job_runtime" : 15322,
"usr_cpu" : 1.109516,
"sys_cpu" : 3.648349,
"ctx" : 17991,
"majf" : 1,
"minf" : 62,
"iodepth_level" : {
"1" : 0.000000,
"2" : 0.000000,
"4" : 0.000000,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
">=64" : 100.000000
},
"iodepth_submit" : {
"0" : 0.000000,
"4" : 100.000000,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
"64" : 0.000000,
">=64" : 0.000000
},
"iodepth_complete" : {
"0" : 0.000000,
"4" : 99.993354,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
"64" : 0.100000,
">=64" : 0.000000
},
"latency_ns" : {
"2" : 0.000000,
"4" : 0.000000,
"10" : 0.000000,
"20" : 0.000000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.000000,
"750" : 0.000000,
"1000" : 0.000000
},
"latency_us" : {
"2" : 0.000000,
"4" : 0.000000,
"10" : 0.000000,
"20" : 0.000000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.000000,
"750" : 0.000000,
"1000" : 0.000000
},
"latency_ms" : {
"2" : 0.000000,
"4" : 0.000000,
"10" : 0.000000,
"20" : 0.000000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.000000,
"750" : 0.000000,
"1000" : 0.000000,
"2000" : 0.000000,
">=2000" : 0.000000
},
"latency_depth" : 64,
"latency_target" : 0,
"latency_percentile" : 100.000000,
"latency_window" : 0
},
{
"jobname" : "write_iops",
"groupid" : 0,
"error" : 0,
"eta" : 0,
"elapsed" : 18,
"job options" : {
"name" : "write_iops",
"bs" : "4K",
"iodepth" : "64",
"size" : "2G",
"rw" : "randwrite",
"ramp_time" : "2s",
"runtime" : "15s"
},
"read" : {
"io_bytes" : 0,
"io_kbytes" : 0,
"bw_bytes" : 0,
"bw" : 0,
"iops" : 0.000000,
"runtime" : 0,
"total_ios" : 0,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"clat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"bw_min" : 0,
"bw_max" : 0,
"bw_agg" : 0.000000,
"bw_mean" : 0.000000,
"bw_dev" : 0.000000,
"bw_samples" : 0,
"iops_min" : 0,
"iops_max" : 0,
"iops_mean" : 0.000000,
"iops_stddev" : 0.000000,
"iops_samples" : 0
},
"write" : {
"io_bytes" : 24805376,
"io_kbytes" : 24224,
"bw_bytes" : 1616406,
"bw" : 1578,
"iops" : 390.525218,
"runtime" : 15346,
"total_ios" : 5993,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"clat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"bw_min" : 512,
"bw_max" : 2706,
"bw_agg" : 100.000000,
"bw_mean" : 1581.066667,
"bw_dev" : 476.641189,
"bw_samples" : 30,
"iops_min" : 128,
"iops_max" : 676,
"iops_mean" : 395.033333,
"iops_stddev" : 119.151738,
"iops_samples" : 30
},
"trim" : {
"io_bytes" : 0,
"io_kbytes" : 0,
"bw_bytes" : 0,
"bw" : 0,
"iops" : 0.000000,
"runtime" : 0,
"total_ios" : 0,
"short_ios" : 0,
"drop_ios" : 0,
"slat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"clat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
},
"bw_min" : 0,
"bw_max" : 0,
"bw_agg" : 0.000000,
"bw_mean" : 0.000000,
"bw_dev" : 0.000000,
"bw_samples" : 0,
"iops_min" : 0,
"iops_max" : 0,
"iops_mean" : 0.000000,
"iops_stddev" : 0.000000,
"iops_samples" : 0
},
"sync" : {
"total_ios" : 0,
"lat_ns" : {
"min" : 0,
"max" : 0,
"mean" : 0.000000,
"stddev" : 0.000000,
"N" : 0
}
},
"job_runtime" : 15345,
"usr_cpu" : 0.508309,
"sys_cpu" : 2.280873,
"ctx" : 7411,
"majf" : 1,
"minf" : 63,
"iodepth_level" : {
"1" : 0.000000,
"2" : 0.000000,
"4" : 0.000000,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
">=64" : 100.000000
},
"iodepth_submit" : {
"0" : 0.000000,
"4" : 100.000000,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
"64" : 0.000000,
">=64" : 0.000000
},
"iodepth_complete" : {
"0" : 0.000000,
"4" : 99.983317,
"8" : 0.000000,
"16" : 0.000000,
"32" : 0.000000,
"64" : 0.100000,
">=64" : 0.000000
},
"latency_ns" : {
"2" : 0.000000,
"4" : 0.000000,
"10" : 0.000000,
"20" : 0.000000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.000000,
"750" : 0.000000,
"1000" : 0.000000
},
"latency_us" : {
"2" : 0.000000,
"4" : 0.000000,
"10" : 0.000000,
"20" : 0.000000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.000000,
"750" : 0.000000,
"1000" : 0.000000
},
"latency_ms" : {
"2" : 0.000000,
"4" : 0.000000,
"10" : 0.000000,
"20" : 0.000000,
"50" : 0.000000,
"100" : 0.000000,
"250" : 0.000000,
"500" : 0.000000,
"750" : 0.000000,
"1000" : 0.000000,
"2000" : 0.000000,
">=2000" : 0.000000
},
"latency_depth" : 64,
"latency_target" : 0,
"latency_percentile" : 100.000000,
"latency_window" : 0
}
],
"disk_util" : [
{
"name" : "rbd4",
"read_ios" : 16957,
"write_ios" : 6896,
"read_merges" : 0,
"write_merges" : 207,
"read_ticks" : 1072290,
"write_ticks" : 1043421,
"in_queue" : 2119036,
"util" : 99.712875
}
]
}`

113
pkg/kubestr/csi-drivers.go Normal file
View file

@ -0,0 +1,113 @@
package kubestr
// THIS FILE IS AUTO_GENERATED.
// To generate file run "go generate" at the top level
// This file must be checked in.
var CSIDriverList = []*CSIDriver{
{NameUrl: "[Alicloud Disk](https://github.com/AliyunContainerService/csi-plugin)", DriverName: "diskplugin.csi.alibabacloud.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Alicloud Disk", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"},
{NameUrl: "[Alicloud NAS](https://github.com/AliyunContainerService/csi-plugin)", DriverName: "nasplugin.csi.alibabacloud.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Alicloud Network Attached Storage (NAS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "No", Features: ""},
{NameUrl: "[Alicloud OSS](https://github.com/AliyunContainerService/csi-plugin)", DriverName: "ossplugin.csi.alibabacloud.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Alicloud Object Storage Service (OSS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "No", Features: ""},
{NameUrl: "[ArStor CSI](https://github.com/huayun-docs/csi-driver-arstor)", DriverName: "arstor.csi.huayun.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Huayun Storage Service (ArStor)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[AWS Elastic Block Storage](https://github.com/kubernetes-sigs/aws-ebs-csi-driver)", DriverName: "ebs.csi.aws.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for AWS Elastic Block Storage (EBS)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"},
{NameUrl: "[AWS Elastic File System](https://github.com/aws/aws-efs-csi-driver)", DriverName: "efs.csi.aws.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for AWS Elastic File System (EFS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "No", Features: ""},
{NameUrl: "[AWS FSx for Lustre](https://github.com/aws/aws-fsx-csi-driver)", DriverName: "fsx.csi.aws.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for AWS FSx for Lustre (EBS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Azure disk](https://github.com/kubernetes-sigs/azuredisk-csi-driver)", DriverName: "disk.csi.azure.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Azure disk", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Azure file](https://github.com/kubernetes-sigs/azurefile-csi-driver)", DriverName: "file.csi.azure.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Azure file", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Bigtera VirtualStor (block)](https://github.com/bigtera-ce/ceph-csi)", DriverName: "csi.block.bigtera.com", Versions: "v0.3, v1.0.0, v1.1.0", Description: "A Container Storage Interface (CSI) Driver for Bigtera VirtualStor block storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"},
{NameUrl: "[Bigtera VirtualStor (filesystem)](https://github.com/bigtera-ce/ceph-csi)", DriverName: "csi.fs.bigtera.com", Versions: "v0.3, v1.0.0, v1.1.0", Description: "A Container Storage Interface (CSI) Driver for Bigtera VirtualStor filesystem", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"},
{NameUrl: "[CephFS](https://github.com/ceph/ceph-csi)", DriverName: "cephfs.csi.ceph.com", Versions: "v0.3, v1.0.0, v1.1.0, v1.2.0", Description: "A Container Storage Interface (CSI) Driver for CephFS", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion, Snapshot, Clone"},
{NameUrl: "[Ceph RBD](https://github.com/ceph/ceph-csi)", DriverName: "rbd.csi.ceph.com", Versions: "v0.3, v1.0.0, v1.1.0, v1.2.0", Description: "A Container Storage Interface (CSI) Driver for Ceph RBD", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology, Cloning"},
{NameUrl: "[ChubaoFS](https://github.com/chubaofs/chubaofs-csi)", DriverName: "csi.chubaofs.com", Versions: "v1.0.0", Description: "A Container Storage Interface (CSI) Driver for ChubaoFS Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Cinder](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/cinder)", DriverName: "cinder.csi.openstack.org", Versions: "v0.3, v1.0, v1.1", Description: "A Container Storage Interface (CSI) Driver for OpenStack Cinder", Persistence: "Persistent and Ephemeral", AccessModes: "Depends on the storage backend used", DynamicProvisioning: "Yes, if storage backend supports it", Features: "Raw Block, Snapshot, Expansion"},
{NameUrl: "[cloudscale.ch](https://github.com/cloudscale-ch/csi-cloudscale)", DriverName: "csi.cloudscale.ch", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for the [cloudscale.ch](https://www.cloudscale.ch/) IaaS platform", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot"},
{NameUrl: "[Datatom-InfinityCSI](https://github.com/datatom-infinity/infinity-csi)", DriverName: "csi-infiblock-plugin", Versions: "v0.3, v1.0.0, v1.1.0", Description: "A Container Storage Interface (CSI) Driver for DATATOM Infinity storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology"},
{NameUrl: "[Datatom-InfinityCSI (filesystem)](https://github.com/datatom-infinity/infinity-csi)", DriverName: "csi-infifs-plugin", Versions: "v0.3, v1.0.0, v1.1.0", Description: "A Container Storage Interface (CSI) Driver for DATATOM Infinity filesystem storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"},
{NameUrl: "[Datera](https://github.com/Datera/datera-csi)", DriverName: "dsp.csi.daterainc.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Datera Data Services Platform (DSP)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot"},
{NameUrl: "[DDN EXAScaler](https://github.com/DDNStorage/exa-csi-driver)", DriverName: "exa.csi.ddn.com", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) Driver for DDN EXAScaler filesystems", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"},
{NameUrl: "[Dell EMC PowerScale](https://github.com/dell/csi-powerscale)", DriverName: "csi-isilon.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC PowerScale](https://www.delltechnologies.com/en-us/storage/powerscale.htm)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning"},
{NameUrl: "[Dell EMC PowerMax](https://github.com/dell/csi-powermax)", DriverName: "csi-powermax.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC PowerMax](https://www.delltechnologies.com/en-us/storage/powermax.htm)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Dell EMC PowerStore](https://github.com/dell/csi-powerstore)", DriverName: "csi-powerstore.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC PowerStore](https://www.delltechnologies.com/en-us/storage/powerstore-storage-appliance.htm)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Dell EMC Unity](https://github.com/dell/csi-unity)", DriverName: "csi-unity.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC Unity](https://www.delltechnologies.com/en-us/storage/unity.htm)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Dell EMC VxFlexOS](https://github.com/dell/csi-vxflexos)", DriverName: "csi-vxflexos.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC VxFlexOS](https://www.delltechnologies.com/en-us/hyperconverged-infrastructure/vxflex.htm)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology"},
{NameUrl: "[democratic-csi](https://github.com/democratic-csi/democratic-csi)", DriverName: "org.democratic-csi.[X]", Versions: "v1.0,v1.1,v1.2", Description: "Generic CSI plugin supporting zfs based solutions ([FreeNAS](https://www.freenas.org/) / [TrueNAS](https://www.truenas.com/) and [ZoL](https://zfsonlinux.org/) solutions such as [Ubuntu](https://ubuntu.com/))", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod (Block Volume) <br/><br/> Read/Write Multiple Pods (File Volume)", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Diamanti-CSI](https://diamanti.com/use-cases/io-acceleration/#csi)", DriverName: "dcx.csi.diamanti.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Diamanti DCX Platform", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"},
{NameUrl: "[DigitalOcean Block Storage](https://github.com/digitalocean/csi-digitalocean)", DriverName: "dobs.csi.digitalocean.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for DigitalOcean Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"},
{NameUrl: "[DriveScale](https://github.com/DriveScale/k8s-plugins)", DriverName: "csi.drivescale.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for DriveScale software composable infrastructure solution", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Ember CSI](https://ember-csi.io)", DriverName: "[x].ember-csi.io", Versions: "v0.2, v0.3, v1.0", Description: "Multi-vendor CSI plugin supporting over 80 Drivers to provide block and mount storage to Container Orchestration systems.", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"},
{NameUrl: "[Excelero NVMesh](https://github.com/Excelero/nvmesh-csi-driver)", DriverName: "nvmesh-csi.excelero.com", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) Driver for Excelero NVMesh", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Expansion"},
{NameUrl: "[GCE Persistent Disk](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver)", DriverName: "pd.csi.storage.gke.io", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Google Compute Engine Persistent Disk (GCE PD)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology"},
{NameUrl: "[Google Cloud Filestore](https://github.com/kubernetes-sigs/gcp-filestore-csi-driver)", DriverName: "com.google.csi.filestore", Versions: "v0.3", Description: "A Container Storage Interface (CSI) Driver for Google Cloud Filestore", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Google Cloud Storage](https://github.com/ofek/csi-gcs)", DriverName: "gcs.csi.ofek.dev", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Google Cloud Storage", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"},
{NameUrl: "[GlusterFS](https://github.com/gluster/gluster-csi-driver)", DriverName: "org.gluster.glusterfs", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for GlusterFS", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot"},
{NameUrl: "[Gluster VirtBlock](https://github.com/gluster/gluster-csi-driver)", DriverName: "org.gluster.glustervirtblock", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Gluster Virtual Block volumes", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Hammerspace CSI](https://github.com/hammer-space/csi-plugin)", DriverName: "com.hammerspace.csi", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Hammerspace Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"},
{NameUrl: "[Hedvig](https://documentation.commvault.com/commvault/hedvig/others/pdf/Hedvig_CSI_User_Guide.pdf)", DriverName: "io.hedvig.csi", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Hedvig", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"},
{NameUrl: "[Hetzner Cloud Volumes CSI](https://github.com/hetznercloud/csi-driver)", DriverName: "csi.hetzner.cloud", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Hetzner Cloud Volumes", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Expansion"},
{NameUrl: "[Hitachi Vantara](https://knowledge.hitachivantara.com/Documents/Adapters_and_Drivers/Storage_Adapters_and_Drivers/Containers)", DriverName: "hspc.csi.hitachi.com", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for VSP series Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[HPE](https://github.com/hpe-storage/csi-driver)", DriverName: "csi.hpe.com", Versions: "v1.0, v1.1, v1.2", Description: "A [multi-platform](https://scod.hpedev.io/csi_driver) Container Storage Interface (CSI) driver. Supports [HPE Nimble Storage](https://hpe.com/storage/nimble), [HPE Primera](https://hpe.com/storage/primera) and [HPE 3PAR](https://hpe.com/storage/3par)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Huawei Storage CSI](https://github.com/Huawei/eSDK_K8S_Plugin)", DriverName: "csi.huawei.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for FusionStorage, OceanStor 100D, OceanStor Dorado V3, OceanStor Dorado V6, OceanStor V3, OceanStor V5", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pod", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning"},
{NameUrl: "[HyperV CSI](https://github.com/Zetanova/hyperv-csi-driver)", DriverName: "eu.zetanova.csi.hyperv", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) driver to manage hyperv hosts", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[IBM Block Storage](https://github.com/ibm/ibm-block-csi-driver)", DriverName: "block.csi.ibm.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) [Driver](https://www.ibm.com/support/knowledgecenter/SSRQ8T) for IBM Spectrum Virtualize Family, IBM FlashSystem A9000 and A9000R, IBM DS8880 and DS8900.", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[IBM Spectrum Scale](https://github.com/IBM/ibm-spectrum-scale-csi)", DriverName: "spectrumscale.csi.ibm.com", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) [Driver](https://www.ibm.com/support/knowledgecenter/STXKQY_CSI_SHR) for the IBM Spectrum Scale File System", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[IBM Cloud Block Storage VPC CSI Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block)", DriverName: "vpc.block.csi.ibm.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) [Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) for IBM Cloud Kubernetes Service and Red Hat OpenShift on IBM Cloud", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block"},
{NameUrl: "[Infinidat](https://github.com/Infinidat/infinibox-csi-driver)", DriverName: "infinibox-csi-driver", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) Driver for Infinidat [InfiniBox](https://infinidat.com/en/products-technology/infinibox)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Inspur InStorage CSI](https://github.com/OpenInspur/instorage-k8s)", DriverName: "csi-instorage", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for inspur AS/HF/CS/CF Series Primary Storage, inspur AS13000 Series SDS Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Intel PMEM-CSI](https://github.com/intel/pmem-csi)", DriverName: "pmem-csi.intel.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) driver for [PMEM](https://pmem.io/) from Intel", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block"},
{NameUrl: "[JuiceFS](https://github.com/juicedata/juicefs-csi-driver)", DriverName: "csi.juicefs.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for JuiceFS File System", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[kaDalu](https://github.com/kadalu/kadalu)", DriverName: "org.kadalu.gluster", Versions: "v0.3", Description: "A CSI Driver (and operator) for GlusterFS", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[KumoScale Block Storage](https://github.com/KioxiaAmerica/kumoscale-csi)", DriverName: "kumoscale.kioxia.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for KumoScale Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology"},
{NameUrl: "[Linode Block Storage](https://github.com/linode/linode-blockstorage-csi-driver)", DriverName: "linodebs.csi.linode.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Linode Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[LINSTOR](https://github.com/LINBIT/linstor-csi)", DriverName: "io.drbd.linstor-csi", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [LINSTOR](https://www.linbit.com/en/linstor/) volumes", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot"},
{NameUrl: "[Longhorn](https://github.com/longhorn/longhorn)", DriverName: "driver.longhorn.io", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Longhorn](https://longhorn.io/) volumes", Persistence: "Persistent", AccessModes: "Read/Write Single Node", DynamicProvisioning: "Yes", Features: "Raw Block"},
{NameUrl: "[MacroSAN](https://github.com/macrosan-csi/macrosan-csi-driver)", DriverName: "csi-macrosan", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for MacroSAN Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Manila](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/manila)", DriverName: "manila.csi.openstack.org", Versions: "v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for OpenStack Shared File System Service (Manila)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Topology"},
{NameUrl: "[MapR](https://github.com/mapr/mapr-csi)", DriverName: "com.mapr.csi-kdf", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for MapR Data Platform", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot"},
{NameUrl: "[MooseFS](https://github.com/moosefs/moosefs-csi)", DriverName: "com.tuxera.csi.moosefs", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [MooseFS](https://moosefs.com/) clusters.", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[NetApp](https://github.com/NetApp/trident)", DriverName: "csi.trident.netapp.io", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for NetApp's [Trident](https://netapp-trident.readthedocs.io/) container storage orchestrator", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"},
{NameUrl: "[NexentaStor File Storage](https://github.com/Nexenta/nexentastor-csi-driver)", DriverName: "nexentastor-csi-driver.nexenta.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for NexentaStor File Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning, Topology"},
{NameUrl: "[NexentaStor Block Storage](https://github.com/Nexenta/nexentastor-csi-driver-block)", DriverName: "nexentastor-block-csi-driver.nexenta.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for NexentaStor over iSCSI protocol", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning, Topology, Raw block"},
{NameUrl: "[Nutanix](https://github.com/nutanix/csi-plugin)", DriverName: "com.nutanix.csi", Versions: "v0.3, v1.0, v1.2", Description: "A Container Storage Interface (CSI) Driver for Nutanix", Persistence: "Persistent", AccessModes: "Read/Write Single Pod with Nutanix Volumes and Read/Write Multiple Pods with Nutanix Files", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[OpenEBS](https://github.com/openebs/csi)", DriverName: "cstor.csi.openebs.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [OpenEBS](https://www.openebs.io/)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Expansion, Snapshot, Cloning"},
{NameUrl: "[OpenSDS](https://github.com/opensds/nbp/tree/master/csi)", DriverName: "csi-opensdsplugin", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [OpenSDS]((https://www.opensds.io/))", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"},
{NameUrl: "[Open-E](https://github.com/open-e/JovianDSS-KubernetesCSI)", DriverName: "com.open-e.joviandss.csi", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Open-E JovianDSS Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot, Cloning"},
{NameUrl: "[oVirt](https://github.com/openshift/ovirt-csi-driver)", DriverName: "csi.ovirt.org", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [oVirt](https://ovirt.org)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Block, File Storage"},
{NameUrl: "[Portworx](https://github.com/libopenstorage/openstorage/tree/master/csi)", DriverName: "pxd.openstorage.org", Versions: "v0.3, v1.1", Description: "A Container Storage Interface (CSI) Driver for [Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion"},
{NameUrl: "[Pure Storage CSI](https://github.com/purestorage/pso-csi)", DriverName: "pure-csi", Versions: "v1.0, v1.1, v1.2, v1.3", Description: "A Container Storage Interface (CSI) Driver for Pure Storage's [Pure Service Orchestrator](https://purestorage.com/containers)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Cloning, Raw Block, Topology, Expansion"},
{NameUrl: "[QingCloud CSI](https://github.com/yunify/qingcloud-csi)", DriverName: "disk.csi.qingcloud.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for QingCloud Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[QingStor CSI](https://github.com/yunify/qingstor-csi)", DriverName: "neonsan.csi.qingstor.com", Versions: "v0.3, v1.1", Description: "A Container Storage Interface (CSI) Driver for NeonSAN storage system", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Quobyte](https://github.com/quobyte/quobyte-csi)", DriverName: "quobyte-csi", Versions: "v0.2", Description: "A Container Storage Interface (CSI) Driver for Quobyte", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[ROBIN](https://get.robin.io/)", DriverName: "robin", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for [ROBIN](https://docs.robin.io)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[SandStone](https://github.com/sandstone-storage/sandstone-csi-driver)", DriverName: "csi-sandstone-plugin", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for SandStone USP", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Sangfor-EDS-File-Storage](https://github.com/evan37717/sangfor-eds-csi)", DriverName: "eds.csi.file.sangfor.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Sangfor Distributed File Storage(EDS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Sangfor-EDS-Block-Storage](https://github.com/eds-wzc/sangfor-eds-csi)", DriverName: "eds.csi.block.sangfor.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Sangfor Block Storage(EDS)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[SeaweedFS](https://github.com/seaweedfs/seaweedfs-csi-driver)", DriverName: "seaweedfs-csi-driver", Versions: "v1.0", Description: "A Container Storage Interface (CSI Driver for [SeaweedFS](https://github.com/chrislusf/seaweedfs))", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Secrets Store CSI Driver](https://github.com/kubernetes-sigs/secrets-store-csi-driver)", DriverName: "secrets-store.csi.k8s.io", Versions: "v0.0.10", Description: "A Container Storage Interface (CSI) Driver for mounting secrets, keys, and certs stored in enterprise-grade external secrets stores as volumes.", Persistence: "Ephemeral", AccessModes: "N/A", DynamicProvisioning: "N/A", Features: ""},
{NameUrl: "[SmartX](http://www.smartx.com/?locale=en)", DriverName: "csi-smtx-plugin", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for SmartX ZBS Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion"},
{NameUrl: "[SPDK-CSI](https://github.com/spdk/spdk-csi)", DriverName: "csi.spdk.io", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [SPDK](https://spdk.io/)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[StorageOS](https://docs.storageos.com/docs/platforms/kubernetes/install/)", DriverName: "storageos", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for [StorageOS](https://storageos.com/)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Storidge](https://docs.storidge.com/kubernetes_storage/overview.html)", DriverName: "csi.cio.storidge.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for [Storidge CIO](https://storidge.com/)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion"},
{NameUrl: "[StorPool](https://kb.storpool.com/storpool_integrations/github/kubernetes.html)", DriverName: "csi-driver.storpool.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [StorPool](https://storpool.com/)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"},
{NameUrl: "[Tencent Cloud Block Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)", DriverName: "com.tencent.cloud.csi.cbs", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Tencent Cloud Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot"},
{NameUrl: "[Tencent Cloud File Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)", DriverName: "com.tencent.cloud.csi.cfs", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Tencent Cloud File Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Tencent Cloud Object Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)", DriverName: "com.tencent.cloud.csi.cosfs", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Tencent Cloud Object Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "No", Features: ""},
{NameUrl: "[TopoLVM](https://github.com/cybozu-go/topolvm)", DriverName: "topolvm.cybozu.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for LVM", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Expansion, Topology Aware"},
{NameUrl: "[VAST Data](https://github.com/vast-data/vast-csi)", DriverName: "csi.vastdata.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for VAST Data", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[XSKY-EBS](https://xsky-storage.github.io/xsky-csi-driver/csi-block.html)", DriverName: "csi.block.xsky.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for XSKY Distributed Block Storage (X-EBS)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[XSKY-EUS](https://xsky-storage.github.io/xsky-csi-driver/csi-fs.html)", DriverName: "csi.fs.xsky.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for XSKY Distributed File Storage (X-EUS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Vault](https://github.com/kubevault/csi-driver)", DriverName: "secrets.csi.kubevault.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for mounting HashiCorp Vault secrets as volumes.", Persistence: "Ephemeral", AccessModes: "N/A", DynamicProvisioning: "N/A", Features: ""},
{NameUrl: "[Veritas InfoScale Volumes](https://www.veritas.com/solution/virtualization/containers.html)", DriverName: "org.veritas.infoscale", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for Veritas InfoScale volumes", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning"},
{NameUrl: "[vSphere](https://github.com/kubernetes-sigs/vsphere-csi-driver)", DriverName: "csi.vsphere.vmware.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for VMware vSphere", Persistence: "Persistent", AccessModes: "Read/Write Single Pod (Block Volume) <br/><br/> Read/Write Multiple Pods (File Volume)", DynamicProvisioning: "Yes", Features: "Raw Block,<br/><br/>Expansion (Block Volume),<br/><br/>Topology Aware (Block Volume)"},
{NameUrl: "[Vultr Block Storage](https://github.com/vultr/vultr-csi)", DriverName: "block.csi.vultr.com", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for Vultr Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[WekaIO](https://github.com/weka/csi-wekafs)", DriverName: "csi.weka.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for mounting WekaIO WekaFS filesystem as volumes", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Yandex.Cloud](https://github.com/flant/yandex-csi-driver)", DriverName: "yandex.csi.flant.com", Versions: "v1.2", Description: "A Container Storage Interface (CSI) plugin for Yandex.Cloud Compute Disks", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[YanRongYun](http://www.yanrongyun.com/)", DriverName: "?", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for YanRong YRCloudFile Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""},
{NameUrl: "[Zadara-CSI](https://github.com/zadarastorage/zadara-csi)", DriverName: "csi.zadara.com", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) plugin for Zadara VPSA Storage Array & VPSA All-Flash", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"},
{NameUrl: "[Flexvolume](https://github.com/kubernetes-csi/csi-driver-flex)", DriverName: "Sample", Versions: "", Description: "", Persistence: "", AccessModes: "", DynamicProvisioning: "", Features: ""},
{NameUrl: "[HostPath](https://github.com/kubernetes-csi/csi-driver-host-path)", DriverName: "v1.2.0", Versions: "Only use for a single node tests. See the [Example](example.html) page for Kubernetes-specific instructions.", Description: "", Persistence: "", AccessModes: "", DynamicProvisioning: "", Features: ""},
{NameUrl: "[ImagePopulator](https://github.com/kubernetes-csi/csi-driver-image-populator)", DriverName: "Prototype", Versions: "Driver that lets you use a container image as an ephemeral volume.", Description: "", Persistence: "", AccessModes: "", DynamicProvisioning: "", Features: ""},
{NameUrl: "[In-memory Sample Mock Driver](https://github.com/kubernetes-csi/csi-test/tree/master/mock/service)", DriverName: "v0.3.0", Versions: "The sample mock driver used for [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity)", Description: "", Persistence: "", AccessModes: "", DynamicProvisioning: "", Features: ""},
{NameUrl: "[NFS](https://github.com/kubernetes-csi/csi-driver-nfs)", DriverName: "Sample", Versions: "", Description: "", Persistence: "", AccessModes: "", DynamicProvisioning: "", Features: ""},
{NameUrl: "[Synology NAS](https://github.com/jparklab/synology-csi)", DriverName: "v1.0.0", Versions: "An unofficial (and unsupported) Container Storage Interface Driver for Synology NAS.", Description: "", Persistence: "", AccessModes: "", DynamicProvisioning: "", Features: ""},
{NameUrl: "[VFS Driver](https://github.com/thecodeteam/csi-vfs)", DriverName: "Released", Versions: "A CSI plugin that provides a virtual file system.", Description: "", Persistence: "", AccessModes: "", DynamicProvisioning: "", Features: ""},
}

View file

@ -0,0 +1,118 @@
package kubestr
import (
"fmt"
"strconv"
"github.com/pkg/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
version "k8s.io/apimachinery/pkg/version"
)
const (
	// MinK8sMajorVersion is the minimum supported Major version
	MinK8sMajorVersion = 1
	// MinK8sMinorVersion is the minimum supported Minor version
	MinK8sMinorVersion = 12
	// MinK8sGitVersion is the minimum supported k8s version
	MinK8sGitVersion = "v1.12.0"
	// RbacGroupName describes the RBAC API group name used to detect whether RBAC is enabled
	RbacGroupName = "rbac.authorization.k8s.io"
)
// KubernetesChecks runs all the baseline checks on the cluster
func (p *Kubestr) KubernetesChecks() []*TestOutput {
var result []*TestOutput
result = append(result, p.validateK8sVersion())
result = append(result, p.validateRBAC())
result = append(result, p.validateAggregatedLayer())
return result
}
// validateK8sVersion validates the clusters K8s version
func (p *Kubestr) validateK8sVersion() *TestOutput {
testName := "Kubernetes Version Check"
version, err := p.validateK8sVersionHelper()
if err != nil {
return MakeTestOutput(testName, StatusError, err.Error(), nil)
}
return MakeTestOutput(testName, StatusOK, fmt.Sprintf("Valid kubernetes version (%s)", version.String()), version)
}
// getK8sVersion fetches the k8s vesion
func (p *Kubestr) validateK8sVersionHelper() (*version.Info, error) {
version, err := p.cli.Discovery().ServerVersion()
if err != nil {
return nil, err
}
majorStr := version.Major
if len(majorStr) > 1 && string(majorStr[len(majorStr)-1]) == "+" {
majorStr = majorStr[:len(majorStr)-1]
}
major, err := strconv.Atoi(majorStr)
if err != nil {
return nil, errors.Wrap(err, "Unable to derive kubernetes major version")
}
minorStr := version.Minor
if len(minorStr) > 1 && string(minorStr[len(minorStr)-1]) == "+" {
minorStr = minorStr[:len(minorStr)-1]
}
minor, err := strconv.Atoi(minorStr)
if err != nil {
return nil, errors.Wrap(err, "Unable to derive kubernetes minor version")
}
if (major < MinK8sMajorVersion) ||
(major == MinK8sMajorVersion && minor < MinK8sMinorVersion) {
return version, fmt.Errorf("Current kubernetes version (%s) is not supported. Minimum version is %s", version.String(), MinK8sGitVersion)
}
return version, nil
}
// validateRBAC reports whether Kubernetes RBAC is enabled on the cluster,
// wrapped in a TestOutput.
func (p *Kubestr) validateRBAC() *TestOutput {
	const testName = "RBAC Check"
	group, err := p.validateRBACHelper()
	if err != nil {
		return MakeTestOutput(testName, StatusError, err.Error(), nil)
	}
	return MakeTestOutput(testName, StatusOK, "Kubernetes RBAC is enabled", *group)
}
// getRBAC runs the Rbac test
func (p *Kubestr) validateRBACHelper() (*v1.APIGroup, error) {
serverGroups, err := p.cli.Discovery().ServerGroups()
if err != nil {
return nil, err
}
for _, group := range serverGroups.Groups {
if group.Name == RbacGroupName {
return &group, nil
}
}
return nil, fmt.Errorf("Kubernetes RBAC is not enabled")
}
// validateAggregatedLayer checks whether the Kubernetes aggregated API layer
// is enabled and wraps the result in a TestOutput.
//
// Bug fix: the error branch previously called MakeTestOutput without
// returning its result, so discovery failures were silently reported as
// success.
func (p *Kubestr) validateAggregatedLayer() *TestOutput {
	testName := "Aggregated Layer Check"
	resourceList, err := p.validateAggregatedLayerHelper()
	if err != nil {
		return MakeTestOutput(testName, StatusError, err.Error(), nil)
	}
	return MakeTestOutput(testName, StatusOK, "The Kubernetes Aggregated Layer is enabled", resourceList)
}
// getAggregatedLayer checks the aggregated API layer
func (p *Kubestr) validateAggregatedLayerHelper() (*v1.APIResourceList, error) {
_, serverResources, err := p.cli.Discovery().ServerGroupsAndResources()
if err != nil {
return nil, err
}
for _, resourceList := range serverResources {
if resourceList.GroupVersion == "apiregistration.k8s.io/v1" || resourceList.GroupVersion == "apiregistration.k8s.io/v1beta1" {
return resourceList, nil
}
}
return nil, fmt.Errorf("Can not detect the Aggregated Layer. Is it enabled?")
}

View file

@ -0,0 +1,160 @@
package kubestr
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
version "k8s.io/apimachinery/pkg/version"
discoveryfake "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/kubernetes/fake"
. "gopkg.in/check.v1"
)
// Test hooks gocheck's suite runner into the standard "go test" machinery.
func Test(t *testing.T) { TestingT(t) }

// K8sChecksTestSuite groups the baseline Kubernetes check tests.
type K8sChecksTestSuite struct{}

var _ = Suite(&K8sChecksTestSuite{})
// TestGetK8sVersion drives validateK8sVersionHelper through a fake discovery
// client: a supported version passes, an unsupported-but-parseable version
// returns the info plus an error, and unparseable components error with nil
// output.
func (s *K8sChecksTestSuite) TestGetK8sVersion(c *C) {
	for _, tc := range []struct {
		ver     *version.Info // version reported by the fake discovery client
		checker Checker       // expectation on the returned error
		out     *version.Info // expected returned version info
	}{
		{
			// Supported version: no error, info returned.
			ver:     &version.Info{Major: "1", Minor: "17", GitVersion: "v1.17"},
			checker: IsNil,
			out:     &version.Info{Major: "1", Minor: "17", GitVersion: "v1.17"},
		},
		{
			// Below the 1.12 minimum: the info is still returned with an error.
			ver:     &version.Info{Major: "1", Minor: "11", GitVersion: "v1.11"},
			checker: NotNil,
			out:     &version.Info{Major: "1", Minor: "11", GitVersion: "v1.11"},
		},
		{
			// Empty minor component cannot be parsed.
			ver:     &version.Info{Major: "1", Minor: "", GitVersion: "v1."},
			checker: NotNil,
			out:     nil,
		},
		{
			// Empty major component cannot be parsed.
			ver:     &version.Info{Major: "", Minor: "11", GitVersion: "v."},
			checker: NotNil,
			out:     nil,
		},
	} {
		cli := fake.NewSimpleClientset()
		cli.Discovery().(*discoveryfake.FakeDiscovery).FakedServerVersion = tc.ver
		p := &Kubestr{cli: cli}
		out, err := p.validateK8sVersionHelper()
		c.Assert(out, DeepEquals, tc.out)
		c.Check(err, tc.checker)
	}
}
// TestValidateRBAC exercises validateRBACHelper against a fake discovery
// client: a malformed group list errors, the rbac.authorization.k8s.io group
// is detected and returned, and an unrelated group yields "not enabled".
func (s *K8sChecksTestSuite) TestValidateRBAC(c *C) {
	for _, tc := range []struct {
		resources []*metav1.APIResourceList // groups served by the fake discovery client
		checker   Checker                   // expectation on the returned error
		out       *v1.APIGroup              // expected group (nil when RBAC is not detected)
	}{
		{
			// Malformed GroupVersion: discovery fails, so an error is expected.
			resources: []*metav1.APIResourceList{
				{
					GroupVersion: "/////",
				},
			},
			checker: NotNil,
			out:     nil,
		},
		{
			// RBAC group present: helper returns the parsed APIGroup.
			resources: []*metav1.APIResourceList{
				{
					GroupVersion: "rbac.authorization.k8s.io/v1",
				},
			},
			checker: IsNil,
			out: &v1.APIGroup{
				Name: "rbac.authorization.k8s.io",
				Versions: []v1.GroupVersionForDiscovery{
					{GroupVersion: "rbac.authorization.k8s.io/v1", Version: "v1"},
				},
				PreferredVersion: v1.GroupVersionForDiscovery{GroupVersion: "rbac.authorization.k8s.io/v1", Version: "v1"},
			},
		},
		{
			// Unrelated group only: RBAC is reported as disabled.
			resources: []*metav1.APIResourceList{
				{
					GroupVersion: "notrbac.authorization.k8s.io/v1",
				},
			},
			checker: NotNil,
			out:     nil,
		},
	} {
		cli := fake.NewSimpleClientset()
		cli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources
		p := &Kubestr{cli: cli}
		out, err := p.validateRBACHelper()
		c.Assert(out, DeepEquals, tc.out)
		c.Check(err, tc.checker)
	}
}
// TestValidateAggregatedLayer exercises validateAggregatedLayerHelper: both
// the v1 and v1beta1 apiregistration.k8s.io group versions count as enabled,
// while malformed or unrelated groups yield an error and nil output.
func (s *K8sChecksTestSuite) TestValidateAggregatedLayer(c *C) {
	for _, tc := range []struct {
		resources []*metav1.APIResourceList // resources served by the fake discovery client
		checker   Checker                   // expectation on the returned error
		out       *metav1.APIResourceList   // expected matched resource list
	}{
		{
			// Malformed GroupVersion: discovery fails.
			resources: []*metav1.APIResourceList{
				{
					GroupVersion: "/////",
				},
			},
			checker: NotNil,
			out:     nil,
		},
		{
			// Stable apiregistration group version detected.
			resources: []*metav1.APIResourceList{
				{
					GroupVersion: "apiregistration.k8s.io/v1",
				},
			},
			checker: IsNil,
			out: &metav1.APIResourceList{
				GroupVersion: "apiregistration.k8s.io/v1",
			},
		},
		{
			// Beta apiregistration group version also counts.
			resources: []*metav1.APIResourceList{
				{
					GroupVersion: "apiregistration.k8s.io/v1beta1",
				},
			},
			checker: IsNil,
			out: &metav1.APIResourceList{
				GroupVersion: "apiregistration.k8s.io/v1beta1",
			},
		},
		{
			// Unrelated group: aggregated layer not detected.
			resources: []*metav1.APIResourceList{
				{
					GroupVersion: "notapiregistration.k8s.io/v1",
				},
			},
			checker: NotNil,
			out:     nil,
		},
	} {
		cli := fake.NewSimpleClientset()
		cli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources
		p := &Kubestr{cli: cli}
		out, err := p.validateAggregatedLayerHelper()
		c.Assert(out, DeepEquals, tc.out)
		c.Check(err, tc.checker)
	}
}

91
pkg/kubestr/kubestr.go Normal file
View file

@ -0,0 +1,91 @@
package kubestr
import (
"github.com/kanisterio/kanister/pkg/kube"
"github.com/kastenhq/kubestr/pkg/fio"
"github.com/pkg/errors"
sv1 "k8s.io/api/storage/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
// Kubestr is the primary object for running the kubestr tool. It holds all the cluster state information
// as well.
// Kubestr is the primary object for running the kubestr tool. It holds all the cluster state information
// as well.
type Kubestr struct {
	cli                     kubernetes.Interface           // typed client used for discovery, storage and core API calls
	dynCli                  dynamic.Interface              // dynamic client used for CRD-backed resources (e.g. VolumeSnapshotClasses)
	sdsfgValidator          snapshotDataSourceFG           // pluggable probe for the VolumeSnapshotDataSource feature gate
	storageClassList        *sv1.StorageClassList          // cache; populated on first load and reused afterwards
	volumeSnapshotClassList *unstructured.UnstructuredList // cache; populated on first load and reused afterwards
	Fio                     fio.FIO                        // runner for FIO storage benchmarking
}
// Logo is the ASCII-art banner printed when the tool starts.
// Bug fix: the tagline previously misspelled "kubernetes" as "kuberntes".
const Logo = `
**************************************
 _ ___ _ ___ ___ ___ _____ ___
| |/ / | | | _ ) __/ __|_ _| _ \
| ' <| |_| | _ \ _|\__ \ | | | /
|_|\_\\___/|___/___|___/ |_| |_|_\
Explore your kubernetes storage options
**************************************
`
var (
	// DefaultQPS is the client-side request rate limit applied to the Kubernetes client.
	DefaultQPS = float32(50)
	// DefaultBurst is the client-side burst limit applied to the Kubernetes client.
	DefaultBurst = 100
)
// NewKubestr initializes a new kubestr object to run preflight tests
func NewKubestr() (*Kubestr, error) {
cli, err := LoadKubeCli()
if err != nil {
return nil, err
}
dynCli, err := LoadDynCli()
if err != nil {
return nil, err
}
return &Kubestr{
cli: cli,
dynCli: dynCli,
sdsfgValidator: &snapshotDataSourceFGValidator{
cli: cli,
dynCli: dynCli,
},
Fio: &fio.FIOrunner{
Cli: cli,
},
}, nil
}
// LoadDynCli loads the config and returns a dynamic CLI
func LoadDynCli() (dynamic.Interface, error) {
cfg, err := kube.LoadConfig()
if err != nil {
return nil, errors.Wrap(err, "Failed to load config for Dynamic client")
}
clientset, err := dynamic.NewForConfig(cfg)
if err != nil {
return nil, errors.Wrap(err, "Failed to create Dynamic client")
}
return clientset, nil
}
// LoadKubeCli load the config and returns a kubernetes client
// NewClient returns a k8 client configured by the kanister environment.
func LoadKubeCli() (kubernetes.Interface, error) {
config, err := kube.LoadConfig()
if err != nil {
return nil, err
}
config.QPS = DefaultQPS
config.Burst = DefaultBurst
// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return clientset, nil
}

View file

@ -0,0 +1,429 @@
package kubestr
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
kanvolume "github.com/kanisterio/kanister/pkg/kube/volume"
"github.com/kastenhq/kubestr/pkg/common"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
sv1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
const (
	// APIVersionKey describes the APIVersion key
	APIVersionKey = "apiVersion"
	// FeatureGateTestPVCName is the name of the PVC created by the feature-gate
	// validation test
	FeatureGateTestPVCName = "kubestr-featuregate-test"
	// DefaultNS describes the default namespace
	DefaultNS = "default"
	// PodNamespaceEnvKey describes the pod namespace env variable
	PodNamespaceEnvKey = "POD_NAMESPACE"
)
// Provisioner holds the important information of a provisioner
type Provisioner struct {
ProvisionerName string
CSIDriver *CSIDriver
URL string
StorageClasses []*SCInfo
VolumeSnapshotClasses []*VSCInfo
StatusList []Status
}
// CSIDriver holds the publicly-listed metadata for a known CSI driver.
type CSIDriver struct {
	NameUrl             string // markdown-style "[name](url)" link for the driver
	DriverName          string
	Versions            string
	Description         string
	Persistence         string
	AccessModes         string
	DynamicProvisioning string
	Features            string
}

// Provider extracts the provider name from NameUrl: the text of the left-most
// markdown "[...]" bracket. Returns "" when no bracketed name is present.
func (c *CSIDriver) Provider() string {
	match := regexp.MustCompile(`\[(.*?)\]`).FindStringSubmatch(c.NameUrl)
	if len(match) < 2 {
		return ""
	}
	return match[1]
}

// URL extracts the website from NameUrl: the contents of the right-most
// "(...)" parenthesis. Returns "" when none is present.
func (c *CSIDriver) URL() string {
	matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(c.NameUrl, -1)
	if len(matches) == 0 {
		return ""
	}
	last := matches[len(matches)-1]
	if len(last) < 2 {
		return ""
	}
	return last[1]
}

// Print writes the driver's summary to stdout, indented by prefix.
func (c *CSIDriver) Print(prefix string) {
	fmt.Printf(prefix+" Provider: %s\n", c.Provider())
	fmt.Printf(prefix+" Website: %s\n", c.URL())
	fmt.Printf(prefix+" Description: %s\n", c.Description)
	fmt.Printf(prefix+" Additional Features: %s\n", c.Features)
}

// SupportsSnapshots reports whether the driver's feature list mentions
// snapshot support.
func (c *CSIDriver) SupportsSnapshots() bool {
	return strings.Contains(c.Features, "Snapshot")
}
// SCInfo stores the info of a StorageClass
type SCInfo struct {
Name string
StatusList []Status
Raw interface{} `json:",omitempty"`
}
// VSCInfo stores the info of a VolumeSnapshotClass
type VSCInfo struct {
Name string
StatusList []Status
HasAnnotation bool
Raw interface{} `json:",omitempty"`
}
// Print prints the provionsioner specific details
func (v *Provisioner) Print() {
printSuccessColor(" " + v.ProvisionerName + ":")
for _, status := range v.StatusList {
status.Print(" ")
}
switch {
case v.CSIDriver != nil:
fmt.Println(" This is a CSI driver!")
fmt.Println(" (The following info may not be up to date. Please check with the provider for more information.)")
v.CSIDriver.Print(" ")
case strings.HasPrefix(v.ProvisionerName, "kubernetes.io"):
fmt.Println(" This is an in tree provisioner.")
case strings.Contains(v.ProvisionerName, "csi"):
fmt.Println(" This might be a CSI Driver. But it is not publicly listed.")
default:
fmt.Println(" Unknown driver type.")
}
fmt.Println()
if len(v.StorageClasses) > 0 {
fmt.Printf(" Storage Classes:\n")
for _, sc := range v.StorageClasses {
fmt.Printf(" * %s\n", sc.Name)
for _, status := range sc.StatusList {
status.Print(" ")
}
}
}
if len(v.VolumeSnapshotClasses) > 0 {
fmt.Printf(" Volume Snapshot Classes:\n")
for _, vsc := range v.VolumeSnapshotClasses {
fmt.Printf(" * %s\n", vsc.Name)
for _, status := range vsc.StatusList {
status.Print(" ")
}
}
}
if len(v.StorageClasses) > 0 {
fmt.Println()
fmt.Println(" To perform a FIO test, run-")
fmt.Println(" ./kubestr fio -s <storage class>")
switch {
case len(v.VolumeSnapshotClasses) == 0 && v.CSIDriver != nil && v.CSIDriver.SupportsSnapshots():
fmt.Println()
fmt.Println(" This provisioner supports snapshots, however no Volume Snaphsot Classes were found.")
case len(v.VolumeSnapshotClasses) > 0:
fmt.Println()
fmt.Println(" To test CSI snapshot/restore functionality, run-")
fmt.Println(" ./kubestr csicheck -s <storage class> -v <volume snapshot class>")
}
}
}
// ValidateProvisioners validates the provisioners in a cluster
func (p *Kubestr) ValidateProvisioners(ctx context.Context) ([]*Provisioner, error) {
provisionerList, err := p.provisionerList(ctx)
if err != nil {
return nil, fmt.Errorf("Error listing provisioners: %s", err.Error())
}
var validateProvisionersOutput []*Provisioner
for _, provisioner := range provisionerList {
processedProvisioner, err := p.processProvisioner(ctx, provisioner)
if err != nil {
return nil, err
}
validateProvisionersOutput = append(validateProvisionersOutput, processedProvisioner)
}
return validateProvisionersOutput, nil
}
// processProvisioner builds the full report for a single provisioner name:
// its matching StorageClasses, whether it is a publicly-listed CSI driver,
// and (for CSI drivers) its snapshot-related configuration.
func (p *Kubestr) processProvisioner(ctx context.Context, provisioner string) (*Provisioner, error) {
	retProvisioner := &Provisioner{
		ProvisionerName: provisioner,
	}
	// Attach every StorageClass whose provisioner matches exactly.
	storageClassList, err := p.loadStorageClasses(ctx)
	if err != nil {
		return nil, err
	}
	for _, storageClass := range storageClassList.Items {
		if storageClass.Provisioner == provisioner {
			retProvisioner.StorageClasses = append(retProvisioner.StorageClasses,
				p.validateStorageClass(provisioner, storageClass)) // review this
		}
	}
	// Match against the static list of publicly-listed CSI drivers. This is a
	// substring match, so a prefixed provisioner name still matches its driver.
	for _, csiDriver := range CSIDriverList {
		if strings.Contains(provisioner, csiDriver.DriverName) {
			retProvisioner.CSIDriver = csiDriver
		}
	}
	if retProvisioner.CSIDriver != nil {
		if !p.hasCSIDriverObject(ctx, provisioner) {
			retProvisioner.StatusList = append(retProvisioner.StatusList,
				makeStatus(StatusWarning, "Missing CSIDriver Object. Required by some provisioners.", nil))
		}
		// NOTE(review): when the capability probe reports "not capable" without
		// an error (err == nil), errors.Wrap(nil, ...) returns nil, so the
		// provisioner is returned with only the informational status attached.
		if clusterCsiSnapshotCapable, err := p.isK8sVersionCSISnapshotCapable(ctx); err != nil || !clusterCsiSnapshotCapable {
			retProvisioner.StatusList = append(retProvisioner.StatusList,
				makeStatus(StatusInfo, "Cluster is not CSI snapshot capable. Requires VolumeSnapshotDataSource feature gate.", nil))
			return retProvisioner, errors.Wrap(err, "Failed to validate if Kubernetes version was CSI capable")
		}
		csiSnapshotGroupVersion := p.getCSIGroupVersion()
		if csiSnapshotGroupVersion == nil {
			retProvisioner.StatusList = append(retProvisioner.StatusList,
				makeStatus(StatusInfo, "Can't find the CSI snapshot group api version.", nil))
			return retProvisioner, nil
		}
		// Attach every VolumeSnapshotClass whose driver matches this provisioner.
		vscs, err := p.loadVolumeSnapshotClasses(ctx, csiSnapshotGroupVersion.Version)
		if err != nil {
			return nil, errors.Wrap(err, "Failed to load volume snapshot classes")
		}
		for _, vsc := range vscs.Items {
			if p.getDriverNameFromUVSC(vsc, csiSnapshotGroupVersion.GroupVersion) == provisioner {
				retProvisioner.VolumeSnapshotClasses = append(retProvisioner.VolumeSnapshotClasses,
					p.validateVolumeSnapshotClass(vsc, csiSnapshotGroupVersion.GroupVersion))
			}
		}
	}
	return retProvisioner, nil
}
// hasCSIDriverObject sees if a provisioner has a CSIDriver Object
func (p *Kubestr) hasCSIDriverObject(ctx context.Context, provisioner string) bool {
csiDrivers, err := p.cli.StorageV1beta1().CSIDrivers().List(ctx, metav1.ListOptions{})
if err != nil {
return false
}
for _, driver := range csiDrivers.Items {
if driver.Name == provisioner {
return true
}
}
return false
}
// isK8sVersionCSISnapshotCapable reports whether the cluster can take CSI
// snapshots. Versions other than pre-1.17 1.x are considered capable by
// default; older 1.x clusters are probed for the VolumeSnapshotDataSource
// feature gate via sdsfgValidator.
func (p *Kubestr) isK8sVersionCSISnapshotCapable(ctx context.Context) (bool, error) {
	k8sVersion, err := p.validateK8sVersionHelper()
	if err != nil {
		return false, err
	}
	// Strip a provider-appended trailing "+" (e.g. Minor "15+").
	// NOTE(review): the index assumes Minor is non-empty; the helper above has
	// already rejected versions whose Minor fails to parse, so this holds.
	minorStr := k8sVersion.Minor
	if string(minorStr[len(minorStr)-1]) == "+" {
		minorStr = minorStr[:len(minorStr)-1]
	}
	minor, err := strconv.Atoi(minorStr)
	if err != nil {
		return false, err
	}
	if minor < 17 && k8sVersion.Major == "1" {
		// Pre-1.17: fall back to the feature-gate probe.
		return p.sdsfgValidator.validate(ctx)
	}
	return true, nil
}
// validateStorageClass validates a storageclass
func (p *Kubestr) validateStorageClass(provisioner string, storageClass sv1.StorageClass) *SCInfo {
scStatus := &SCInfo{
Name: storageClass.Name,
Raw: storageClass,
}
return scStatus
}
// validateVolumeSnapshotClass validates the VolumeSnapshotClass
func (p *Kubestr) validateVolumeSnapshotClass(vsc unstructured.Unstructured, groupVersion string) *VSCInfo {
retVSC := &VSCInfo{
Name: vsc.GetName(),
Raw: vsc,
}
switch groupVersion {
case common.SnapshotAlphaVersion:
_, ok := vsc.Object[common.VolSnapClassAlphaDriverKey]
if !ok {
retVSC.StatusList = append(retVSC.StatusList,
makeStatus(StatusError, fmt.Sprintf("VolumeSnapshotClass (%s) missing 'snapshotter' field", vsc.GetName()), nil))
}
case common.SnapshotBetaVersion:
_, ok := vsc.Object[common.VolSnapClassBetaDriverKey]
if !ok {
retVSC.StatusList = append(retVSC.StatusList,
makeStatus(StatusError, fmt.Sprintf("VolumeSnapshotClass (%s) missing 'driver' field", vsc.GetName()), nil))
}
case common.SnapshotStableVersion:
_, ok := vsc.Object[common.VolSnapClassStableDriverKey]
if !ok {
retVSC.StatusList = append(retVSC.StatusList,
makeStatus(StatusError, fmt.Sprintf("VolumeSnapshotClass (%s) missing 'driver' field", vsc.GetName()), nil))
}
}
return retVSC
}
// provisionerList returns the unique set of provisioner names referenced by
// the cluster's StorageClasses.
func (p *Kubestr) provisionerList(ctx context.Context) ([]string, error) {
	scs, err := p.loadStorageClasses(ctx)
	if err != nil {
		return nil, err
	}
	unique := map[string]struct{}{}
	for i := range scs.Items {
		unique[scs.Items[i].Provisioner] = struct{}{}
	}
	return convertSetToSlice(unique), nil
}
// loadStorageClasses returns the cluster's StorageClasses, fetching them on
// first use and serving the cached list afterwards.
func (p *Kubestr) loadStorageClasses(ctx context.Context) (*sv1.StorageClassList, error) {
	if p.storageClassList != nil {
		return p.storageClassList, nil
	}
	sc, err := p.cli.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	p.storageClassList = sc
	return p.storageClassList, nil
}
// loadVolumeSnapshotClasses returns the cluster's VolumeSnapshotClasses for
// the given snapshot API version, fetching them once and caching the result.
func (p *Kubestr) loadVolumeSnapshotClasses(ctx context.Context, version string) (*unstructured.UnstructuredList, error) {
	if p.volumeSnapshotClassList != nil {
		return p.volumeSnapshotClassList, nil
	}
	gvr := schema.GroupVersionResource{
		Group:    common.SnapGroupName,
		Version:  version,
		Resource: common.VolumeSnapshotClassResourcePlural,
	}
	us, err := p.dynCli.Resource(gvr).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	p.volumeSnapshotClassList = us
	return p.volumeSnapshotClassList, nil
}
// getDriverNameFromUVSC extracts the driver name from an unstructured
// VolumeSnapshotClass, resolving the version-specific key first. Returns ""
// for unknown versions, missing keys, or non-string values.
func (p *Kubestr) getDriverNameFromUVSC(vsc unstructured.Unstructured, version string) string {
	var key string
	switch version {
	case common.SnapshotAlphaVersion:
		key = common.VolSnapClassAlphaDriverKey
	case common.SnapshotBetaVersion:
		key = common.VolSnapClassBetaDriverKey
	case common.SnapshotStableVersion:
		key = common.VolSnapClassStableDriverKey
	default:
		return ""
	}
	driver, ok := vsc.Object[key].(string)
	if !ok {
		return ""
	}
	return driver
}
// getCSIGroupVersion fetches the CSI Group Version
func (p *Kubestr) getCSIGroupVersion() *metav1.GroupVersionForDiscovery {
groups, _, err := p.cli.Discovery().ServerGroupsAndResources()
if err != nil {
return nil
}
for _, group := range groups {
if group.Name == common.SnapGroupName {
return &group.PreferredVersion
}
}
return nil
}
// snapshotDataSourceFG abstracts the VolumeSnapshotDataSource feature-gate
// probe so tests can substitute a fake implementation.
type snapshotDataSourceFG interface {
	validate(ctx context.Context) (bool, error)
}

// snapshotDataSourceFGValidator is the real feature-gate probe. It holds both
// a typed and a dynamic client (only the typed client is used by validate).
type snapshotDataSourceFGValidator struct {
	cli    kubernetes.Interface
	dynCli dynamic.Interface
}
// validate probes for the VolumeSnapshotDataSource feature gate by creating a
// PVC whose DataSource references a (nonexistent) VolumeSnapshot and checking
// whether the API server preserved the DataSource field on the created
// object; a stripped field indicates the gate is disabled.
// NOTE(review): the PVC is created in the pod namespace and is best-effort
// deleted again via the deferred DeletePVC call.
func (s *snapshotDataSourceFGValidator) validate(ctx context.Context) (bool, error) {
	ns := getPodNamespace()
	// deletes if exists. If it doesn't exist, this is a noop
	err := kanvolume.DeletePVC(s.cli, ns, FeatureGateTestPVCName)
	if err != nil {
		return false, errors.Wrap(err, "Error deleting VolumeSnapshotDataSource feature-gate validation pvc")
	}
	// Ensure the test PVC is cleaned up regardless of the outcome below.
	defer func() {
		_ = kanvolume.DeletePVC(s.cli, ns, FeatureGateTestPVCName)
	}()
	// create PVC
	snapshotKind := "VolumeSnapshot"
	snapshotAPIGroup := "snapshot.storage.k8s.io"
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: FeatureGateTestPVCName,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			DataSource: &v1.TypedLocalObjectReference{
				APIGroup: &snapshotAPIGroup,
				Kind:     snapshotKind,
				Name:     "fakeSnap", // intentionally nonexistent; only the field's survival matters
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
	pvcRes, err := s.cli.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{})
	if err != nil {
		return false, errors.Wrap(err, "Error creating VolumeSnapshotDataSource feature-gate validation pvc")
	}
	// If the server preserved the DataSource, the feature gate is enabled.
	if pvcRes.Spec.DataSource == nil {
		return false, nil
	}
	return true, nil
}

View file

@ -0,0 +1,352 @@
package kubestr
import (
"context"
"fmt"
"github.com/kanisterio/kanister/pkg/kube/snapshot/apis/v1alpha1"
. "gopkg.in/check.v1"
scv1 "k8s.io/api/storage/v1"
"k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
version "k8s.io/apimachinery/pkg/version"
discoveryfake "k8s.io/client-go/discovery/fake"
fakedynamic "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
// ProvisionerTestSuite groups the provisioner-validation tests.
type ProvisionerTestSuite struct{}

var _ = Suite(&ProvisionerTestSuite{})
// TestHasCSIDriverObject verifies hasCSIDriverObject against fake clients:
// no CSIDriver objects yields false; a CSIDriver matching the provisioner
// name yields true.
func (s *ProvisionerTestSuite) TestHasCSIDriverObject(c *C) {
	ctx := context.Background()
	for _, tc := range []struct {
		cli             kubernetes.Interface // fake client seeded with CSIDriver objects
		provisionerName string               // provisioner looked up
		hasDriver       bool                 // expected result
	}{
		{
			// Empty cluster: no CSIDriver object exists.
			cli:             fake.NewSimpleClientset(),
			provisionerName: "provisioner",
			hasDriver:       false,
		},
		{
			// CSIDriver named exactly after the provisioner is found.
			cli: fake.NewSimpleClientset(&v1beta1.CSIDriverList{
				Items: []v1beta1.CSIDriver{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "drivername",
						},
					},
				}}),
			provisionerName: "drivername",
			hasDriver:       true,
		},
	} {
		p := &Kubestr{cli: tc.cli}
		hasDriver := p.hasCSIDriverObject(ctx, tc.provisionerName)
		c.Assert(hasDriver, Equals, tc.hasDriver)
	}
}
// TestIsK8sVersionCSISnapshotCapable verifies the snapshot-capability check:
// unparseable versions error; pre-1.17 clusters defer to the feature-gate
// validator (whose error/result is propagated); 1.17+ is capable outright.
func (s *ProvisionerTestSuite) TestIsK8sVersionCSISnapshotCapable(c *C) {
	ctx := context.Background()
	for _, tc := range []struct {
		ver     *version.Info        // version reported by the fake discovery client
		checker Checker              // expectation on the returned error
		capable bool                 // expected capability result
		sdsfg   snapshotDataSourceFG // fake feature-gate validator (nil when not reached)
	}{
		{
			// Empty minor version fails to parse in the version helper.
			ver:     &version.Info{Major: "1", Minor: "", GitVersion: "v1.17"},
			checker: NotNil,
			capable: false,
		},
		{
			// Pre-1.17: the feature-gate probe's error is propagated.
			ver:     &version.Info{Major: "1", Minor: "15+", GitVersion: "v1.15+"},
			checker: NotNil,
			capable: false,
			sdsfg:   &fakeSDSFGValidator{err: fmt.Errorf("someerror"), cap: false},
		},
		{
			// Pre-1.17: the feature-gate probe's success is propagated.
			ver:     &version.Info{Major: "1", Minor: "15+", GitVersion: "v1.15+"},
			checker: IsNil,
			capable: true,
			sdsfg:   &fakeSDSFGValidator{err: nil, cap: true},
		},
		{
			// 1.17+ is capable without consulting the validator.
			ver:     &version.Info{Major: "1", Minor: "17", GitVersion: "v1.17"},
			checker: IsNil,
			capable: true,
		},
	} {
		cli := fake.NewSimpleClientset()
		cli.Discovery().(*discoveryfake.FakeDiscovery).FakedServerVersion = tc.ver
		p := &Kubestr{cli: cli, sdsfgValidator: tc.sdsfg}
		// NOTE(review): `cap` shadows the builtin of the same name.
		cap, err := p.isK8sVersionCSISnapshotCapable(ctx)
		c.Check(err, tc.checker)
		c.Assert(cap, Equals, tc.capable)
	}
}
// fakeSDSFGValidator is a test double for snapshotDataSourceFG that returns
// canned results.
type fakeSDSFGValidator struct {
	err error // error returned by validate
	cap bool  // capability result returned by validate
}

// validate returns the canned capability/error pair.
func (f *fakeSDSFGValidator) validate(ctx context.Context) (bool, error) {
	return f.cap, f.err
}
// TestValidateVolumeSnapshotClass verifies that the version-specific driver
// key ('snapshotter' for alpha, 'driver' for beta) is required, and that a
// missing key produces exactly one error status.
func (s *ProvisionerTestSuite) TestValidateVolumeSnapshotClass(c *C) {
	for _, tc := range []struct {
		vsc          unstructured.Unstructured // unstructured VolumeSnapshotClass under test
		groupVersion string                    // snapshot API group version being validated against
		out          *VSCInfo                  // expected report (name + status count are asserted)
	}{
		{
			// Alpha version with the required 'snapshotter' key: no findings.
			vsc: unstructured.Unstructured{
				Object: map[string]interface{}{
					"metadata": map[string]interface{}{
						"name": "vsc1",
					},
					"snapshotter": "something",
				},
			},
			groupVersion: "snapshot.storage.k8s.io/v1alpha1",
			out: &VSCInfo{
				Name: "vsc1",
			},
		},
		{ // failure
			vsc: unstructured.Unstructured{
				Object: map[string]interface{}{
					"metadata": map[string]interface{}{
						"name": "vsc1",
					},
					"notsnapshotter": "something",
				},
			},
			groupVersion: "snapshot.storage.k8s.io/v1alpha1",
			out: &VSCInfo{
				Name: "vsc1",
				StatusList: []Status{
					makeStatus(StatusError, fmt.Sprintf("VolumeSnapshotClass (%s) missing 'snapshotter' field", "vsc1"), nil),
				},
			},
		},
		{
			// Beta version with the required 'driver' key: no findings.
			vsc: unstructured.Unstructured{
				Object: map[string]interface{}{
					"metadata": map[string]interface{}{
						"name": "vsc1",
					},
					"driver": "something",
				},
			},
			groupVersion: "snapshot.storage.k8s.io/v1beta1",
			out: &VSCInfo{
				Name: "vsc1",
			},
		},
		{ // failure
			vsc: unstructured.Unstructured{
				Object: map[string]interface{}{
					"metadata": map[string]interface{}{
						"name": "vsc1",
					},
					"notdriver": "something",
				},
			},
			groupVersion: "snapshot.storage.k8s.io/v1beta1",
			out: &VSCInfo{
				Name: "vsc1",
				StatusList: []Status{
					makeStatus(StatusError, fmt.Sprintf("VolumeSnapshotClass (%s) missing 'driver' field", "vsc1"), nil),
				},
			},
		},
	} {
		p := &Kubestr{}
		out := p.validateVolumeSnapshotClass(tc.vsc, tc.groupVersion)
		c.Assert(out.Name, Equals, tc.out.Name)
		c.Assert(len(out.StatusList), Equals, len(tc.out.StatusList))
	}
}
// TestLoadStorageClassesAndProvisioners verifies that loadStorageClasses
// lists StorageClasses and caches the result (a reload against an empty
// client still returns the two cached items), and that provisionerList is
// derived from that cached list.
func (s *ProvisionerTestSuite) TestLoadStorageClassesAndProvisioners(c *C) {
	ctx := context.Background()
	p := &Kubestr{cli: fake.NewSimpleClientset(
		&scv1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "sc1"}, Provisioner: "provisioner1"},
		&scv1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "sc2"}, Provisioner: "provisioner2"},
	)}
	scs, err := p.loadStorageClasses(ctx)
	c.Assert(err, IsNil)
	c.Assert(len(scs.Items), Equals, 2)
	c.Assert(scs, Equals, p.storageClassList)
	// reload against an empty client returns the same cached list
	p.cli = fake.NewSimpleClientset()
	scs, err = p.loadStorageClasses(ctx)
	c.Assert(err, IsNil)
	c.Assert(len(scs.Items), Equals, 2)
	c.Assert(scs, Equals, p.storageClassList)
	// provisioners uses the loaded list
	provisioners, err := p.provisionerList(ctx)
	c.Assert(err, IsNil)
	c.Assert(len(provisioners), Equals, 2)
}
// TestLoadVolumeSnapshotClasses verifies that loadVolumeSnapshotClasses
// fetches VolumeSnapshotClass objects via the dynamic client and caches the
// result: a reload against an empty client still returns the cached item.
// (Renamed from TestLoadVolumeSnaphsotClasses to fix the typo; gocheck
// discovers suite methods by the "Test" prefix, so nothing else changes.)
func (s *ProvisionerTestSuite) TestLoadVolumeSnapshotClasses(c *C) {
	ctx := context.Background()
	p := &Kubestr{dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": fmt.Sprintf("%s/%s", v1alpha1.GroupName, v1alpha1.Version),
			"kind":       "VolumeSnapshotClass",
			"metadata": map[string]interface{}{
				"name": "theVSC",
			},
			"snapshotter":    "somesnapshotter",
			"deletionPolicy": "Delete",
		},
	})}
	vsc, err := p.loadVolumeSnapshotClasses(ctx, v1alpha1.Version)
	c.Assert(err, IsNil)
	c.Assert(len(vsc.Items), Equals, 1)
	c.Assert(vsc, Equals, p.volumeSnapshotClassList)
	// reload against an empty client returns the same cached list
	p.dynCli = fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())
	vsc, err = p.loadVolumeSnapshotClasses(ctx, v1alpha1.Version)
	c.Assert(err, IsNil)
	c.Assert(len(vsc.Items), Equals, 1)
	c.Assert(vsc, Equals, p.volumeSnapshotClassList)
}
// TestGetCSIGroupVersion checks that getCSIGroupVersion extracts the
// snapshot.storage.k8s.io group version from the discovery resource list
// and returns nil for unparseable or unrelated group versions.
func (s *ProvisionerTestSuite) TestGetCSIGroupVersion(c *C) {
	cases := []struct {
		resources []*metav1.APIResourceList
		out       *metav1.GroupVersionForDiscovery
	}{
		{ // unparseable group version -> nil
			resources: []*metav1.APIResourceList{{GroupVersion: "/////"}},
			out:       nil,
		},
		{ // snapshot group present among multiple entries
			resources: []*metav1.APIResourceList{
				{GroupVersion: "snapshot.storage.k8s.io/v1beta1"},
				{GroupVersion: "snapshot.storage.k8s.io/v1apha1"},
			},
			out: &metav1.GroupVersionForDiscovery{
				GroupVersion: "snapshot.storage.k8s.io/v1beta1",
				Version:      "v1beta1",
			},
		},
		{ // unrelated group -> nil
			resources: []*metav1.APIResourceList{{GroupVersion: "NOTsnapshot.storage.k8s.io/v1beta1"}},
			out:       nil,
		},
	}
	for _, tc := range cases {
		cli := fake.NewSimpleClientset()
		cli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources
		p := &Kubestr{cli: cli}
		c.Assert(p.getCSIGroupVersion(), DeepEquals, tc.out)
	}
}
// TestGetDriverNameFromUVSC exercises driver-name extraction from an
// unstructured VolumeSnapshotClass: v1alpha1 reads the 'snapshotter' key,
// v1beta1 reads 'driver'; a missing key or a non-string value yields "".
func (s *ProvisionerTestSuite) TestGetDriverNameFromUVSC(c *C) {
	cases := []struct {
		vsc     unstructured.Unstructured
		version string
		out     string
	}{
		{ // alpha success
			vsc:     unstructured.Unstructured{Object: map[string]interface{}{"snapshotter": "drivername"}},
			version: "snapshot.storage.k8s.io/v1alpha1",
			out:     "drivername",
		},
		{ // alpha key missing
			vsc:     unstructured.Unstructured{Object: map[string]interface{}{}},
			version: "snapshot.storage.k8s.io/v1alpha1",
			out:     "",
		},
		{ // beta success
			vsc:     unstructured.Unstructured{Object: map[string]interface{}{"driver": "drivername"}},
			version: "snapshot.storage.k8s.io/v1beta1",
			out:     "drivername",
		},
		{ // beta key missing
			vsc:     unstructured.Unstructured{Object: map[string]interface{}{}},
			version: "snapshot.storage.k8s.io/v1beta1",
			out:     "",
		},
		{ // non-string value fails the type conversion
			vsc:     unstructured.Unstructured{Object: map[string]interface{}{"driver": int64(1)}},
			version: "snapshot.storage.k8s.io/v1beta1",
			out:     "",
		},
	}
	for _, tc := range cases {
		p := &Kubestr{}
		c.Assert(p.getDriverNameFromUVSC(tc.vsc, tc.version), Equals, tc.out)
	}
}
// func (s *ProvisionerTestSuite) TestGetDriverStats(c *C) {
// var snapshotCount int
// var expansionCount int
// var cloningCount int
// featureMap := make(map[string]struct{})
// for _, driver := range CSIDriverList {
// if strings.Contains("Snapshot", driver.Features) {
// snapshotCount++
// }
// if strings.Contains("Expansion", driver.Features) {
// expansionCount++
// }
// if strings.Contains("Cloning", driver.Features) {
// cloningCount++
// }
// featureMap[driver.Features] = struct{}{}
// }
// c.Log("totalcsidrivers: ", len(CSIDriverList))
// c.Log("snapshotCount: ", snapshotCount)
// c.Log("expansionCount: ", expansionCount)
// c.Log("cloningCount: ", cloningCount)
// c.Log("unique combinations: ", len(featureMap))
// c.Assert(true, Equals, false)
// }

126
pkg/kubestr/utils.go Normal file
View file

@ -0,0 +1,126 @@
package kubestr
import (
"fmt"
"os"
)
// ANSI terminal escape sequences used to colorize status output; each is a
// Printf format string whose %s is wrapped in the color and then reset.
const (
	// ErrorColor formatted color red
	ErrorColor = "\033[1;31m%s\033[0m"
	// SuccessColor formatted color green
	SuccessColor = "\033[1;32m%s\033[0m"
	// YellowColor formatted color yellow
	YellowColor = "\033[1;33m%s\033[0m"
)
// Status is a generic structure to return a status
type Status struct {
	StatusCode    StatusCode // outcome classification (OK/Warning/Error/Info)
	StatusMessage string     // human-readable detail for this status
	Raw           interface{} `json:",omitempty"` // optional raw payload backing the status
}
// StatusCode classifies a Status; Status.Print selects the output color
// from this value.
type StatusCode string

const (
	// StatusOK is the success status code
	StatusOK = StatusCode("OK")
	// StatusWarning is the warning status code
	StatusWarning = StatusCode("Warning")
	// StatusError is the failure status code
	StatusError = StatusCode("Error")
	// StatusInfo is the Info status code
	StatusInfo = StatusCode("Info")
)
// Print writes the status message to stdout with the given prefix,
// colorized according to the status code (unknown codes print plainly).
func (s *Status) Print(prefix string) {
	msg := prefix + s.StatusMessage
	switch s.StatusCode {
	case StatusOK:
		printSuccessMessage(msg)
	case StatusError:
		printErrorMessage(msg)
	case StatusWarning:
		printWarningMessage(msg)
	default:
		printInfoMessage(msg)
	}
}
// printErrorMessage prints the message followed by a red "Error" marker
// and a newline.
func printErrorMessage(errorMesg string) {
	fmt.Printf("%s - "+ErrorColor+"\n", errorMesg, "Error")
}
// printSuccessMessage prints the message followed by a green "OK" marker
// and a newline.
func printSuccessMessage(message string) {
	fmt.Printf("%s - "+SuccessColor+"\n", message, "OK")
}
// printSuccessColor prints the whole message in green, with a trailing
// newline.
func printSuccessColor(message string) {
	fmt.Printf(SuccessColor+"\n", message)
}
// printInfoMessage prints the message without any color decoration.
func printInfoMessage(message string) {
	fmt.Printf("%s\n", message)
}
// printWarningMessage prints the whole message in yellow, with a trailing
// newline.
func printWarningMessage(message string) {
	fmt.Printf(YellowColor, message)
	fmt.Println()
}
// TestOutput is the generic return value for tests
type TestOutput struct {
	TestName string   // name of the check that produced this output
	Status   []Status // per-step status lines printed under the test name
	Raw      interface{} `json:",omitempty"` // optional raw result payload
}
// Print writes the test name followed by each of its status lines,
// each indented by the same prefix.
func (t *TestOutput) Print() {
	fmt.Printf("%s:\n", t.TestName)
	for i := range t.Status {
		t.Status[i].Print(" ")
	}
}
// MakeTestOutput builds a TestOutput for testname with a single status
// entry carrying code and mesg; raw is attached to the TestOutput itself,
// not to the status entry.
func MakeTestOutput(testname string, code StatusCode, mesg string, raw interface{}) *TestOutput {
	status := makeStatus(code, mesg, nil)
	return &TestOutput{
		TestName: testname,
		Status:   []Status{status},
		Raw:      raw,
	}
}
// makeStatus assembles a Status value from its three fields.
func makeStatus(code StatusCode, mesg string, raw interface{}) Status {
	var s Status
	s.StatusCode = code
	s.StatusMessage = mesg
	s.Raw = raw
	return s
}
// convertSetToSlice returns the keys of set as a slice, in map iteration
// (i.e. unspecified) order. An empty or nil set yields a nil slice, matching
// the original behavior (nil encodes as JSON null, unlike an empty slice).
func convertSetToSlice(set map[string]struct{}) []string {
	if len(set) == 0 {
		return nil
	}
	// Pre-size to avoid repeated growth copies while appending.
	slice := make([]string, 0, len(set))
	for key := range set {
		slice = append(slice, key)
	}
	return slice
}
// getPodNamespace returns the pod's namespace from the environment
// (PodNamespaceEnvKey) or DefaultNS when the variable is unset.
func getPodNamespace() string {
	ns, found := os.LookupEnv(PodNamespaceEnvKey)
	if !found {
		return DefaultNS
	}
	return ns
}

View file

@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Regenerates the CSI driver list consumed by pkg/kubestr/csi-drivers.go
# from the upstream kubernetes-csi docs. Intended to run via "go generate"
# at the top level.
set -e

current_directory=$(dirname "$0")
drivers_md="${current_directory}/../extra/csi-drivers"
temp_go="${current_directory}/../extra/csi-drivers-temp.go"

curl https://raw.githubusercontent.com/kubernetes-csi/docs/master/book/src/drivers.md -o "${drivers_md}"

# trim strips leading/trailing whitespace from stdin.
trim() {
	sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//'
}

# Truncate (">", not ">>") so a stale temp file from a failed previous run
# cannot accumulate duplicate content.
cat <<EOT > "${temp_go}"
package kubestr

// THIS FILE IS AUTO_GENERATED.
// To generate file run "go generate" at the top level
// This file must be checked in.
EOT

echo "var CSIDriverList = []*CSIDriver{" >> "${temp_go}"
while read -r p; do
	# Driver rows in the markdown table start with a "[name](url)" link.
	if [[ $p == [* ]];
	then
		# Scope the '|' separator to this read only; setting IFS globally
		# would leak into the outer "read -r p" and strip pipe characters.
		IFS='|' read -r -a fields <<< "$p"
		name_url=$(trim <<< "${fields[0]}")
		driver_name=$(trim <<< "${fields[1]}" | sed 's/`//g')
		versions=$(trim <<< "${fields[2]}")
		description=$(trim <<< "${fields[3]}")
		persistence=$(trim <<< "${fields[4]}")
		access_modes=$(trim <<< "${fields[5]}" | sed 's/"//g')
		dynamic_provisioning=$(trim <<< "${fields[6]}")
		features=$(trim <<< "${fields[7]}")
		echo "{NameUrl: \"$name_url\", DriverName: \"$driver_name\", Versions: \"$versions\", Description: \"$description\", Persistence: \"$persistence\", AccessModes: \"$access_modes\", DynamicProvisioning: \"$dynamic_provisioning\", Features: \"$features\"}," >> "${temp_go}"
	fi
done < "${drivers_md}"
echo "}" >> "${temp_go}"

# gofmt writes the formatted source to stdout; capture it as the final file.
gofmt "${temp_go}" > "${current_directory}/../pkg/kubestr/csi-drivers.go"
rm "${temp_go}"