diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index f31af49a..effe6b9d 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -6,14 +6,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- - name: Set up Go 1.18.3
- uses: actions/setup-go@v2
+ - name: Set up Go 1.19
+ uses: actions/setup-go@v4
with:
- go-version: 1.18.3
- id: go_3
+ go-version: 1.19.9
+ cache: false
- name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- name: Run Deps Check
run: make verify-deps
@@ -23,14 +23,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- - name: Set up Go 1.18.3
- uses: actions/setup-go@v2
+ - name: Set up Go 1.19
+ uses: actions/setup-go@v4
with:
- go-version: 1.18.3
- id: go
+ go-version: 1.19.9
+ cache: false
- name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- name: Build Openebsctl
run: make openebsctl
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
index 5843e880..047d4c49 100644
--- a/.github/workflows/golangci-lint.yml
+++ b/.github/workflows/golangci-lint.yml
@@ -11,8 +11,8 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: golangci-lint
- uses: golangci/golangci-lint-action@v2
+ uses: golangci/golangci-lint-action@v4
with:
- version: v1.46.2
+ version: v1.54
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8eed1ff3..092066e4 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -15,15 +15,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
with:
fetch-depth: 0
- - name: Set up Go
- uses: actions/setup-go@v2
+ - name: Set up Go 1.19
+ uses: actions/setup-go@v4
with:
- go-version: 1.18.3
+ go-version: 1.19.9
+ cache: false
- name: Run GoReleaser
- uses: goreleaser/goreleaser-action@v2
+ uses: goreleaser/goreleaser-action@v5
with:
distribution: goreleaser
version: latest
diff --git a/.github/workflows/test-coverage.yml b/.github/workflows/test-coverage.yml
index 5451fde0..93e81f1f 100644
--- a/.github/workflows/test-coverage.yml
+++ b/.github/workflows/test-coverage.yml
@@ -10,13 +10,15 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
with:
fetch-depth: 2
- - uses: actions/setup-go@v2
+ - name: Set up Go 1.19
+ uses: actions/setup-go@v4
with:
- go-version: '1.17.7'
+ go-version: 1.19.9
+ cache: false
- name: Run coverage
run: go test ./... --coverprofile=coverage.out --covermode=atomic
- name: Upload coverage to Codecov
- uses: codecov/codecov-action@v2
+ uses: codecov/codecov-action@v4
diff --git a/.tours/first.tour b/.tours/first.tour
deleted file mode 100644
index dd2c2e72..00000000
--- a/.tours/first.tour
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "$schema": "https://aka.ms/codetour-schema",
- "title": "tour",
- "steps": [
- {
- "file": "cmd/openebs.go",
- "description": "This function gets called when the binary runs via kubectl or directly, read about kubectl plugins or watch this [Kubecon talk](https://www.youtube.com/watch?v=83ITOTsXsHU)",
- "line": 66
- },
- {
- "file": "cmd/get/get.go",
- "description": "The CLI supports getting these resources, think of them as the nouns the CLI can get(verb). Read about [spf13/cobra](https://github.com/spf13/cobra) to understand more",
- "line": 53
- },
- {
- "file": "cmd/get/blockdevice.go",
- "description": "This function is called when `kubectl openebs get bd` is run",
- "line": 46
- },
- {
- "file": "cmd/get/get.go",
- "description": "Each command can have some local or global flags.",
- "line": 56
- },
- {
- "file": "cmd/get/storage.go",
- "description": "This function is called when user runs `kubectl openebs get storage`",
- "line": 49
- },
- {
- "file": "pkg/storage/storage.go",
- "description": "If the storage(pools) of a well defined `casType` are requested, only that function, `f` is called, this happens when user runs something like `kubectl openebs get storage --cas-type=cstor`.",
- "line": 34
- },
- {
- "file": "pkg/storage/storage.go",
- "description": "When no cas-type is specified, each Storage Engines' storage(pool) is listed",
- "line": 49
- },
- {
- "file": "pkg/storage/storage.go",
- "description": "While some or all of `storage` conceptual resource can be listed, they can also be described individually, this function handles that.",
- "line": 76
- },
- {
- "file": "cmd/describe/volume.go",
- "description": "Like storage, volumes can be described too",
- "line": 49
- },
- {
- "file": "cmd/clusterinfo/cluster-info.go",
- "description": "Besides listing information about the cluster's storage resources of storage engines, the CLI can also identify which storage components are installed on the current cluster & can offer some version and health information.",
- "line": 39
- },
- {
- "file": "cmd/upgrade/upgrade.go",
- "description": "The CLI can also schedule jobs to trigger data plane upgrades of the storage components, right now only Jiva and some upgrade features of cstor are supported.",
- "line": 56
- },
- {
- "file": "pkg/volume/cstor.go",
- "description": "The logic for showing volumes are in the pkg/volumes package, all code is seggregated by the storage engine named filename.",
- "line": 62
- },
- {
- "file": "docs/cstor/README.md",
- "description": "When a new feature for a storage engine is added, it's usually documented here.",
- "line": 5
- }
- ],
- "ref": "codewalk"
-}
\ No newline at end of file
diff --git a/README.md b/README.md
index 216e2592..2e016e10 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ OpenEBSCTL is a kubectl plugin to manage OpenEBS storage components.
## Project Status
**Alpha**. Under active development and seeking [contributions from the community](#contributing).
-The CLI currently supports managing `cStor`, `Jiva`, `LocalPV-LVM`, `LocalPV-ZFS` Cas-Engines.
+The CLI currently supports managing `LocalPV-LVM`, `LocalPV-ZFS` and `LocalPV-HostPath` Engines.
## Table of Contents
* [Installation](#installation)
@@ -59,44 +59,33 @@ OpenEBSCTL is available on Linux, macOS and Windows platforms.
- `cd openebsctl`
- Run `make openebsctl`
-## Code Walkthrough
-
-1. Install [vscode](https://code.visualstudio.com/)
-2. Install [CodeTour plugin](https://marketplace.visualstudio.com/items?itemName=vsls-contrib.codetour) on vscode
-3. Open this project on vscode & press `[ctrl] + [shift] + [p]` or `[command] + [shift] + [p]` and click `CodeTour: Open The Tour File` and locate the appropriate `*.tour` file. The code walkthrough will begin. Happy Contributing!
-
## Usage
* ```bash
$ kubectl openebs
- openebs is a a kubectl plugin for interacting with OpenEBS storage components such as storage(pools, volumegroups), volumes, blockdevices, pvcs.
+ kubectl openebs is a kubectl plugin for interacting with OpenEBS storage components such as storage(zfspools, volumegroups), volumes, pvcs.
Find out more about OpenEBS on https://openebs.io/
-
+
Usage:
- kubectl openebs [command] [resource] [...names] [flags]
+ openebs [command]
Available Commands:
- completion Outputs shell completion code for the specified shell (bash or zsh)
- describe Provide detailed information about an OpenEBS resource
- get Provides fetching operations related to a Volume/Pool
- help Help about any command
- version Shows openebs kubectl plugin's version
+ cluster-info Show component version, status and running components for each installed engine
+ completion Outputs shell completion code for the specified shell (bash or zsh)
+ describe Provide detailed information about an OpenEBS resource
+ get Provides fetching operations related to a Volume/Storage
+ help Help about any command
+ version Shows openebs kubectl plugin's version
Flags:
- -h, --help help for openebs
- -n, --namespace string If present, the namespace scope for this CLI request
- --openebs-namespace string to read the openebs namespace from user.
- If not provided it is determined from components.
- --cas-type to specify the cas-type of the engine, for engine based filtering.
- ex- cstor, jiva, localpv-lvm, localpv-zfs.
- --debug to launch the debugging mode for cstor pvcs.
+ -h, --help help for openebs
+ -c, --kubeconfig string path to config file
+ -v, --version version for openebs
- Use "kubectl openebs command --help" for more information about a command.
+ Use "openebs [command] --help" for more information about a command.
```
* To know more about various engine specific commands check these:-
- * [cStor](docs/cstor/README.md)
- * [Jiva](docs/jiva/README.md)
* [LocalPV-LVM](docs/localpv-lvm/README.md)
* [LocalPV-ZFS](docs/localpv-zfs/README.md)
diff --git a/cmd/clusterinfo/cluster-info.go b/cmd/clusterinfo/cluster-info.go
index 9a74eca8..3962e4ce 100644
--- a/cmd/clusterinfo/cluster-info.go
+++ b/cmd/clusterinfo/cluster-info.go
@@ -22,14 +22,6 @@ import (
"github.com/spf13/cobra"
)
-const (
- clusterInfoCmdHelp = `Usage:
- kubectl openebs cluster-info
-Flags:
- -h, --help help for openebs get command
-`
-)
-
// NewCmdClusterInfo shows OpenEBSCTL cluster-info
func NewCmdClusterInfo(rootCmd *cobra.Command) *cobra.Command {
cmd := &cobra.Command{
@@ -39,6 +31,5 @@ func NewCmdClusterInfo(rootCmd *cobra.Command) *cobra.Command {
util.CheckErr(clusterinfo.ShowClusterInfo(), util.Fatal)
},
}
- cmd.SetUsageTemplate(clusterInfoCmdHelp)
return cmd
}
diff --git a/cmd/describe/describe.go b/cmd/describe/describe.go
index 57b905dc..2cdfaf81 100644
--- a/cmd/describe/describe.go
+++ b/cmd/describe/describe.go
@@ -20,37 +20,11 @@ import (
"github.com/spf13/cobra"
)
-const (
- volumeCommandHelpText = `Show detailed description of a specific OpenEBS resource:
-
-Usage:
- kubectl openebs describe [volume|storage|pvc] [...names] [flags]
-
-Describe a Volume:
- kubectl openebs describe volume [...names] [flags]
-
-Describe PVCs present in the same namespace:
- kubectl openebs describe pvc [...names] [flags]
-
-Describe a Storage :
- kubectl openebs describe storage [...names] [flags]
-
-Flags:
- -h, --help help for openebs
- -n, --namespace string to read the namespace for the pvc.
- --openebs-namespace string to read the openebs namespace from user.
- If not provided it is determined from components.
- --cas-type to specify the cas-type of the engine, for engine based filtering.
- ex- cstor, jiva, localpv-lvm, localpv-zfs.
- --debug to launch the debugging mode for cstor pvcs.
-`
-)
-
// NewCmdDescribe provides options for managing OpenEBS Volume
func NewCmdDescribe(rootCmd *cobra.Command) *cobra.Command {
cmd := &cobra.Command{
Use: "describe",
- ValidArgs: []string{"pool", "volume", "pvc"},
+ ValidArgs: []string{"storage", "volume", "pvc"},
Short: "Provide detailed information about an OpenEBS resource",
}
cmd.AddCommand(
@@ -58,6 +32,5 @@ func NewCmdDescribe(rootCmd *cobra.Command) *cobra.Command {
NewCmdDescribePVC(),
NewCmdDescribeStorage(),
)
- cmd.SetUsageTemplate(volumeCommandHelpText)
return cmd
}
diff --git a/cmd/describe/pvc.go b/cmd/describe/pvc.go
index 1d603b5c..e8b2c799 100644
--- a/cmd/describe/pvc.go
+++ b/cmd/describe/pvc.go
@@ -22,27 +22,10 @@ import (
"github.com/spf13/cobra"
)
-var (
- pvcInfoCommandHelpText = `This command fetches information and status of the various aspects
-of the PersistentVolumeClaims and its underlying related resources
-in the provided namespace. If no namespace is provided it uses default
-namespace for execution.
-
-Usage:
- kubectl openebs describe pvc [...names] [flags]
-
-Flags:
- -h, --help help for openebs
- -n, --namespace string to read the namespace for the pvc.
- --openebs-namespace string to read the openebs namespace from user.
- If not provided it is determined from components.
- --debug to launch the debugging mode for cstor pvcs.
-`
-)
-
// NewCmdDescribePVC Displays the pvc describe details
func NewCmdDescribePVC() *cobra.Command {
- var debug bool
+ var openebsNs string
+ var pvNs string
cmd := &cobra.Command{
Use: "pvc",
Aliases: []string{"pvcs", "persistentvolumeclaims", "persistentvolumeclaim"},
@@ -53,15 +36,10 @@ func NewCmdDescribePVC() *cobra.Command {
pvNs = "default"
}
openebsNamespace, _ = cmd.Flags().GetString("openebs-namespace")
- if debug {
- util.CheckErr(persistentvolumeclaim.Debug(args, pvNs, openebsNamespace), util.Fatal)
- } else {
- util.CheckErr(persistentvolumeclaim.Describe(args, pvNs, openebsNamespace), util.Fatal)
- }
-
+ util.CheckErr(persistentvolumeclaim.Describe(args, pvNs, openebsNamespace), util.Fatal)
},
}
- cmd.SetUsageTemplate(pvcInfoCommandHelpText)
- cmd.Flags().BoolVar(&debug, "debug", false, "Debug cstor volume")
+ cmd.PersistentFlags().StringVarP(&openebsNs, "openebs-namespace", "", "", "to read the openebs namespace from user.\nIf not provided it is determined from components.")
+ cmd.PersistentFlags().StringVarP(&pvNs, "namespace", "n", "", "to read the namespace of the pvc from the user. If not provided defaults to default namespace.")
return cmd
}
diff --git a/cmd/describe/storage.go b/cmd/describe/storage.go
index c75604a4..49987b5c 100644
--- a/cmd/describe/storage.go
+++ b/cmd/describe/storage.go
@@ -17,6 +17,7 @@ limitations under the License.
package describe
import (
+ "fmt"
"strings"
"github.com/openebs/openebsctl/pkg/storage"
@@ -24,25 +25,10 @@ import (
"github.com/spf13/cobra"
)
-var (
- storageInfoCommandHelpText = `This command fetches information and status of the various aspects
-of the openebs storage and its underlying related resources in the openebs namespace.
-
-Usage:
- kubectl openebs describe storage [...names] [flags]
-
-Flags:
- -h, --help help for openebs
- --openebs-namespace string to read the openebs namespace from user.
- If not provided it is determined from components.
- --cas-type to specify the cas-type of the engine, for engine based filtering.
- ex- cstor, jiva, localpv-lvm, localpv-zfs.
-`
-)
-
// NewCmdDescribeStorage displays OpenEBS storage related information.
func NewCmdDescribeStorage() *cobra.Command {
var casType string
+ var openebsNs string
cmd := &cobra.Command{
Use: "storage",
Aliases: []string{"storages", "s"},
@@ -54,7 +40,7 @@ func NewCmdDescribeStorage() *cobra.Command {
util.CheckErr(storage.Describe(args, openebsNs, casType), util.Fatal)
},
}
- cmd.SetUsageTemplate(storageInfoCommandHelpText)
- cmd.PersistentFlags().StringVarP(&casType, "cas-type", "", "", "the cas-type filter option for fetching resources")
+ cmd.PersistentFlags().StringVarP(&openebsNs, "openebs-namespace", "", "", "to read the openebs namespace from user.\nIf not provided it is determined from components.")
+ cmd.PersistentFlags().StringVarP(&casType, "cas-type", "", "", fmt.Sprintf("the type of the engine %s, %s", util.LVMCasType, util.ZFSCasType))
return cmd
}
diff --git a/cmd/describe/volume.go b/cmd/describe/volume.go
index 4a0e414c..dce278ef 100644
--- a/cmd/describe/volume.go
+++ b/cmd/describe/volume.go
@@ -22,33 +22,18 @@ import (
"github.com/spf13/cobra"
)
-var (
- volumeInfoCommandHelpText = `This command fetches information and status of the various
-aspects of a cStor Volume such as ISCSI, Controller, and Replica.
-
-Usage:
- kubectl openebs describe volume [...names] [flags]
-
-Flags:
- -h, --help help for openebs
- -n, --namespace string to read the namespace for the pvc.
- --openebs-namespace string to read the openebs namespace from user.
- If not provided it is determined from components.
-`
-)
-
// NewCmdDescribeVolume displays OpenEBS Volume information.
func NewCmdDescribeVolume() *cobra.Command {
+ var openebsNs string
cmd := &cobra.Command{
Use: "volume",
Aliases: []string{"volumes", "vol", "v"},
Short: "Displays Openebs information",
Run: func(cmd *cobra.Command, args []string) {
- // TODO: Get this from flags, pflag, etc
openebsNS, _ := cmd.Flags().GetString("openebs-namespace")
util.CheckErr(volume.Describe(args, openebsNS), util.Fatal)
},
}
- cmd.SetUsageTemplate(volumeInfoCommandHelpText)
+ cmd.PersistentFlags().StringVarP(&openebsNs, "openebs-namespace", "", "", "to read the openebs namespace from user.\nIf not provided it is determined from components.")
return cmd
}
diff --git a/cmd/generate/generate.go b/cmd/generate/generate.go
deleted file mode 100644
index 3bddad9d..00000000
--- a/cmd/generate/generate.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package generate
-
-import (
- "strconv"
- "strings"
-
- "github.com/openebs/openebsctl/pkg/generate"
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/spf13/cobra"
-)
-
-// NewCmdGenerate provides options for generating
-func NewCmdGenerate() *cobra.Command {
- cmd := &cobra.Command{
- Use: "generate",
- Short: "Generate one or more OpenEBS resource like cspc",
- ValidArgs: []string{"cspc"},
- }
- cmd.AddCommand(NewCmdGenerateCStorStoragePoolCluster())
- return cmd
-}
-
-// NewCmdGenerateCStorStoragePoolCluster provides options for generating cspc
-// NOTE: When other custom resources need to be generated, the function
-// should be renamed appropriately, as of now it made no sense to generically
-// state pools when other pools aren't supported.
-func NewCmdGenerateCStorStoragePoolCluster() *cobra.Command {
- var nodes, raidType, cap string
- var devices int
- cmd := &cobra.Command{
- Use: "cspc",
- Short: "Generates cspc resources YAML/configuration which can be used to provision cStor storage pool clusters",
- Run: func(cmd *cobra.Command, args []string) {
- node, _ := cmd.Flags().GetString("nodes")
- raid, _ := cmd.Flags().GetString("raidtype")
- capacity, _ := cmd.Flags().GetString("capacity")
- devs := numDevices(cmd)
- nodeList := strings.Split(node, ",")
- util.CheckErr(generate.CSPC(nodeList, devs, raid, capacity), util.Fatal)
- },
- }
- cmd.PersistentFlags().StringVarP(&nodes, "nodes", "", "",
- "comma separated set of nodes for pool creation --nodes=node1,node2,node3,node4")
- _ = cmd.MarkPersistentFlagRequired("nodes")
- cmd.PersistentFlags().StringVarP(&raidType, "raidtype", "", "stripe",
- "allowed RAID configuration such as, stripe, mirror, raid, raidz2")
- cmd.PersistentFlags().StringVarP(&cap, "capacity", "", "10Gi",
- "minimum capacity of the blockdevices to pick up for pool creation")
- cmd.PersistentFlags().IntVar(&devices, "number-of-devices", 1, "number of devices per node, selects default based on raid-type")
- return cmd
-}
-
-// numDevices figures out the number of devices based on the raid type
-func numDevices(cmd *cobra.Command) int {
- // if number-of-devices is not set, set it to appropriate value
- if !cmd.Flag("number-of-devices").Changed {
- var devCount = map[string]int{
- "stripe": 1,
- "mirror": 2,
- "raidz": 3,
- "raidz2": 4}
- switch cmd.Flag("raidtype").Value.String() {
- case "stripe", "mirror", "raidz", "raidz2":
- c := devCount[cmd.Flag("raidtype").Value.String()]
- err := cmd.Flags().Set("number-of-devices", strconv.Itoa(c))
- if err != nil {
- return 1
- }
- return c
- }
- } else {
- d, _ := cmd.Flags().GetInt("number-of-devices")
- return d
- }
- // setting default value to 1
- return 1
-}
diff --git a/cmd/get/blockdevice.go b/cmd/get/blockdevice.go
deleted file mode 100644
index e7e9799a..00000000
--- a/cmd/get/blockdevice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package get
-
-import (
- "github.com/openebs/openebsctl/pkg/blockdevice"
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/spf13/cobra"
-)
-
-var (
- bdListCommandHelpText = `This command displays status of available OpenEBS BlockDevice(s).
-
-Usage:
- kubectl openebs get bd [flags]
-
-Flags:
- -h, --help help for openebs get bd command
- --openebs-namespace string filter by a fixed OpenEBS namespace.
-`
-)
-
-// NewCmdGetBD displays status of OpenEBS BlockDevice(s)
-func NewCmdGetBD() *cobra.Command {
- cmd := &cobra.Command{
- Use: "bd",
- Aliases: []string{"bds", "blockdevice", "blockdevices"},
- Short: "Displays status information about BlockDevice(s)",
- Run: func(cmd *cobra.Command, args []string) {
- // TODO: Should this method create the k8sClient object
- openebsNS, _ := cmd.Flags().GetString("openebs-namespace")
- util.CheckErr(blockdevice.Get(args, openebsNS), util.Fatal)
- },
- }
- cmd.SetUsageTemplate(bdListCommandHelpText)
- return cmd
-}
diff --git a/cmd/get/get.go b/cmd/get/get.go
index ea25391f..f3e5310f 100644
--- a/cmd/get/get.go
+++ b/cmd/get/get.go
@@ -20,44 +20,16 @@ import (
"github.com/spf13/cobra"
)
-const (
- getCmdHelp = `Display one or many OpenEBS resources like volumes, storages, blockdevices.
-
-Usage:
- kubectl openebs get [volume|storage|bd] [flags]
-
-Get volumes:
- kubectl openebs get volume [flags]
-
-Get storages:
- kubectl openebs get storage [flags]
-
-Get blockdevices:
- kubectl openebs get bd
-
-Flags:
- -h, --help help for openebs get command
- --openebs-namespace string filter by a fixed OpenEBS namespace
- If not provided it is determined from components.
- --cas-type to specify the cas-type of the engine, for engine based filtering.
- ex- cstor, jiva, localpv-lvm, localpv-zfs.
-`
-)
-
// NewCmdGet provides options for managing OpenEBS Volume
func NewCmdGet(rootCmd *cobra.Command) *cobra.Command {
- var casType string
cmd := &cobra.Command{
Use: "get",
Short: "Provides fetching operations related to a Volume/Storage",
ValidArgs: []string{"storage", "volume", "bd"},
}
- cmd.SetUsageTemplate(getCmdHelp)
- cmd.PersistentFlags().StringVarP(&casType, "cas-type", "", "", "the cas-type filter option for fetching resources")
cmd.AddCommand(
NewCmdGetVolume(),
NewCmdGetStorage(),
- NewCmdGetBD(),
)
return cmd
}
diff --git a/cmd/get/storage.go b/cmd/get/storage.go
index 8ca40f72..8547ea99 100644
--- a/cmd/get/storage.go
+++ b/cmd/get/storage.go
@@ -17,28 +17,17 @@ limitations under the License.
package get
import (
+ "fmt"
+
"github.com/openebs/openebsctl/pkg/storage"
"github.com/openebs/openebsctl/pkg/util"
"github.com/spf13/cobra"
)
-var (
- storageListCommandHelpText = `This command lists of all/specific known storages in the Cluster.
-
-Usage:
- kubectl openebs get storage [flags]
-
-Flags:
- -h, --help help for openebs get command
- --openebs-namespace string filter by a fixed OpenEBS namespace
- If not provided it is determined from components.
- --cas-type to specify the cas-type of the engine, for engine based filtering.
- ex- cstor, jiva, localpv-lvm, localpv-zfs.
-`
-)
-
// NewCmdGetStorage displays status of OpenEBS Pool(s)
func NewCmdGetStorage() *cobra.Command {
+ var casType string
+ var openebsNs string
cmd := &cobra.Command{
Use: "storage",
Aliases: []string{"storages", "s"},
@@ -49,6 +38,7 @@ func NewCmdGetStorage() *cobra.Command {
util.CheckErr(storage.Get(args, openebsNS, casType), util.Fatal)
},
}
- cmd.SetUsageTemplate(storageListCommandHelpText)
+ cmd.PersistentFlags().StringVarP(&openebsNs, "openebs-namespace", "", "", "to read the openebs namespace from user.\nIf not provided it is determined from components.")
+ cmd.PersistentFlags().StringVarP(&casType, "cas-type", "", "", fmt.Sprintf("the type of the engine %s, %s", util.LVMCasType, util.ZFSCasType))
return cmd
}
diff --git a/cmd/get/volume.go b/cmd/get/volume.go
index 4e12be09..644fa58e 100644
--- a/cmd/get/volume.go
+++ b/cmd/get/volume.go
@@ -17,37 +17,28 @@ limitations under the License.
package get
import (
+ "fmt"
+
"github.com/openebs/openebsctl/pkg/util"
"github.com/openebs/openebsctl/pkg/volume"
"github.com/spf13/cobra"
)
-var (
- volumesListCommandHelpText = `Usage:
- kubectl openebs get volume [flags]
-
-Flags:
- -h, --help help for openebs get command
- --openebs-namespace string filter by a fixed OpenEBS namespace
- If not provided it is determined from components.
- --cas-type to specify the cas-type of the engine, for engine based filtering.
- ex- cstor, jiva, localpv-lvm, localpv-zfs.
-`
-)
-
// NewCmdGetVolume displays status of OpenEBS Volume(s)
func NewCmdGetVolume() *cobra.Command {
+ var openebsNs string
+ var casType string
cmd := &cobra.Command{
Use: "volume",
Aliases: []string{"vol", "v", "volumes"},
Short: "Displays status information about Volume(s)",
Run: func(cmd *cobra.Command, args []string) {
- // TODO: Should this method create the k8sClient object
openebsNS, _ := cmd.Flags().GetString("openebs-namespace")
casType, _ := cmd.Flags().GetString("cas-type")
util.CheckErr(volume.Get(args, openebsNS, casType), util.Fatal)
},
}
- cmd.SetUsageTemplate(volumesListCommandHelpText)
+ cmd.PersistentFlags().StringVarP(&openebsNs, "openebs-namespace", "", "", "to read the openebs namespace from user.\nIf not provided it is determined from components.")
+ cmd.PersistentFlags().StringVarP(&casType, "cas-type", "", "", fmt.Sprintf("the type of the engine %s, %s", util.LVMCasType, util.ZFSCasType))
return cmd
}
diff --git a/cmd/openebs.go b/cmd/openebs.go
index 06b07b41..3ae9a436 100644
--- a/cmd/openebs.go
+++ b/cmd/openebs.go
@@ -22,70 +22,35 @@ import (
"github.com/openebs/openebsctl/cmd/clusterinfo"
"github.com/openebs/openebsctl/cmd/completion"
"github.com/openebs/openebsctl/cmd/describe"
- "github.com/openebs/openebsctl/cmd/generate"
"github.com/openebs/openebsctl/cmd/get"
- "github.com/openebs/openebsctl/cmd/upgrade"
v "github.com/openebs/openebsctl/cmd/version"
"github.com/openebs/openebsctl/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
-const (
- usageTemplate = `Usage:
- kubectl openebs [command] [resource] [...names] [flags]
-
-Available Commands:
- completion Outputs shell completion code for the specified shell (bash or zsh)
- describe Provide detailed information about an OpenEBS resource
- generate Helps generate a storage custom resource
- get Provides fetching operations related to a Volume/CSPC
- help Help about any command
- version Shows openebs kubectl plugin's version
- cluster-info Show component version, status and running components for each installed engine
- upgrade Upgrade CSI Interfaces and Volumes
-
-Flags:
- -h, --help help for openebs
- -n, --namespace string If present, the namespace scope for this CLI request
- --openebs-namespace string to read the openebs namespace from user.
- If not provided it is determined from components.
- --cas-type to specify the cas-type of the engine, for engine based filtering.
- ex- cstor, jiva, localpv-lvm, localpv-zfs.
- --debug to launch the debugging mode for cstor pvcs.
- -c, --kubeconfig Path to configuration file
-
-Use "kubectl openebs command --help" for more information about a command.
-`
-)
-
// Version is the version of the openebsctl binary, info filled by go-releaser
var Version = "dev"
// NewOpenebsCommand creates the `openebs` command and its nested children.
func NewOpenebsCommand() *cobra.Command {
- var openebsNs string
+ //var openebsNs string
cmd := &cobra.Command{
Use: "openebs",
- ValidArgs: []string{"get", "describe", "completion", "upgrade"},
- Short: "openebs is a a kubectl plugin for interacting with OpenEBS storage components",
- Long: `openebs is a a kubectl plugin for interacting with OpenEBS storage components such as storage(pools, volumegroups), volumes, blockdevices, pvcs.
+ ValidArgs: []string{"get", "describe", "completion"},
+ Short: "kubectl openebs is a kubectl plugin for interacting with OpenEBS storage components",
+ Long: `openebs is a kubectl plugin for interacting with OpenEBS storage components such as storage(zfspools, volumegroups), volumes, pvcs.
Find out more about OpenEBS on https://openebs.io/`,
Version: Version,
TraverseChildren: true,
}
- cmd.SetUsageTemplate(usageTemplate)
cmd.AddCommand(
- // Add a helper command to show what version of X is installed
completion.NewCmdCompletion(cmd),
get.NewCmdGet(cmd),
describe.NewCmdDescribe(cmd),
v.NewCmdVersion(cmd),
clusterinfo.NewCmdClusterInfo(cmd),
- upgrade.NewCmdVolumeUpgrade(cmd),
- generate.NewCmdGenerate(),
)
- cmd.PersistentFlags().StringVarP(&openebsNs, "openebs-namespace", "", "", "to read the openebs namespace from user.\nIf not provided it is determined from components.")
cmd.PersistentFlags().StringVarP(&util.Kubeconfig, "kubeconfig", "c", "", "path to config file")
cmd.Flags().AddGoFlagSet(flag.CommandLine)
_ = flag.CommandLine.Parse([]string{})
diff --git a/cmd/upgrade/status.go b/cmd/upgrade/status.go
deleted file mode 100644
index ce10bb43..00000000
--- a/cmd/upgrade/status.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package upgrade
-
-import (
- "github.com/openebs/openebsctl/pkg/upgrade/status"
- "github.com/spf13/cobra"
-)
-
-func NewCmdUpgradeStatus() *cobra.Command {
- cmd := &cobra.Command{
- Use: "status",
- Aliases: []string{"Status"},
- Short: "Display Upgrade-status for a running upgrade-job",
- Run: func(cmd *cobra.Command, args []string) {
- openebsNS, _ := cmd.Flags().GetString("openebs-namespace")
- status.GetJobStatus(openebsNS)
- },
- }
- return cmd
-}
diff --git a/cmd/upgrade/upgrade.go b/cmd/upgrade/upgrade.go
deleted file mode 100644
index 7d19afee..00000000
--- a/cmd/upgrade/upgrade.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package upgrade
-
-import (
- "fmt"
-
- "github.com/openebs/openebsctl/pkg/upgrade"
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/spf13/cobra"
-)
-
-const (
- upgradeCmdHelp = `Upgrade OpenEBS Data Plane Components
-
- Usage:
- kubectl openebs upgrade volume [flags]
-
- Flags:
- -h, --help help for openebs upgrade command
- -f, --file provide menifest file containing job upgrade information
- --cas-type [jiva | cStor | LocalPv] specify the cas-type to upgrade
- --to-version the desired version for upgradation
- --image-prefix if required the image prefix of the volume deployments can be
- changed using the flag, defaults to whatever was present on old
- deployments.
- --image-tag if required the image tags for volume deployments can be changed
- to a custom image tag using the flag,
- defaults to the --to-version mentioned above.
- `
-)
-
-// NewCmdVolumeUpgrade to upgrade volumes and interfaces
-func NewCmdVolumeUpgrade(rootCmd *cobra.Command) *cobra.Command {
- upgradeOpts := upgrade.UpgradeOpts{}
- cmd := &cobra.Command{
- Use: "upgrade",
- Short: "Upgrade Volumes, storage engines, and interfaces in openebs application",
- Aliases: []string{"update"},
- Run: func(cmd *cobra.Command, args []string) {
- switch upgradeOpts.CasType {
- case util.JivaCasType:
- upgrade.InstantiateJivaUpgrade(upgradeOpts)
- case util.CstorCasType:
- upgrade.InstantiateCspcUpgrade(upgradeOpts)
- default:
- fmt.Println("No or wrong cas-type provided")
- fmt.Println("To upgrade other cas-types follow: https://github.com/openebs/upgrade#upgrading-openebs-reources")
- }
- },
- }
- cmd.AddCommand(NewCmdUpgradeStatus())
- cmd.SetUsageTemplate(upgradeCmdHelp)
- cmd.PersistentFlags().StringVarP(&upgradeOpts.CasType, "cas-type", "", "", "the cas-type filter option for fetching resources")
- cmd.PersistentFlags().StringVarP(&upgradeOpts.ToVersion, "to-version", "", "", "the version to which the resources need to be upgraded")
- cmd.PersistentFlags().StringVarP(&upgradeOpts.ImagePrefix, "image-prefix", "", "", "provide image prefix for the volume deployments")
- cmd.PersistentFlags().StringVarP(&upgradeOpts.ImageTag, "image-tag", "", "", "provide custom image tag for the volume deployments")
- return cmd
-}
diff --git a/cmd/version/version.go b/cmd/version/version.go
index 08bf4f09..a3a5e4e5 100644
--- a/cmd/version/version.go
+++ b/cmd/version/version.go
@@ -18,7 +18,7 @@ package get
import (
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"os"
@@ -30,14 +30,6 @@ import (
"k8s.io/cli-runtime/pkg/printers"
)
-const (
- versionCmdHelp = `Usage:
- kubectl openebs version
-Flags:
- -h, --help help for openebs get command
-`
-)
-
// Get versions of components, return "Not Installed" on empty version
func getValidVersion(version string) string {
if version != "" {
@@ -67,25 +59,21 @@ func NewCmdVersion(rootCmd *cobra.Command) *cobra.Command {
{
Cells: []interface{}{"Client", getValidVersion(rootCmd.Version)},
},
- {
- Cells: []interface{}{"OpenEBS CStor", getValidVersion(componentVersionMap[util.CstorCasType])},
- },
- {
- Cells: []interface{}{"OpenEBS Jiva", getValidVersion(componentVersionMap[util.JivaCasType])},
- },
{
Cells: []interface{}{"OpenEBS LVM LocalPV", getValidVersion(componentVersionMap[util.LVMCasType])},
},
{
Cells: []interface{}{"OpenEBS ZFS LocalPV", getValidVersion(componentVersionMap[util.ZFSCasType])},
},
+ {
+ Cells: []interface{}{"OpenEBS HostPath LocalPV", getValidVersion(componentVersionMap[util.LocalPvHostpathCasType])},
+ },
}
util.TablePrinter(util.VersionColumnDefinition, rows, printers.PrintOptions{Wide: true})
checkForLatestVersion(rootCmd.Version)
},
}
- cmd.SetUsageTemplate(versionCmdHelp)
return cmd
}
@@ -102,7 +90,7 @@ func checkForLatestVersion(currVersion string) {
_ = resp.Body.Close()
}()
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err != nil {
// The separator for the error print
fmt.Println()
diff --git a/docs/cstor/README.md b/docs/cstor/README.md
deleted file mode 100644
index c5fd7868..00000000
--- a/docs/cstor/README.md
+++ /dev/null
@@ -1,272 +0,0 @@
-
-
-# CSTOR Storage Engine Components
-
-## Table of Contents
-* [cStor](#cstor)
- * [Get cStor volumes](#get-cstor-volumes)
- * [Get cStor pools](#get-cstor-pools)
- * [Describe cStor volumes](#describe-cstor-volumes)
- * [Describe cStor pool](#describe-cstor-pool)
- * [Describe cStor PVCs](#describe-cstor-pvcs)
- * [Debugging cStor Volumes](#debugging-cstor-volumes)
- * [Generate CSPC](#generate-cspc)
- * [Update CSPC Pools](#update-cspc-pools)
-* [BlockDevice](#blockdevice)
- * [Get BlockDevices by Nodes](#get-blockdevices-by-nodes)
-
-* #### `cStor`
- * #### Get `cStor` volumes
- ```bash
- $ kubectl openebs get volumes --cas-type=cstor
- NAMESPACE NAME STATUS VERSION CAPACITY STORAGE CLASS ATTACHED ACCESS MODE ATTACHED NODE
- cstor pvc-193844d7-3bef-45a3-8b7d-ed3991391b45 Healthy 2.9.0 5.0 GiB cstor-csi-sc Bound ReadWriteOnce N/A
- cstor pvc-b84f60ae-3f26-4110-a85d-bce7ec00dacc Healthy 2.0.0 20 GiB common-storageclass Bound ReadWriteOnce node1-virtual-machine
- ```
- Note: For volumes not attached to any application, the `ATTACH NODE` would be shown as `N/A`.
- * #### Get `cStor` pools
- ```bash
- $ kubectl openebs get storage --cas-type=cstor
- NAME HOSTNAME FREE CAPACITY READ ONLY PROVISIONED REPLICAS HEALTHY REPLICAS STATUS AGE
- cstor-storage-k5c2 node1-virtual-machine 45 GiB 45 GiB false 1 0 ONLINE 10d2h
- default-cstor-disk-dcrm node1-virtual-machine 73 GiB 90 GiB false 7 7 ONLINE 27d2h
- default-cstor-disk-fp6v node2-virtual-machine 73 GiB 90 GiB false 7 7 ONLINE 27d2h
- default-cstor-disk-rhwj node1-virtual-machine 73 GiB 90 GiB false 7 4 OFFLINE 27d2h
- ```
- * #### Describe `cStor` volumes
- ```bash
- $ kubectl openebs describe volume pvc-193844d7-3bef-45a3-8b7d-ed3991391b45
-
- pvc-193844d7-3bef-45a3-8b7d-ed3991391b45 Details :
- -----------------
- NAME : pvc-193844d7-3bef-45a3-8b7d-ed3991391b45
- ACCESS MODE : ReadWriteOnce
- CSI DRIVER : cstor.csi.openebs.io
- STORAGE CLASS : cstor-csi
- VOLUME PHASE : Released
- VERSION : 2.9.0
- CSPC : cstor-storage
- SIZE : 5.0 GiB
- STATUS : Init
- REPLICA COUNT : 1
-
- Portal Details :
- ------------------
- IQN : iqn.2016-09.com.openebs.cstor:pvc-193844d7-3bef-45a3-8b7d-ed3991391b45
- VOLUME NAME : pvc-193844d7-3bef-45a3-8b7d-ed3991391b45
- TARGET NODE NAME : node1-virtual-machine
- PORTAL : 10.106.27.10:3260
- TARGET IP : 10.106.27.10
-
- Replica Details :
- -----------------
- NAME TOTAL USED STATUS AGE
- pvc-193844d7-3bef-45a3-8b7d-ed3991391b45-cstor-storage-k5c2 72 KiB 4.8 MiB Healthy 10d3h
-
- Cstor Completed Backup Details :
- -------------------------------
- NAME BACKUP NAME VOLUME NAME LAST SNAP NAME
- backup4-pvc-b026cde1-28d9-40ff-ba95-2f3a6c1d5668 backup4 pvc-193844d7-3bef-45a3-8b7d-ed3991391b45 backup4
-
- Cstor Restores Details :
- -----------------------
- NAME RESTORE NAME VOLUME NAME RESTORE SOURCE STORAGE CLASS STATUS
- backup4-3cc0839b-8428-4361-8b12-eb8509208871 backup4 pvc-193844d7-3bef-45a3-8b7d-ed3991391b45 192.168.1.165:9000 cstor-csi 0
- ```
- * #### Describe `cStor` pool
- ```bash
- $ kubectl openebs describe storage default-cstor-disk-fp6v --openebs-namespace=openebs
-
- default-cstor-disk-fp6v Details :
- ----------------
- NAME : default-cstor-disk-fp6v
- HOSTNAME : node1-virtual-machine
- SIZE : 90 GiB
- FREE CAPACITY : 73 GiB
- READ ONLY STATUS : false
- STATUS : ONLINE
- RAID TYPE : stripe
-
- Blockdevice details :
- ---------------------
- NAME CAPACITY STATE
- blockdevice-8a5b69d8a2b23276f8daeac3c8179f9d 100 GiB Active
-
- Replica Details :
- -----------------
- NAME PVC NAME SIZE STATE
- pvc-b84f60ae-3f26-4110-a85d-bce7ec00dacc-default-cstor-disk-fp6v mongo 992 MiB Healthy
- ```
- * #### Describe `cstor` pvcs
- Describe any PVC using this command, it will determine the cas engine and show details accordingly.
- ```bash
- $ kubectl openebs describe pvc mongo
-
- mongo Details :
- ------------------
- NAME : mongo
- NAMESPACE : default
- CAS TYPE : cstor
- BOUND VOLUME : pvc-b84f60ae-3f26-4110-a85d-bce7ec00dacc
- ATTACHED TO NODE : node1-virtual-machine
- POOL : default-cstor-disk
- STORAGE CLASS : common-storageclass
- SIZE : 20 GiB
- USED : 1.1 GiB
- PV STATUS : Healthy
-
- Target Details :
- ----------------
- NAMESPACE NAME READY STATUS AGE IP NODE
- openebs pvc-b84f60ae-3f26-4110-a85d-bce7ec00dacc-target-7487cbc8bc5ttzl 3/3 Running 26d22h 172.17.0.7 node1-virtual-machine
-
- Replica Details :
- -----------------
- NAME TOTAL USED STATUS AGE
- pvc-b84f60ae-3f26-4110-a85d-bce7ec00dacc-default-cstor-disk-dcrm 992 MiB 1.1 GiB Healthy 26d23h
- pvc-b84f60ae-3f26-4110-a85d-bce7ec00dacc-default-cstor-disk-fp6v 992 MiB 1.1 GiB Healthy 26d23h
- pvc-b84f60ae-3f26-4110-a85d-bce7ec00dacc-default-cstor-disk-rhwj 682 MiB 832 MiB Offline 26d23h
-
- Additional Details from CVC :
- -----------------------------
- NAME : pvc-b84f60ae-3f26-4110-a85d-bce7ec00dacc
- REPLICA COUNT : 3
- POOL INFO : [default-cstor-disk-dcrm default-cstor-disk-fp6v default-cstor-disk-rhwj]
- VERSION : 2.1.0
- UPGRADING : true
- ```
- * #### Debugging `cstor` volumes
- _NOTE: Currently supported only for cstor_
- ```bash
- $ kubectl openebs describe pvc mongo --openebs-namespace=openebs --debug
- ```
- ![img.png](img.png)
-
- * #### Generate CSPC
-
- _NOTE: supported RAID Types include stripe, mirror, raidz, raidz2_
-
- ##### Supported flags
-
- Flag Name | Purpose | Example Values | Default-Value
- --- | --- | --- | ---
- **--nodes** | comma separated list of node's hostnames | `node1` `node1,node2,node3` | ""
- --raidtype | defaults to _stripe_, supports | `stripe`, `mirror`, `raidz`, `raidz2` | stripe
- --capacity | minimum capacity of individual blockdevices in the CSPC | `10Gi`, `10G`, `10GB` | `10Gi`
- --number-of-devices | number of blockdevices in each node | numbers above zero | _value depends on the raidType_
-
-
- ```bash
- # stripe pool example
- $ kubectl openebs generate cspc --nodes=shubham
- apiVersion: cstor.openebs.io/v1
- kind: CStorPoolCluster
- metadata:
- creationTimestamp: null
- generateName: cstor
- namespace: openebs
- spec:
- pools:
- - dataRaidGroups:
- - blockDevices:
- # /var/openebs/sparse/0-ndm-sparse.img 10GB
- - blockDeviceName: sparse-6b277da87b7487e501c03ea0001d6d92
- nodeSelector:
- kubernetes.io/hostname: shubham
- poolConfig:
- dataRaidGroupType: stripe
-
- # raidz pool example
- $ kubectl openebs generate cspc --nodes=node1 --raidtype=raidz
- apiVersion: cstor.openebs.io/v1
- kind: CStorPoolCluster
- metadata:
- creationTimestamp: null
- generateName: cstor
- namespace: openebs
- spec:
- pools:
- - dataRaidGroups:
- - blockDevices:
- # /dev/nvme2n1 100.0GiB
- - blockDeviceName: blockdevice-8a5b69d8a2b23276f8daeac3c8179f9d
- # /dev/nvme1n1 100.0GiB
- - blockDeviceName: blockdevice-c21bc3b79a98c7e8508f47558cc94f36
- # /dev/nvme10n1 100.0GiB
- - blockDeviceName: blockdevice-e5a1c3c1b66c864588a66d0a7ff8ca58
- nodeSelector:
- kubernetes.io/hostname: node1
- poolConfig:
- dataRaidGroupType: raidz
-
- # raidz2 failure example
- $ kubectl openebs generate cspc --nodes=minikube --raidtype=raidz2
- raidz2 pool requires a minimum of 6 block device per node
- ```
-
- * #### Update CSPC Pools
-
- ```bash
- $ kubectl openebs upgrade --cas-type cstor
- Fetching CSPC control plane and Data Plane Version
- Current Version: 2.12.0
- Desired Version: 3.0.0
- Previous job failed.
- Reason: BackoffLimitExceeded
- Creating a new Job with name: cstor-cspc-upgrade-vfn87
- Creating Dry-run job...
- metadata:
- creationTimestamp: null
- generateName: cstor-cspc-upgrade-
- labels:
- cas-type: cstor
- name: cstor-cspc-upgrade
- namespace: openebs
- spec:
- backoffLimit: 4
- template:
- metadata:
- creationTimestamp: null
- spec:
- containers:
- - args:
- - cstor-cspc
- - --from-version=2.12.0
- - --to-version=3.0.0
- - --v=4
- - cstor-storage
- env:
- - name: OPENEBS_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- image: openebs/upgrade:3.0.0
- imagePullPolicy: IfNotPresent
- name: upgrade-cstor-cspc-go
- resources: {}
- restartPolicy: OnFailure
- serviceAccountName: openebs-maya-operator
- status: {}
- Continue?: y
- Creating a batch job...
- Job Created successfully:
- ```
-
-* #### `BlockDevice`
- * #### Get `BlockDevices` by Nodes
- ```bash
- $ kubectl openebs get bd
- NAME PATH SIZE CLAIMSTATE STATUS FSTYPE MOUNTPOINT
- minikube-2
- ├─blockdevice-94312c16fb24476c3a155c34f0c211c3 /dev/sdb1 50 GiB Unclaimed Inactive ext4 /var/lib/kubelet/mntpt
- └─blockdevice-94312c16fb24476c3a155c34f0c2143c /dev/sdb1 50 GiB Claimed Active
-
- minikube-1
- ├─blockdevice-94312c16fb24476c3a155c34f0c6153a /dev/sdb1 50 GiB Claimed Inactive zfs_member /var/openebs/zfsvol
- ├─blockdevice-8a5b69d8a2b23276f8daeac3c8179f9d /dev/nvme2n1 100 GiB Claimed Active
- └─blockdevice-e5a1c3c1b66c864588a66d0a7ff8ca58 /dev/nvme10n1 100 GiB Claimed Active
-
- minikube-3
- └─blockdevice-94312c16fb24476c3a155c34f0c6199k /dev/sdb1 50 GiB Claimed Active
- ```
-
diff --git a/docs/cstor/img.png b/docs/cstor/img.png
deleted file mode 100644
index 36145487..00000000
Binary files a/docs/cstor/img.png and /dev/null differ
diff --git a/docs/jiva/README.md b/docs/jiva/README.md
deleted file mode 100644
index 20732966..00000000
--- a/docs/jiva/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-
-# JIVA Storage Engine Commands
-
-## Table of Contents
-* [Jiva](#jiva)
- * [Get Jiva volumes](#get-jiva-volumes)
- * [Describe Jiva volumes](#describe-jiva-volumes)
- * [Describe Jiva PVCs](#describe-jiva-pvcs)
-
-* #### `Jiva`
- * #### Get `Jiva` volumes
- ```bash
- $ kubectl openebs get volumes --cas-type=jiva
- NAMESPACE NAME STATUS VERSION CAPACITY STORAGE CLASS ATTACHED ACCESS MODE ATTACHED NODE
- openebs pvc-478a8329-f02d-47e5-8288-0c28b582be25 RW 2.9.0 4Gi openebs-jiva-csi-sc Released ReadWriteOnce minikube-2
- ```
- Note: For volumes not attached to any application, the `ATTACH NODE` would be shown as `N/A`.
-
- * #### Describe `Jiva` volumes
- ```bash
- $ kubectl openebs describe volume pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca
-
- pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca Details :
- -----------------
- NAME : pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca
- ACCESS MODE : ReadWriteOnce
- CSI DRIVER : jiva.csi.openebs.io
- STORAGE CLASS : openebs-jiva-csi-sc
- VOLUME PHASE : Bound
- VERSION : 2.12.1
- JVP : jivavolumepolicy
- SIZE : 4.0GiB
- STATUS : RW
- REPLICA COUNT : 1
-
- Portal Details :
- ------------------
- IQN : iqn.2016-09.com.openebs.jiva:pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca
- VOLUME NAME : pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca
- TARGET NODE NAME : minikube
- PORTAL : 10.108.189.51:3260
-
- Controller and Replica Pod Details :
- -----------------------------------
- NAMESPACE NAME MODE NODE STATUS IP READY AGE
- jiva pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca-jiva-ctrl-64c964bvtbk5 RW minikube Running 172.17.0.9 1/1 8h25m
- jiva pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca-jiva-rep-0 RW minikube Running 172.17.0.10 1/1 8h25m
-
- Replica Data Volume Details :
- -----------------------------
- NAME STATUS VOLUME CAPACITY STORAGECLASS AGE
- openebs-pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca-jiva-rep-0 Bound pvc-009a193e-aa44-44d8-8b13-58859ffa734d 4.0GiB openebs-hostpath 8h25m
- ```
-
- * #### Describe `Jiva` PVCs
- ```bash
- $ kubectl openebs describe pvc jiva-csi-pvc
-
- jiva-csi-pvc Details :
- -------------------
- NAME : jiva-csi-pvc
- NAMESPACE : default
- CAS TYPE : jiva
- BOUND VOLUME : pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca
- ATTACHED TO NODE : minikube
- JIVA VOLUME POLICY : jivavolumepolicy
- STORAGE CLASS : openebs-jiva-csi-sc
- SIZE : 4Gi
- JV STATUS : RW
- PV STATUS : Bound
-
- Portal Details :
- ------------------
- IQN : iqn.2016-09.com.openebs.jiva:pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca
- VOLUME NAME : pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca
- TARGET NODE NAME : minikube
- PORTAL : 10.108.189.51:3260
-
- Controller and Replica Pod Details :
- -----------------------------------
- NAMESPACE NAME MODE NODE STATUS IP READY AGE
- jiva pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca-jiva-ctrl-64c964bvtbk5 RW minikube Running 172.17.0.9 1/1 8h24m
- jiva pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca-jiva-rep-0 RW minikube Running 172.17.0.10 1/1 8h24m
-
- Replica Data Volume Details :
- -----------------------------
- NAME STATUS VOLUME CAPACITY STORAGECLASS AGE
- openebs-pvc-e974f45d-8b8f-4939-954a-607f60a8a5ca-jiva-rep-0 Bound pvc-009a193e-aa44-44d8-8b13-58859ffa734d 4.0GiB openebs-hostpath 8h24m
- ```
\ No newline at end of file
diff --git a/docs/localpv-lvm/README.md b/docs/localpv-lvm/README.md
index 9448a7a3..7c034ff9 100644
--- a/docs/localpv-lvm/README.md
+++ b/docs/localpv-lvm/README.md
@@ -50,24 +50,24 @@
```
* #### Describe `LocalPV-LVM` volume
```bash
- $ kubectl openebs describe vol pvc-9999274f-ad01-48bc-9b21-7c51b47a870c
-
- pvc-9999274f-ad01-48bc-9b21-7c51b47a870c Details :
+ $ kubectl openebs describe vol pvc-5265bc5e-dd55-4272-b1d0-2bb3a172970d
+
+ pvc-5265bc5e-dd55-4272-b1d0-2bb3a172970d Details :
------------------
- Name : pvc-9999274f-ad01-48bc-9b21-7c51b47a870c
- Namespace : openebs
- AccessMode : ReadWriteOnce
- CSIDriver : local.csi.openebs.io
- Capacity : 4Gi
- PVC : csi-lvmpv
- VolumePhase : Bound
- StorageClass : openebs-lvmpv
- Version : ci
- Status : Ready
- VolumeGroup : lvmvg
- Shared : no
- ThinProvisioned : no
- NodeID : worker-sh1
+ NAME : pvc-5265bc5e-dd55-4272-b1d0-2bb3a172970d
+ NAMESPACE : lvm
+ ACCESS MODE : ReadWriteOnce
+ CSI DRIVER : local.csi.openebs.io
+ CAPACITY : 5Gi
+ PVC NAME : csi-lvmpv
+ VOLUME PHASE : Bound
+ STORAGE CLASS : openebs-lvmpv
+ VERSION : 1.4.0
+ LVM VOLUME STATUS : Ready
+ VOLUME GROUP : lvmvg
+ SHARED : no
+ THIN PROVISIONED : no
+ NODE ID : node-0-152720
```
* #### Describe `LocalPV-LVM` PVCs
```bash
diff --git a/go.mod b/go.mod
index 63a26779..94655624 100644
--- a/go.mod
+++ b/go.mod
@@ -1,22 +1,16 @@
module github.com/openebs/openebsctl
-go 1.16
+go 1.19
require (
github.com/docker/go-units v0.4.0
github.com/ghodss/yaml v1.0.0
github.com/manifoldco/promptui v0.8.0
- github.com/openebs/api/v2 v2.3.0
- github.com/openebs/jiva-operator v1.12.2-0.20210607114402-811a3af7c34a
github.com/openebs/lvm-localpv v0.6.0
github.com/openebs/zfs-localpv v1.8.0
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.0
- github.com/stretchr/testify v1.6.1
- golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
- golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/api v0.20.2
k8s.io/apimachinery v0.20.2
k8s.io/cli-runtime v0.20.0
@@ -24,4 +18,60 @@ require (
k8s.io/klog v1.0.0
)
+require (
+ cloud.google.com/go v0.54.0 // indirect
+ github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+ github.com/Azure/go-autorest/autorest v0.11.1 // indirect
+ github.com/Azure/go-autorest/autorest/adal v0.9.5 // indirect
+ github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+ github.com/Azure/go-autorest/logger v0.2.0 // indirect
+ github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/evanphx/json-patch v4.9.0+incompatible // indirect
+ github.com/form3tech-oss/jwt-go v3.2.2+incompatible // indirect
+ github.com/fsnotify/fsnotify v1.4.9 // indirect
+ github.com/go-logr/logr v0.2.0 // indirect
+ github.com/gogo/protobuf v1.3.1 // indirect
+ github.com/golang/protobuf v1.4.3 // indirect
+ github.com/google/gofuzz v1.1.0 // indirect
+ github.com/googleapis/gnostic v0.4.1 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/imdario/mergo v0.3.7 // indirect
+ github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.10 // indirect
+ github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect
+ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+ github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a // indirect
+ github.com/magiconair/properties v1.8.1 // indirect
+ github.com/mattn/go-colorable v0.0.9 // indirect
+ github.com/mattn/go-isatty v0.0.4 // indirect
+ github.com/mitchellh/mapstructure v1.1.2 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/pelletier/go-toml v1.2.0 // indirect
+ github.com/spf13/afero v1.2.2 // indirect
+ github.com/spf13/cast v1.3.0 // indirect
+ github.com/spf13/jwalterweatherman v1.0.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/subosito/gotenv v1.2.0 // indirect
+ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
+ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b // indirect
+ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
+ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 // indirect
+ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
+ golang.org/x/text v0.3.4 // indirect
+ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
+ google.golang.org/appengine v1.6.5 // indirect
+ google.golang.org/protobuf v1.25.0 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/ini.v1 v1.51.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ k8s.io/klog/v2 v2.4.0 // indirect
+ k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd // indirect
+ k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.0.2 // indirect
+ sigs.k8s.io/yaml v1.2.0 // indirect
+)
+
replace k8s.io/client-go => k8s.io/client-go v0.20.2
diff --git a/go.sum b/go.sum
index 2466f55d..9474c954 100644
--- a/go.sum
+++ b/go.sum
@@ -46,10 +46,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -65,7 +63,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -78,20 +75,14 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -103,11 +94,8 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -134,26 +122,19 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
-github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
-github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo=
-github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -161,7 +142,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -206,25 +186,20 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -242,27 +217,25 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/go-ogle-analytics v0.0.0-20161213085824-14b04e0594ef/go.mod h1:PlwhC7q1VSK73InDzdDatVetQrTsQHIbOvcJAZzitY0=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -285,7 +258,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kubernetes-csi/csi-lib-iscsi v0.0.0-20191120152119-1430b53a1741/go.mod h1:4lv40oTBE8S2UI8H/w0/9GYPPv96vXIwVd/AhU0+ta0=
github.com/kubernetes-csi/csi-lib-utils v0.6.1/go.mod h1:GVmlUmxZ+SUjVLXicRFjqWUUvWez0g0Y78zNV9t7KfQ=
github.com/kubernetes-csi/csi-lib-utils v0.9.0/go.mod h1:8E2jVUX9j3QgspwHXa6LwyN7IHQDjW9jX3kwoWnSC+M=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
@@ -297,7 +269,6 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/manifoldco/promptui v0.8.0 h1:R95mMF+McvXZQ7j1g8ucVZE1gLP3Sv6j9vlF9kyRqQo=
github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
@@ -306,7 +277,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@@ -327,30 +297,18 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
-github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
-github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/openebs/api/v2 v2.3.0 h1:tkgysm2FnxkkEiC9RxxZ5rTbN4W6iA4qXspcmKRMzPk=
-github.com/openebs/api/v2 v2.3.0/go.mod h1:nLCaNvVjgjkjeD2a+n1fMbv5HjoEYP4XB8OAbwmIXtY=
-github.com/openebs/jiva-operator v1.12.2-0.20210607114402-811a3af7c34a h1:HuCp3D9TOhJogGTcH5JePJuebceQhPRgB5SizB0bmTg=
-github.com/openebs/jiva-operator v1.12.2-0.20210607114402-811a3af7c34a/go.mod h1:5oMQaMQKa0swN1hJnAP7CEMI/MOLVz0S2Mcu0H/l0oc=
github.com/openebs/lib-csi v0.3.0/go.mod h1:uruyzJiTwRoytQPQXOf4spaezn1cjkiAXjvFGw6aY/8=
github.com/openebs/lib-csi v0.6.0/go.mod h1:KWANWF2zNB8RYyELegid8PxHFrP/cdttR320NA9gVUQ=
github.com/openebs/lvm-localpv v0.6.0 h1:2LWSF/qy6jGKNAALtIN1O5y6tEKhwTGcVUcxy0Qgnpk=
@@ -369,7 +327,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -389,14 +346,11 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -413,20 +367,17 @@ github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -438,30 +389,19 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -521,7 +461,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -530,7 +469,6 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -564,12 +502,9 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -580,7 +515,6 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -624,9 +558,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -651,7 +582,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
-gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -666,9 +596,8 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -688,7 +617,6 @@ google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -715,16 +643,14 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
@@ -733,12 +659,10 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -750,31 +674,20 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw=
k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw=
k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8=
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
-k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg=
k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.2/go.mod h1:2nKd93WyMhZx4Hp3RfgH2K5PhwyTrprrkWYnI7id7jA=
k8s.io/cli-runtime v0.20.0 h1:UfTR9vGUWshJpwuekl7MqRmWumNs5tvqPj20qnmOns8=
k8s.io/cli-runtime v0.20.0/go.mod h1:C5tewU1SC1t09D7pmkk83FT4lMAw+bvMDuRxA7f0t2s=
k8s.io/client-go v0.20.2 h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ=
k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
-k8s.io/cloud-provider v0.20.2/go.mod h1:TiVc+qwBh37DNkirzDltXkbR6bdfOjfo243Tv/DyjGQ=
-k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
-k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/code-generator v0.20.2/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.2/go.mod h1:pzFtCiwe/ASD0iV7ySMu8SYVJjCapNM9bjvk7ptpKh0=
-k8s.io/controller-manager v0.20.2/go.mod h1:5FKx8oDeIiQTanQnQNsLxu/8uUEX1TxDXjiSwRxhM+8=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
@@ -790,16 +703,12 @@ k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhD
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=
-k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-runtime v0.2.0/go.mod h1:ZHqrRDZi3f6BzONcvlUxkqCKgwasGk5FZrnSv9TVZF4=
-sigs.k8s.io/controller-runtime v0.8.2 h1:SBWmI0b3uzMIUD/BIXWNegrCeZmPJ503pOtwxY0LPHM=
-sigs.k8s.io/controller-runtime v0.8.2/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
diff --git a/pkg/blockdevice/blockdevice.go b/pkg/blockdevice/blockdevice.go
deleted file mode 100644
index 867d86a0..00000000
--- a/pkg/blockdevice/blockdevice.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package blockdevice
-
-import (
- "github.com/docker/go-units"
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/pkg/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/cli-runtime/pkg/printers"
-)
-
-const (
- firstElemPrefix = `├─`
- lastElemPrefix = `└─`
-)
-
-// Get manages various implementations of blockdevice listing
-func Get(bds []string, openebsNS string) error {
- // TODO: Prefer passing the client from outside
- k := client.NewK8sClient(openebsNS)
- err := createTreeByNode(k, bds)
- if err != nil {
- return err
- }
- return nil
-}
-
-// createTreeByNode uses the [node <- list of bds on the node] and creates a tree like output,
-// also showing the relevant details to the bds.
-func createTreeByNode(k *client.K8sClient, bdNames []string) error {
- // 1. Get a list of the BlockDevices
- var bdList *v1alpha1.BlockDeviceList
- bdList, err := k.GetBDs(bdNames, "")
- if err != nil {
- return err
- }
- // 2. Create a map out of the list of bds, by their node names.
- var nodeBDlistMap = map[string][]v1alpha1.BlockDevice{}
- for _, bd := range bdList.Items {
- nodeBDlistMap[bd.Spec.NodeAttributes.NodeName] = append(nodeBDlistMap[bd.Spec.NodeAttributes.NodeName], bd)
- }
- var rows []metav1.TableRow
- if len(nodeBDlistMap) == 0 {
- // If there are no block devices show error
- return errors.New("no blockdevices found in the " + k.Ns + " namespace")
- }
- for nodeName, bds := range nodeBDlistMap {
- // Create the root, which contains only the node-name
- rows = append(rows, metav1.TableRow{Cells: []interface{}{nodeName, "", "", "", "", "", ""}})
- for i, bd := range bds {
- // If the bd is the last bd in the list, or the list has only one bd
- // append lastElementPrefix before bd name
- prefix := ""
- if i == len(bds)-1 {
- prefix = lastElemPrefix
- } else {
- prefix = firstElemPrefix
- }
- rows = append(rows, metav1.TableRow{
- Cells: []interface{}{
- prefix + bd.Name,
- bd.Spec.Path,
- units.BytesSize(float64(bd.Spec.Capacity.Storage)),
- bd.Status.ClaimState,
- bd.Status.State,
- bd.Spec.FileSystem.Type,
- bd.Spec.FileSystem.Mountpoint,
- }})
- }
- // Add an empty row so that the tree looks neat
- rows = append(rows, metav1.TableRow{Cells: []interface{}{"", "", "", "", "", "", ""}})
- }
- if len(rows) == 0 {
- return util.HandleEmptyTableError("Block Device", k.Ns, "")
- }
- // Show the output using cli-runtime
- util.TablePrinter(util.BDTreeListColumnDefinations, rows, printers.PrintOptions{Wide: true})
- return nil
-}
diff --git a/pkg/blockdevice/blockdevice_test.go b/pkg/blockdevice/blockdevice_test.go
deleted file mode 100644
index db0b4bd5..00000000
--- a/pkg/blockdevice/blockdevice_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package blockdevice
-
-import (
- "testing"
-
- openebsFakeClientset "github.com/openebs/api/v2/pkg/client/clientset/versioned/fake"
- "github.com/openebs/openebsctl/pkg/client"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-func Test_createTreeByNode(t *testing.T) {
- k8sCS := fake.NewSimpleClientset()
- type args struct {
- k *client.K8sClient
- bds []string
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with valid bd inputs and across all namespaces",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: nil,
- },
- false,
- },
- {
- "Test with valid bd inputs and in some valid ns",
- args{
- k: &client.K8sClient{
- Ns: "fake-ns",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: nil,
- },
- false,
- },
- {
- "Test with valid bd inputs and in some invalid ns",
- args{
- k: &client.K8sClient{
- Ns: "fake-invalid-ns",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: nil,
- },
- true,
- },
- {
- "Test with invalid bd inputs and in some valid ns",
- args{
- k: &client.K8sClient{
- Ns: "fake-ns",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(),
- },
- bds: nil,
- },
- true,
- },
- {
- "Test with invalid bd inputs across all namespaces",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(),
- },
- bds: nil,
- },
- true,
- },
- {
- "Test with valid bd inputs across all namespaces with some valid bd name passed as args",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: []string{"some-fake-bd-3"},
- },
- false,
- },
- {
- "Test with valid bd inputs across all namespaces with multiple valid bd names passed as args",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: []string{"some-fake-bd-3", "some-fake-bd-2"},
- },
- false,
- },
- {
- "Test with valid bd inputs across all namespaces with some valid and some invalid bd names passed as args",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: []string{"some-fake-bd-365", "some-fake-bd-2"},
- },
- false,
- },
- {
- "Test with valid bd inputs across all namespaces with some invalid bd name passed as args",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: []string{"some-fake-bd-365"},
- },
- true,
- },
- {
- "Test with valid bd inputs in a namespace with some valid bd name passed as args",
- args{
- k: &client.K8sClient{
- Ns: "fake-ns",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: []string{"some-fake-bd-3"},
- },
- false,
- },
- {
- "Test with valid bd inputs in an invalid namespace with some valid bd name passed as args",
- args{
- k: &client.K8sClient{
- Ns: "fake-invalid-ns",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: []string{"some-fake-bd-3"},
- },
- true,
- },
- {
- "Test with valid bd inputs in a valid namespace with some valid bd name passed as args",
- args{
- k: &client.K8sClient{
- Ns: "fake-ns",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: []string{"some-fake-bd-3"},
- },
- false,
- },
- {
- "Test with valid bd inputs in a valid namespace with some invalid bd name passed as args",
- args{
- k: &client.K8sClient{
- Ns: "fake-ns",
- K8sCS: k8sCS,
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&bd1, &bd2, &bd3),
- },
- bds: []string{"some-fake-bd-365"},
- },
- true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := createTreeByNode(tt.args.k, tt.args.bds); (err != nil) != tt.wantErr {
- t.Errorf("createTreeByNode() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
diff --git a/pkg/blockdevice/testdata_test.go b/pkg/blockdevice/testdata_test.go
deleted file mode 100644
index 5102122f..00000000
--- a/pkg/blockdevice/testdata_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package blockdevice
-
-import (
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var (
- bd1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{Name: "some-fake-bd-1"},
- Spec: v1alpha1.DeviceSpec{
- Path: "/dev/sdb",
- Capacity: v1alpha1.DeviceCapacity{Storage: uint64(132131321)},
- FileSystem: v1alpha1.FileSystemInfo{
- Type: "zfs_member",
- Mountpoint: "/var/some-fake-point",
- },
- NodeAttributes: v1alpha1.NodeAttribute{
- NodeName: "fake-node-1",
- },
- },
- Status: v1alpha1.DeviceStatus{
- ClaimState: "Claimed",
- State: "Active",
- },
- }
- bd2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{Name: "some-fake-bd-2"},
- Spec: v1alpha1.DeviceSpec{
- Path: "/dev/sdb",
- Capacity: v1alpha1.DeviceCapacity{Storage: uint64(132131321)},
- FileSystem: v1alpha1.FileSystemInfo{
- Type: "zfs_member",
- Mountpoint: "/var/some-fake-point",
- },
- NodeAttributes: v1alpha1.NodeAttribute{
- NodeName: "fake-node-1",
- },
- },
- Status: v1alpha1.DeviceStatus{
- ClaimState: "Claimed",
- State: "Active",
- },
- }
- bd3 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{Name: "some-fake-bd-3", Namespace: "fake-ns"},
- Spec: v1alpha1.DeviceSpec{
- Path: "/dev/sdb",
- Capacity: v1alpha1.DeviceCapacity{Storage: uint64(132131321)},
- FileSystem: v1alpha1.FileSystemInfo{
- Type: "lvm_member",
- Mountpoint: "/var/some-fake-point",
- },
- NodeAttributes: v1alpha1.NodeAttribute{
- NodeName: "fake-node-2",
- },
- },
- Status: v1alpha1.DeviceStatus{
- ClaimState: "Claimed",
- State: "Active",
- },
- }
-)
diff --git a/pkg/client/bd.go b/pkg/client/bd.go
deleted file mode 100644
index 50b554a1..00000000
--- a/pkg/client/bd.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package client
-
-import (
- "context"
-
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- "github.com/pkg/errors"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- // required for auth, see: https://github.com/kubernetes/client-go/tree/v0.17.3/plugin/pkg/client/auth
- _ "k8s.io/client-go/plugin/pkg/client/auth"
-)
-
-// GetBD returns the BlockDevice passed as name with OpenEBS's Client
-func (k K8sClient) GetBD(bd string) (*v1alpha1.BlockDevice, error) {
- blockDevice, err := k.OpenebsCS.OpenebsV1alpha1().BlockDevices(k.Ns).Get(context.TODO(), bd, metav1.GetOptions{})
- if err != nil {
- return nil, errors.Wrapf(err, "Error while getting block device")
- }
- return blockDevice, nil
-}
-
-// GetBDs returns a list of BlockDevices based on the values of bdNames slice.
-// bdNames slice if is nil or empty, it returns all the BDs in the cluster.
-// bdNames slice if is not nil or not empty, it return the BDs whose names are present in the slice.
-// labelselector takes the label(key+value) and makes an api call with this filter applied. Can be empty string if label filtering is not needed.
-func (k K8sClient) GetBDs(bdNames []string, labelselector string) (*v1alpha1.BlockDeviceList, error) {
- bds, err := k.OpenebsCS.OpenebsV1alpha1().BlockDevices(k.Ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelselector})
- if err != nil {
- return nil, errors.Wrapf(err, "Error while getting block device")
- }
- if len(bdNames) == 0 {
- return bds, nil
- }
- bdNameBDmap := make(map[string]v1alpha1.BlockDevice)
- for _, item := range bds.Items {
- bdNameBDmap[item.Name] = item
- }
- var items = make([]v1alpha1.BlockDevice, 0)
- for _, name := range bdNames {
- if _, ok := bdNameBDmap[name]; ok {
- items = append(items, bdNameBDmap[name])
- }
- }
- return &v1alpha1.BlockDeviceList{
- Items: items,
- }, nil
-}
-
-// GetBDCs returns a list of BlockDeviceClaims based on the values of bdcNames slice.
-// bdcNames slice if is nil or empty, it returns all the BDCs in the cluster.
-// bdcNames slice if is not nil or not empty, it return the BDCs whose names are present in the slice.
-// labelselector takes the label(key+value) and makes an api call with this filter applied. Can be empty string if label filtering is not needed.
-func (k K8sClient) GetBDCs(bdcNames []string, labelselector string) (*v1alpha1.BlockDeviceClaimList, error) {
- bds, err := k.OpenebsCS.OpenebsV1alpha1().BlockDeviceClaims(k.Ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelselector})
- if err != nil {
- return nil, errors.Wrapf(err, "Error while getting block device")
- }
- if len(bdcNames) == 0 {
- return bds, nil
- }
- bdcNameBDCmap := make(map[string]v1alpha1.BlockDeviceClaim)
- for _, item := range bds.Items {
- bdcNameBDCmap[item.Name] = item
- }
- var items = make([]v1alpha1.BlockDeviceClaim, 0)
- for _, name := range bdcNames {
- if _, ok := bdcNameBDCmap[name]; ok {
- items = append(items, bdcNameBDCmap[name])
- }
- }
- return &v1alpha1.BlockDeviceClaimList{
- Items: items,
- }, nil
-}
diff --git a/pkg/client/cstor.go b/pkg/client/cstor.go
deleted file mode 100644
index 88d5b2e7..00000000
--- a/pkg/client/cstor.go
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package client
-
-import (
- "context"
- "fmt"
-
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/pkg/errors"
-
- cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/klog"
-
- // required for auth, see: https://github.com/kubernetes/client-go/tree/v0.17.3/plugin/pkg/client/auth
- _ "k8s.io/client-go/plugin/pkg/client/auth"
-)
-
-// GetCV returns the CStorVolume passed as name with OpenEBS's Client
-func (k K8sClient) GetCV(volName string) (*cstorv1.CStorVolume, error) {
- volInfo, err := k.OpenebsCS.CstorV1().CStorVolumes(k.Ns).Get(context.TODO(), volName, metav1.GetOptions{})
- if err != nil {
- return nil, errors.Wrapf(err, "error while getting volume %s", volName)
- }
- return volInfo, nil
-}
-
-// GetCVs returns a list or map of CStorVolumes based on the values of volNames slice, and options.
-// volNames slice if is nil or empty, it returns all the CVs in the cluster.
-// volNames slice if is not nil or not empty, it return the CVs whose names are present in the slice.
-// rType takes the return type of the method, can be either List or Map.
-// labelselector takes the label(key+value) and makes an api call with this filter applied, can be empty string if label filtering is not needed.
-// options takes a MapOptions object which defines how to create a map, refer to types for more info. Can be empty in case of rType is List.
-// Only one type can be returned at a time, please define the other type as '_' while calling.
-func (k K8sClient) GetCVs(volNames []string, rType util.ReturnType, labelSelector string, options util.MapOptions) (*cstorv1.CStorVolumeList, map[string]cstorv1.CStorVolume, error) {
- cVols, err := k.OpenebsCS.CstorV1().CStorVolumes("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
- if len(cVols.Items) == 0 {
- return nil, nil, errors.Errorf("Error while getting volumes%v", err)
- }
- var list []cstorv1.CStorVolume
- if len(volNames) == 0 {
- list = cVols.Items
- } else {
- csMap := make(map[string]cstorv1.CStorVolume)
- for _, cv := range cVols.Items {
- csMap[cv.Name] = cv
- }
- for _, name := range volNames {
- if cv, ok := csMap[name]; ok {
- list = append(list, cv)
- } else {
- fmt.Printf("Error from server (NotFound): cStorVolume %s not found\n", name)
- }
- }
- }
- if rType == util.List {
- return &cstorv1.CStorVolumeList{
- Items: list,
- }, nil, nil
- }
- if rType == util.Map {
- cvMap := make(map[string]cstorv1.CStorVolume)
- switch options.Key {
- case util.Label:
- for _, cv := range list {
- if vol, ok := cv.Labels[options.LabelKey]; ok {
- cvMap[vol] = cv
- }
- }
- return nil, cvMap, nil
- case util.Name:
- for _, cv := range list {
- cvMap[cv.Name] = cv
- }
- return nil, cvMap, nil
- default:
- return nil, nil, errors.New("invalid map options")
- }
- }
- return nil, nil, errors.New("invalid return type")
-}
-
-// GetCVA returns the CStorVolumeAttachment, corresponding to the label passed.
-// Ex:- labelSelector: {cstortypes.PersistentVolumeLabelKey + "=" + pvName}
-func (k K8sClient) GetCVA(labelSelector string) (*cstorv1.CStorVolumeAttachment, error) {
- vol, err := k.OpenebsCS.CstorV1().CStorVolumeAttachments("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
- if err != nil {
- return nil, errors.Wrap(err, "error from server (NotFound): CVA not found")
- } else if vol == nil || len(vol.Items) == 0 {
- return nil, fmt.Errorf("error from server (NotFound): CVA not found for %s", labelSelector)
- }
- return &vol.Items[0], nil
-}
-
-// GetCVAs returns a list or map of CStorVolumeAttachments based on the values of options.
-// rType takes the return type of the method, can either be List or Map.
-// labelselector takes the label(key+value) and makes a api call with this filter applied, can be empty string if label filtering is not needed.
-// options takes a MapOptions object which defines how to create a map, refer to types for more info. Can be empty in case of rType is List.
-// Only one type can be returned at a time, please define the other type as '_' while calling.
-func (k K8sClient) GetCVAs(rType util.ReturnType, labelSelector string, options util.MapOptions) (*cstorv1.CStorVolumeAttachmentList, map[string]cstorv1.CStorVolumeAttachment, error) {
- cvaList, err := k.OpenebsCS.CstorV1().CStorVolumeAttachments("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
- if len(cvaList.Items) == 0 {
- return nil, nil, errors.Errorf("No CVA found for %s, %v", labelSelector, err)
- }
- if rType == util.List {
- return cvaList, nil, nil
- }
- if rType == util.Map {
- cvaMap := make(map[string]cstorv1.CStorVolumeAttachment)
- switch options.Key {
- case util.Label:
- for _, cva := range cvaList.Items {
- if vol, ok := cva.Labels[options.LabelKey]; ok {
- cvaMap[vol] = cva
- }
- }
- return nil, cvaMap, nil
- case util.Name:
- for _, cva := range cvaList.Items {
- cvaMap[cva.Name] = cva
- }
- return nil, cvaMap, nil
- default:
- return nil, nil, errors.New("invalid map options")
- }
- }
- return nil, nil, errors.New("invalid return type")
-}
-
-// GetCVTargetPod returns the Cstor Volume Target Pod, corresponding to the volumeClaim and volumeName.
-func (k K8sClient) GetCVTargetPod(volumeClaim string, volumeName string) (*corev1.Pod, error) {
- pods, err := k.K8sCS.CoreV1().Pods(k.Ns).List(context.TODO(), metav1.ListOptions{LabelSelector: fmt.Sprintf("openebs.io/persistent-volume-claim=%s,openebs.io/persistent-volume=%s,openebs.io/target=cstor-target", volumeClaim, volumeName)})
- if err != nil || len(pods.Items) == 0 {
- return nil, errors.New("The target pod for the volume was not found")
- }
- return &pods.Items[0], nil
-}
-
-// GetCVInfoMap returns a Volume object, filled using corresponding CVA and PV.
-func (k K8sClient) GetCVInfoMap() (map[string]*util.Volume, error) {
- volumes := make(map[string]*util.Volume)
- cstorVA, _, err := k.GetCVAs(util.List, "", util.MapOptions{})
- if err != nil {
- return volumes, errors.Wrap(err, "error while getting storage volume attachments")
- }
- for _, i := range cstorVA.Items {
- if i.Spec.Volume.Name == "" {
- continue
- }
- pv, err := k.GetPV(i.Spec.Volume.Name)
- if err != nil {
- klog.Errorf("Failed to get PV %s", i.ObjectMeta.Name)
- continue
- }
- vol := &util.Volume{
- StorageClass: pv.Spec.StorageClassName,
- Node: i.Labels["nodeID"],
- PVC: pv.Spec.ClaimRef.Name,
- CSIVolumeAttachmentName: i.Name,
- AttachementStatus: string(pv.Status.Phase),
- // first fetch access modes & then convert to string
- AccessMode: util.AccessModeToString(pv.Spec.AccessModes),
- }
- // map the pv name to the vol obj
- volumes[i.Spec.Volume.Name] = vol
- }
- return volumes, nil
-}
-
-// GetCVBackups returns the CStorVolumeBackup, corresponding to the label passed.
-// Ex:- labelSelector: {cstortypes.PersistentVolumeLabelKey + "=" + pvName}
-func (k K8sClient) GetCVBackups(labelselector string) (*cstorv1.CStorBackupList, error) {
- cstorBackupList, err := k.OpenebsCS.CstorV1().CStorBackups("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelselector})
- if err != nil || len(cstorBackupList.Items) == 0 {
- return nil, errors.New("no cstorbackups were found for the volume")
- }
- return cstorBackupList, nil
-}
-
-// GetCVCompletedBackups returns the CStorCompletedBackups, corresponding to the label passed.
-// Ex:- labelSelector: {cstortypes.PersistentVolumeLabelKey + "=" + pvName}
-func (k K8sClient) GetCVCompletedBackups(labelselector string) (*cstorv1.CStorCompletedBackupList, error) {
- cstorCompletedBackupList, err := k.OpenebsCS.CstorV1().CStorCompletedBackups("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelselector})
- if err != nil || len(cstorCompletedBackupList.Items) == 0 {
- return nil, errors.New("no cstorcompletedbackups were found for the volume")
- }
- return cstorCompletedBackupList, nil
-}
-
-// GetCVRestores returns the CStorRestores, corresponding to the label passed.
-// Ex:- labelSelector: {cstortypes.PersistentVolumeLabelKey + "=" + pvName}
-func (k K8sClient) GetCVRestores(labelselector string) (*cstorv1.CStorRestoreList, error) {
- cStorRestoreList, err := k.OpenebsCS.CstorV1().CStorRestores("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelselector})
- if err != nil || len(cStorRestoreList.Items) == 0 {
- return nil, errors.New("no cstorrestores were found for the volume")
- }
- return cStorRestoreList, nil
-}
-
-// GetCVC returns the CStorVolumeConfig for cStor volume using the PV/CV/CVC name.
-func (k K8sClient) GetCVC(name string) (*cstorv1.CStorVolumeConfig, error) {
- cStorVolumeConfig, err := k.OpenebsCS.CstorV1().CStorVolumeConfigs(k.Ns).Get(context.TODO(), name, metav1.GetOptions{})
- if err != nil {
- return nil, errors.Wrapf(err, "error while getting cStor Volume Config for %s in %s", name, k.Ns)
- }
- return cStorVolumeConfig, nil
-}
-
-// GetCVRs returns the list CStorVolumeReplica, corresponding to the label passed.
-// For ex:- labelselector : {"cstorvolume.openebs.io/name" + "=" + name} , {"cstorpoolinstance.openebs.io/name" + "=" + poolName}
-func (k K8sClient) GetCVRs(labelselector string) (*cstorv1.CStorVolumeReplicaList, error) {
- cvrs, err := k.OpenebsCS.CstorV1().CStorVolumeReplicas("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelselector})
- if err != nil {
- return nil, errors.Wrapf(err, "error while getting cStor Volume Replica for %s", labelselector)
- }
- if cvrs == nil || len(cvrs.Items) == 0 {
- fmt.Printf("Error while getting cStor Volume Replica for %s, no replicas found for \n", labelselector)
- }
- return cvrs, nil
-}
-
-// GetCSPC returns the CStorPoolCluster for cStor volume using the poolName passed.
-func (k K8sClient) GetCSPC(poolName string) (*cstorv1.CStorPoolCluster, error) {
- cStorPool, err := k.OpenebsCS.CstorV1().CStorPoolClusters(k.Ns).Get(context.TODO(), poolName, metav1.GetOptions{})
- if err != nil {
- return nil, errors.Wrapf(err, "Error while getting cspc")
- }
- return cStorPool, nil
-}
-
-func (k K8sClient) ListCSPC() (*cstorv1.CStorPoolClusterList, error) {
- cStorPool, err := k.OpenebsCS.CstorV1().CStorPoolClusters(k.Ns).List(context.TODO(), metav1.ListOptions{})
- if err != nil {
- return nil, errors.Wrapf(err, "Error while getting cspc")
- }
- return cStorPool, nil
-}
-
-// GetCSPI returns the CStorPoolInstance for cStor volume using the poolName passed.
-func (k K8sClient) GetCSPI(poolName string) (*cstorv1.CStorPoolInstance, error) {
- cStorPool, err := k.OpenebsCS.CstorV1().CStorPoolInstances(k.Ns).Get(context.TODO(), poolName, metav1.GetOptions{})
- if err != nil {
- return nil, errors.Wrapf(err, "Error while getting cspi")
- }
- return cStorPool, nil
-}
-
-// GetCSPIs returns a list of CStorPoolInstances based on the values of cspiNames slice
-// cspiNames slice if is nil or empty, it returns all the CSPIs in the cluster
-// cspiNames slice if is not nil or not empty, it return the CSPIs whose names are present in the slice
-// labelselector takes the label(key+value) and makes an api call with this filter applied. Can be empty string if label filtering is not needed.
-func (k K8sClient) GetCSPIs(cspiNames []string, labelselector string) (*cstorv1.CStorPoolInstanceList, error) {
- cspi, err := k.OpenebsCS.CstorV1().CStorPoolInstances("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelselector})
- if err != nil {
- return nil, errors.Wrapf(err, "Error while getting cspi")
- }
- if len(cspiNames) == 0 {
- return cspi, nil
- }
- poolMap := make(map[string]cstorv1.CStorPoolInstance)
- for _, p := range cspi.Items {
- poolMap[p.Name] = p
- }
- var list []cstorv1.CStorPoolInstance
- for _, name := range cspiNames {
- if pool, ok := poolMap[name]; ok {
- list = append(list, pool)
- }
- // else {
- // This logging might be omitted
- // fmt.Fprintf(os.Stderr, "Error from server (NotFound): pool %s not found\n", name)
- //}
- }
- return &cstorv1.CStorPoolInstanceList{
- Items: list,
- }, nil
-}
diff --git a/pkg/client/jiva.go b/pkg/client/jiva.go
deleted file mode 100644
index 47929742..00000000
--- a/pkg/client/jiva.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package client
-
-import (
- "context"
- "fmt"
-
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/pkg/errors"
-
- jiva "github.com/openebs/jiva-operator/pkg/apis/openebs/v1alpha1"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- // required for auth, see: https://github.com/kubernetes/client-go/tree/v0.17.3/plugin/pkg/client/auth
- _ "k8s.io/client-go/plugin/pkg/client/auth"
-)
-
-// GetJV returns the JivaVolume passed as name with REST Client
-func (k K8sClient) GetJV(jv string) (*jiva.JivaVolume, error) {
- var j jiva.JivaVolume
- err := k.K8sCS.Discovery().RESTClient().Get().Namespace(k.Ns).Name(jv).AbsPath("/apis/openebs.io/v1alpha1").
- Resource("jivavolumes").Do(context.TODO()).Into(&j)
- if err != nil {
- return nil, err
- }
- return &j, nil
-}
-
-// GetJVs returns a list or map of JivaVolumes based on the values of volNames slice, and options.
-// volNames slice if is nil or empty, it returns all the JVs in the cluster.
-// volNames slice if is not nil or not empty, it return the JVs whose names are present in the slice.
-// rType takes the return type of the method, can either List or Map.
-// labelselector takes the label(key+value) and makes an api call with this filter applied, can be empty string if label filtering is not needed.
-// options takes a MapOptions object which defines how to create a map, refer to types for more info. Can be empty in case of rType is List.
-// Only one type can be returned at a time, please define the other type as '_' while calling.
-func (k K8sClient) GetJVs(volNames []string, rType util.ReturnType, labelSelector string, options util.MapOptions) (*jiva.JivaVolumeList, map[string]jiva.JivaVolume, error) {
- jvs := jiva.JivaVolumeList{}
- // NOTE: The resource name must be plural and the API-group should be present for getting CRs
- err := k.K8sCS.Discovery().RESTClient().Get().AbsPath("/apis/openebs.io/v1alpha1").
- Resource("jivavolumes").Do(context.TODO()).Into(&jvs)
- if err != nil {
- return nil, nil, err
- }
- var list []jiva.JivaVolume
- if len(volNames) == 0 {
- list = jvs.Items
- } else {
- jvsMap := make(map[string]jiva.JivaVolume)
- for _, jv := range jvs.Items {
- jvsMap[jv.Name] = jv
- }
- for _, name := range volNames {
- if jv, ok := jvsMap[name]; ok {
- list = append(list, jv)
- } else {
- fmt.Printf("Error from server (NotFound): jivavolume %s not found\n", name)
- }
- }
- }
- if rType == util.List {
- return &jiva.JivaVolumeList{
- Items: list,
- }, nil, nil
- }
- if rType == util.Map {
- jvMap := make(map[string]jiva.JivaVolume)
- switch options.Key {
- case util.Label:
- for _, jv := range list {
- if vol, ok := jv.Labels[options.LabelKey]; ok {
- jvMap[vol] = jv
- }
- }
- return nil, jvMap, nil
- case util.Name:
- for _, jv := range list {
- jvMap[jv.Name] = jv
- }
- return nil, jvMap, nil
- default:
- return nil, nil, errors.New("invalid map options")
- }
- }
- return nil, nil, errors.New("invalid return type")
-}
-
-// GetJVTargetPod returns the Jiva Volume Controller and Replica Pods, corresponding to the volumeName.
-func (k K8sClient) GetJVTargetPod(volumeName string) (*corev1.PodList, error) {
- pods, err := k.K8sCS.CoreV1().Pods(k.Ns).List(context.TODO(), metav1.ListOptions{LabelSelector: fmt.Sprintf("openebs.io/cas-type=jiva,openebs.io/persistent-volume=%s", volumeName)})
- if err != nil || len(pods.Items) == 0 {
- return nil, errors.New("The controller and replica pod for the volume was not found")
- }
- return pods, nil
-}
diff --git a/pkg/client/k8s.go b/pkg/client/k8s.go
index 8274621a..a17ef513 100644
--- a/pkg/client/k8s.go
+++ b/pkg/client/k8s.go
@@ -34,8 +34,6 @@ import (
zfsclient "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
"github.com/pkg/errors"
- openebsclientset "github.com/openebs/api/v2/pkg/client/clientset/versioned"
-
appsv1 "k8s.io/api/apps/v1"
batchV1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
@@ -61,9 +59,6 @@ type K8sClient struct {
// K8sCS refers to the Clientset capable of communicating
// with the K8s cluster
K8sCS kubernetes.Interface
- // OpenebsClientset capable of accessing the OpenEBS
- // components
- OpenebsCS openebsclientset.Interface
// LVMCS is the client for accessing OpenEBS LVM components
LVMCS lvmclient.Interface
// ZFCS is the client for accessing OpenEBS ZFS components
@@ -108,18 +103,13 @@ func newK8sClient(ns string) (*K8sClient, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to build Kubernetes clientset")
}
- openebsCS, err := getOpenEBSClient(config)
- if err != nil {
- return nil, errors.Wrap(err, "failed to build OpenEBS clientset")
- }
lv, _ := getLVMclient(config)
zf, _ := getZFSclient(config)
return &K8sClient{
- Ns: ns,
- K8sCS: k8sCS,
- OpenebsCS: openebsCS,
- LVMCS: lv,
- ZFCS: zf,
+ Ns: ns,
+ K8sCS: k8sCS,
+ LVMCS: lv,
+ ZFCS: zf,
}, nil
}
@@ -161,20 +151,6 @@ func getK8sClient(kubeconfig string) (*kubernetes.Clientset, error) {
return clientset, nil
}
-// getOpenEBSClient returns OpenEBS clientset by taking kubeconfig as an
-// argument
-func getOpenEBSClient(kubeconfig string) (*openebsclientset.Clientset, error) {
- config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
- if err != nil {
- return nil, errors.Wrap(err, "Could not build config from flags")
- }
- client, err := openebsclientset.NewForConfig(config)
- if err != nil {
- return nil, errors.Wrap(err, "Could not get new config")
- }
- return client, nil
-}
-
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
@@ -196,7 +172,7 @@ func (k K8sClient) GetOpenEBSNamespace(casType string) (string, error) {
return pods.Items[0].Namespace, nil
}
-// GetOpenEBSNamespaceMap maps the cas-type to it's namespace, e.g. n[cstor] = cstor-ns
+// GetOpenEBSNamespaceMap maps the cas-type to its namespace, e.g. n[zfs] = zfs-ns
// NOTE: This will not work correctly if CSI controller pod runs in kube-system NS
func (k K8sClient) GetOpenEBSNamespaceMap() (map[string]string, error) {
label := "openebs.io/component-name in ("
diff --git a/pkg/client/lvmlocalpv.go b/pkg/client/lvmlocalpv.go
index fc60c5b3..f2a0f0f7 100644
--- a/pkg/client/lvmlocalpv.go
+++ b/pkg/client/lvmlocalpv.go
@@ -113,7 +113,7 @@ func (k K8sClient) GetLVMNodes(lVols []string, rType util.ReturnType, labelSelec
if lv, ok := lvsMap[name]; ok {
list = append(list, lv)
} else {
- fmt.Printf("Error from server (NotFound): lvmvolume %s not found\n", name)
+ fmt.Printf("Error from server (NotFound): lvmnode %s not found\n", name)
}
}
}
diff --git a/pkg/clusterinfo/cluster-info.go b/pkg/clusterinfo/cluster-info.go
index 027f56d1..4a259875 100644
--- a/pkg/clusterinfo/cluster-info.go
+++ b/pkg/clusterinfo/cluster-info.go
@@ -40,16 +40,7 @@ func compute(k *client.K8sClient) error {
for casType, componentNames := range util.CasTypeToComponentNamesMap {
componentDataMap, err := getComponentDataByComponents(k, componentNames, casType)
if err == nil && len(componentDataMap) != 0 {
- status, working := "", ""
- if casType == util.LocalDeviceCasType {
- var err error
- status, working, err = getLocalPVDeviceStatus(componentDataMap)
- if err != nil {
- continue
- }
- } else {
- status, working = getStatus(componentDataMap)
- }
+ status, working := getStatus(componentDataMap)
version := getVersion(componentDataMap)
namespace := getNamespace(componentDataMap)
clusterInfoRows = append(
@@ -67,7 +58,6 @@ func compute(k *client.K8sClient) error {
func getComponentDataByComponents(k *client.K8sClient, componentNames string, casType string) (map[string]util.ComponentData, error) {
var podList *corev1.PodList
- // Fetch Cstor Components
componentDataMap := make(map[string]util.ComponentData)
podList, _ = k.GetPods(fmt.Sprintf("openebs.io/component-name in (%s)", componentNames), "", "")
if len(podList.Items) != 0 {
@@ -92,25 +82,6 @@ func getComponentDataByComponents(k *client.K8sClient, componentNames string, ca
}
}
- // Below is to handle corner cases in case of cstor and jiva, as they use components like ndm and localpv provisioner
- // which are also used by other engine, below has been added to strictly identify the installed engine.
- engineComponents := 0
- for key := range componentDataMap {
- if !strings.Contains(util.NDMComponentNames, key) && strings.Contains(util.CasTypeToComponentNamesMap[casType], key) {
- engineComponents++
- if casType == util.JivaCasType && key == util.HostpathComponentNames {
- // Since hostpath component is not a unique engine component for jiva
- engineComponents--
- }
- }
- }
- if engineComponents == 0 {
- return nil, fmt.Errorf("components for %s engine are not installed", casType)
- }
-
- // The below is to fill in the expected components, for example if 5 out of 7 cstor components are there
- // in the cluster, we would not be able what was the expected number of components, the below would ensure cstor
- // needs 7 component always to work.
for _, item := range strings.Split(componentNames, ",") {
if _, ok := componentDataMap[item]; !ok {
componentDataMap[item] = util.ComponentData{}
@@ -139,21 +110,9 @@ func getStatus(componentDataMap map[string]util.ComponentData) (string, string)
}
}
-func getLocalPVDeviceStatus(componentDataMap map[string]util.ComponentData) (string, string, error) {
- if ndmData, ok := componentDataMap["ndm"]; ok {
- if localPVData, ok := componentDataMap["openebs-localpv-provisioner"]; ok {
- if ndmData.Namespace == localPVData.Namespace && localPVData.Namespace != "" && localPVData.CasType != "" {
- status, working := getStatus(componentDataMap)
- return status, working, nil
- }
- }
- }
- return "", "", fmt.Errorf("installed NDM is not for Device LocalPV")
-}
-
func getVersion(componentDataMap map[string]util.ComponentData) string {
- for key, val := range componentDataMap {
- if !strings.Contains(util.NDMComponentNames, key) && val.Version != "" && val.Status == "Running" {
+ for _, val := range componentDataMap {
+ if val.Version != "" && val.Status == "Running" {
return val.Version
}
}
@@ -161,8 +120,8 @@ func getVersion(componentDataMap map[string]util.ComponentData) string {
}
func getNamespace(componentDataMap map[string]util.ComponentData) string {
- for key, val := range componentDataMap {
- if !strings.Contains(util.NDMComponentNames, key) && val.Namespace != "" {
+ for _, val := range componentDataMap {
+ if val.Namespace != "" {
return val.Namespace
}
}
diff --git a/pkg/clusterinfo/cluster-info_test.go b/pkg/clusterinfo/cluster-info_test.go
deleted file mode 100644
index 3592e92b..00000000
--- a/pkg/clusterinfo/cluster-info_test.go
+++ /dev/null
@@ -1,602 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clusterinfo
-
-import (
- "reflect"
- "testing"
-
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-func Test_getComponentDataByComponents(t *testing.T) {
- type args struct {
- k *client.K8sClient
- componentNames string
- casType string
- }
- tests := []struct {
- name string
- args args
- want map[string]util.ComponentData
- wantErr bool
- }{
- {
- "All components present and running",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cspcOperator, &cvcOperator, &cstorAdmissionWebhook, &ndm, &ndmOperator, &openebsCstorCsiController, &openebsCstorCsiNode),
- },
- componentNames: util.CasTypeToComponentNamesMap[util.CstorCasType],
- casType: util.CstorCasType,
- },
- map[string]util.ComponentData{
- "cspc-operator": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "cvc-operator": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "cstor-admission-webhook": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "openebs-cstor-csi-node": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "openebs-cstor-csi-controller": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "ndm": {Namespace: "openebs", Status: "Running", Version: "1.1", CasType: "cstor"},
- "openebs-ndm-operator": {Namespace: "openebs", Status: "Running", Version: "1.1", CasType: "cstor"},
- },
- false,
- },
- {
- "Some components present and running",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cspcOperator, &cvcOperator, &cstorAdmissionWebhook, &ndmOperator, &openebsCstorCsiNode),
- },
- componentNames: util.CasTypeToComponentNamesMap[util.CstorCasType],
- casType: util.CstorCasType,
- },
- map[string]util.ComponentData{
- "cspc-operator": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "cvc-operator": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "cstor-admission-webhook": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "openebs-cstor-csi-node": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "openebs-cstor-csi-controller": {},
- "ndm": {},
- "openebs-ndm-operator": {Namespace: "openebs", Status: "Running", Version: "1.1", CasType: "cstor"},
- },
- false,
- },
- {
- "All components present and running with some component having evicted pods",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cspcOperator, &cvcOperator, &cstorAdmissionWebhook, &ndm, &ndmOperator, &openebsCstorCsiController, &openebsCstorCsiNode, &cspcOperatorEvicted, &cvcOperatorEvicted),
- },
- componentNames: util.CasTypeToComponentNamesMap[util.CstorCasType],
- casType: util.CstorCasType,
- },
- map[string]util.ComponentData{
- "cspc-operator": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "cvc-operator": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "cstor-admission-webhook": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "openebs-cstor-csi-node": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "openebs-cstor-csi-controller": {Namespace: "openebs", Status: "Running", Version: "2.1", CasType: "cstor"},
- "ndm": {Namespace: "openebs", Status: "Running", Version: "1.1", CasType: "cstor"},
- "openebs-ndm-operator": {Namespace: "openebs", Status: "Running", Version: "1.1", CasType: "cstor"},
- },
- false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := getComponentDataByComponents(tt.args.k, tt.args.componentNames, tt.args.casType)
- if (err != nil) != tt.wantErr {
- t.Errorf("getComponentDataByComponents() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("getComponentDataByComponents() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func Test_getLocalPVDeviceStatus(t *testing.T) {
- type args struct {
- componentDataMap map[string]util.ComponentData
- }
- tests := []struct {
- name string
- args args
- want string
- want1 string
- wantErr bool
- }{
- {
- "ndm and localpv provisioner in same ns",
- args{
- componentDataMap: map[string]util.ComponentData{
- "openebs-localpv-provisioner": {
- Namespace: "openebs",
- Status: "Running",
- Version: "1.1",
- CasType: util.LocalDeviceCasType,
- },
- "ndm": {
- Namespace: "openebs",
- Status: "Running",
- Version: "3.1",
- CasType: util.LocalDeviceCasType,
- },
- },
- },
- "Healthy",
- "2/2",
- false,
- },
- {
- "ndm and localpv provisioner in same ns but ndm down",
- args{
- componentDataMap: map[string]util.ComponentData{
- "openebs-localpv-provisioner": {
- Namespace: "openebs",
- Status: "Running",
- Version: "1.1",
- CasType: util.LocalDeviceCasType,
- },
- "ndm": {
- Namespace: "openebs",
- Status: "Pending",
- Version: "3.1",
- CasType: util.LocalDeviceCasType,
- },
- },
- },
- "Degraded",
- "1/2",
- false,
- },
- {
- "ndm and localpv provisioner in same ns but both down",
- args{
- componentDataMap: map[string]util.ComponentData{
- "openebs-localpv-provisioner": {
- Namespace: "openebs",
- Status: "Pending",
- Version: "1.1",
- CasType: util.LocalDeviceCasType,
- },
- "ndm": {
- Namespace: "openebs",
- Status: "Pending",
- Version: "3.1",
- CasType: util.LocalDeviceCasType,
- },
- },
- },
- "Unhealthy",
- "0/2",
- false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, got1, err := getLocalPVDeviceStatus(tt.args.componentDataMap)
- if (err != nil) != tt.wantErr {
- t.Errorf("getLocalPVDeviceStatus() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if got != tt.want {
- t.Errorf("getLocalPVDeviceStatus() got = %v, want %v", got, tt.want)
- }
- if got1 != tt.want1 {
- t.Errorf("getLocalPVDeviceStatus() got1 = %v, want %v", got1, tt.want1)
- }
- })
- }
-}
-
-func Test_getNamespace(t *testing.T) {
- type args struct {
- componentDataMap map[string]util.ComponentData
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- "some running components with ndm in same ns",
- args{
- componentDataMap: map[string]util.ComponentData{
- "cstor-csi-controller": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "ndm": {
- Namespace: "cstor",
- Status: "Running",
- Version: "3.1",
- CasType: "cstor",
- },
- "cstor-operator": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "cstor-some-xyz-component": {
- Namespace: "cstor",
- Status: "Running",
- Version: "1.1",
- CasType: "cstor",
- },
- },
- },
- "cstor",
- },
- {
- "some running components with ndm in different ns",
- args{
- componentDataMap: map[string]util.ComponentData{
- "cstor-csi-controller": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "ndm": {
- Namespace: "openebs",
- Status: "Running",
- Version: "3.1",
- CasType: "cstor",
- },
- "cstor-operator": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "cstor-some-xyz-component": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- },
- },
- "cstor",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := getNamespace(tt.args.componentDataMap); got != tt.want {
- t.Errorf("getNamespace() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func Test_getStatus(t *testing.T) {
- type args struct {
- componentDataMap map[string]util.ComponentData
- }
- tests := []struct {
- name string
- args args
- want string
- want1 string
- }{
- {
- "some running components",
- args{
- componentDataMap: map[string]util.ComponentData{
- "cstor-csi-controller": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "ndm": {
- Namespace: "cstor",
- Status: "Running",
- Version: "3.1",
- CasType: "cstor",
- },
- "cstor-operator": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "cstor-some-xyz-component": {
- Namespace: "cstor",
- Status: "Running",
- Version: "1.1",
- CasType: "cstor",
- },
- },
- },
- "Degraded",
- "2/4",
- },
- {
- "No running components",
- args{
- componentDataMap: map[string]util.ComponentData{
- "cstor-csi-controller": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "ndm": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "3.1",
- CasType: "cstor",
- },
- "cstor-operator": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "cstor-some-xyz-component": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- },
- },
- "Unhealthy",
- "0/4",
- },
- {
- "All running components",
- args{
- componentDataMap: map[string]util.ComponentData{
- "cstor-csi-controller": {
- Namespace: "cstor",
- Status: "Running",
- Version: "1.1",
- CasType: "cstor",
- },
- "ndm": {
- Namespace: "cstor",
- Status: "Running",
- Version: "3.1",
- CasType: "cstor",
- },
- "cstor-operator": {
- Namespace: "cstor",
- Status: "Running",
- Version: "1.1",
- CasType: "cstor",
- },
- "cstor-some-xyz-component": {
- Namespace: "cstor",
- Status: "Running",
- Version: "1.1",
- CasType: "cstor",
- },
- },
- },
- "Healthy",
- "4/4",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, got1 := getStatus(tt.args.componentDataMap)
- if got != tt.want {
- t.Errorf("getStatus() got = %v, want %v", got, tt.want)
- }
- if got1 != tt.want1 {
- t.Errorf("getStatus() got1 = %v, want %v", got1, tt.want1)
- }
- })
- }
-}
-
-func Test_getVersion(t *testing.T) {
- type args struct {
- componentDataMap map[string]util.ComponentData
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- "some running components",
- args{
- componentDataMap: map[string]util.ComponentData{
- "cstor-csi-controller": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "ndm": {
- Namespace: "cstor",
- Status: "Running",
- Version: "3.1",
- CasType: "cstor",
- },
- "cstor-operator": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "cstor-some-xyz-component": {
- Namespace: "cstor",
- Status: "Running",
- Version: "1.1",
- CasType: "cstor",
- },
- },
- },
- "1.1",
- },
- {
- "No running components except ndm",
- args{
- componentDataMap: map[string]util.ComponentData{
- "cstor-csi-controller": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "ndm": {
- Namespace: "cstor",
- Status: "Running",
- Version: "3.1",
- CasType: "cstor",
- },
- "cstor-operator": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- "cstor-some-xyz-component": {
- Namespace: "cstor",
- Status: "Pending",
- Version: "1.1",
- CasType: "cstor",
- },
- },
- },
- "",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := getVersion(tt.args.componentDataMap); got != tt.want {
- t.Errorf("getVersion() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-//map[cspc-operator:{openebs Running 2.1 cstor} cstor-admission-webhook:{openebs Running 2.1 cstor} cvc-operator:{openebs Running 2.1 cstor} ndm:{openebs Running 1.1 cstor} openebs-cstor-csi-controller:{openebs Running 2.1 cstor} openebs-cstor-csi-node:{openebs Running 2.1 cstor} openebs-ndm-operator:{openebs Running 1.1 cstor}],
-//map[cspc-operator:{openebs Running 2.1 cstor} cstor-admission-webhook:{openebs Running 2.1 cstor} cvc-operator:{openebs Running 2.1 cstor} ndm:{openebs Running 1.1 cstor} ndm-operator:{openebs Running 1.1 cstor} openebs-cstor-csi-controller:{openebs Running 2.1 cstor} openebs-cstor-csi-node:{openebs Running 2.1 cstor}]
-
-func Test_compute(t *testing.T) {
- type args struct {
- k *client.K8sClient
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "All components of cstor present and running",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cspcOperator, &cvcOperator, &cstorAdmissionWebhook, &ndm, &ndmOperator, &openebsCstorCsiController, &openebsCstorCsiNode),
- },
- },
- false,
- },
- {
- "Some components of cstor present and running",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cspcOperator, &cvcOperator, &cstorAdmissionWebhook, &ndmOperator, &openebsCstorCsiNode),
- },
- },
- false,
- },
- {
- "All components of cstor present and running with some component having evicted pods",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cspcOperator, &cvcOperator, &cstorAdmissionWebhook, &ndm, &ndmOperator, &openebsCstorCsiController, &openebsCstorCsiNode, &cspcOperatorEvicted, &cvcOperatorEvicted),
- },
- },
- false,
- },
- {
- "If no components are present",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(),
- },
- },
- true,
- },
- {
- "If ndm and localpv provisioner components are in same ns",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&localpvProvisionerInOpenebs, &ndm, &ndmOperator),
- },
- },
- false,
- },
- {
- "If ndm and localpv provisioner components are in different ns",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&localpvProvisioner, &ndm, &ndmOperator),
- },
- },
- false,
- },
- {
- "If jiva and ndm in same ns",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&jivaOperator, &openebsJivaCsiController, &openebsJivaCsiNode, &ndm, &ndmOperator, &localpvProvisionerInOpenebs),
- },
- },
- false,
- },
- {
- "If jiva and ndm in different ns",
- args{
- k: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&jivaOperator, &openebsJivaCsiController, &openebsJivaCsiNode, &ndmXYZ, &ndmOperatorXYZ, &localpvProvisionerInOpenebs),
- },
- },
- false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := compute(tt.args.k); (err != nil) != tt.wantErr {
- t.Errorf("compute() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
diff --git a/pkg/clusterinfo/testdata_test.go b/pkg/clusterinfo/testdata_test.go
deleted file mode 100644
index 87f1fd8c..00000000
--- a/pkg/clusterinfo/testdata_test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clusterinfo
-
-import (
- "time"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var cspcOperator = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cspcOperatorPOD",
- Namespace: "openebs",
- UID: "some-uuid-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "cspc-operator", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var cspcOperatorEvicted = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cspcOperatorEvictedPOD",
- Namespace: "openebs",
- UID: "some-uuid-8",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "cspc-operator", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Evicted"},
-}
-
-var cvcOperator = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cvcOperatorPOD",
- Namespace: "openebs",
- UID: "some-uuid-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "cvc-operator", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var cvcOperatorEvicted = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cvcOperatorEvictedPOD",
- Namespace: "openebs",
- UID: "some-uuid-9",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "cvc-operator", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Evicted"},
-}
-
-var cstorAdmissionWebhook = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstorAdmissionWebhookPOD",
- Namespace: "openebs",
- UID: "some-uuid-3",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "cstor-admission-webhook", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var openebsCstorCsiNode = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "openebsCstorCsiNodePOD",
- Namespace: "openebs",
- UID: "some-uuid-4",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "openebs-cstor-csi-node", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var openebsCstorCsiController = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "openebsCstorCsiControllerPOD",
- Namespace: "openebs",
- UID: "some-uuid-5",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "openebs-cstor-csi-controller", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var ndm = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ndmPOD",
- Namespace: "openebs",
- UID: "some-uuid-6",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "ndm", "openebs.io/version": "1.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var ndmOperator = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ndmOperatorPOD",
- Namespace: "openebs",
- UID: "some-uuid-7",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "openebs-ndm-operator", "openebs.io/version": "1.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var localpvProvisionerInOpenebs = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "localpvprovisionerInOpenebsPOD",
- Namespace: "openebs",
- UID: "some-uuid-10",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "openebs-localpv-provisioner", "openebs.io/version": "1.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var localpvProvisioner = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "localpvprovisionerPOD",
- Namespace: "xyz",
- UID: "some-uuid-10",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "openebs-localpv-provisioner", "openebs.io/version": "1.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var openebsJivaCsiNode = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "openebsJivaCsiNodePOD",
- Namespace: "openebs",
- UID: "some-uuid-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "openebs-jiva-csi-node", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var jivaOperator = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "jivaOperatorPOD",
- Namespace: "openebs",
- UID: "some-uuid-8",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "jiva-operator", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var openebsJivaCsiController = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "openebsJivaCsiControllerPOD",
- Namespace: "openebs",
- UID: "some-uuid-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "openebs-jiva-csi-controller", "openebs.io/version": "2.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var ndmXYZ = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ndmPOD",
- Namespace: "xyz",
- UID: "some-uuid-6",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "ndm", "openebs.io/version": "1.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
-
-var ndmOperatorXYZ = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ndmOperatorPOD",
- Namespace: "xyz",
- UID: "some-uuid-7",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/component-name": "openebs-ndm-operator", "openebs.io/version": "1.1"},
- },
- Status: corev1.PodStatus{Phase: "Running"},
-}
diff --git a/pkg/generate/cspc.go b/pkg/generate/cspc.go
deleted file mode 100644
index b0515675..00000000
--- a/pkg/generate/cspc.go
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package generate
-
-import (
- "fmt"
- "sort"
- "strconv"
- "strings"
-
- "github.com/ghodss/yaml"
- cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// isPoolValid checks if a CStor pool is valid
-func isPoolTypeValid(raid string) bool {
- if raid == "stripe" || raid == "mirror" || raid == "raidz" || raid == "raidz2" {
- return true
- }
- return false
-}
-
-// CSPC calls the generate routine for different cas-types
-func CSPC(nodes []string, devs int, raid, capacity string) error {
- c := client.NewK8sClient()
- if !isPoolTypeValid(strings.ToLower(raid)) {
- // TODO: Use the well defined pool constant types from openebs/api when added there
- return fmt.Errorf("invalid pool type %s", raid)
- }
- // resource.Quantity doesn't like the bits or bytes suffixes
- capacity = strings.Replace(capacity, "b", "", 1)
- capacity = strings.Replace(capacity, "B", "", 1)
- size, err := resource.ParseQuantity(capacity)
- if err != nil {
- return err
- }
- _, str, err := cspc(c, nodes, devs, strings.ToLower(raid), size)
- if err != nil {
- return err
- }
- fmt.Println(str)
- return nil
-}
-
-// cspc takes eligible nodes, number of devices and poolType to create a pool cluster template
-func cspc(c *client.K8sClient, nodes []string, devs int, poolType string, minSize resource.Quantity) (*cstorv1.CStorPoolCluster, string, error) {
- // 0. Figure out the OPENEBS_NAMESPACE for CStor
- cstorNS, err := c.GetOpenEBSNamespace(util.CstorCasType)
- // assume CSTOR's OPENEBS_NAMESPACE has all the relevant blockdevices
- c.Ns = cstorNS
- if err != nil {
- return nil, "", fmt.Errorf("unable to determine the cStor namespace error: %v", err)
- }
- // 0.1 Validate user input, check if user hasn't entered less than 64Mi
- cstorMin := resource.MustParse("64Mi")
- if minSize.Cmp(cstorMin) < 0 {
- return nil, "", fmt.Errorf("minimum size of supported block-devices in a cspc is 64Mi")
- }
- // 0.2 Validate user input, check if user has entered >= minimum supported BD-count
- if min := minCount()[poolType]; devs < min {
- return nil, "", fmt.Errorf("%s pool requires a minimum of %d block device per node",
- poolType, min)
- }
- // 1. Validate nodes & poolType, fetch disks
- nodeList, err := c.GetNodes(nodes, "", "")
- if err != nil {
- return nil, "", fmt.Errorf("(server error) unable to fetch node information %s", err)
- }
- if len(nodeList.Items) != len(nodes) {
- return nil, "", fmt.Errorf("not all worker nodes are available for provisioning a cspc")
- }
- // 1.1 Translate nodeNames to node's hostNames to fetch disks
- // while they might seem equivalent, they aren't equal, this quirk is
- // visible clearly for EKS clusters
- var hostnames []string
- for _, node := range nodeList.Items {
- // I hope it is unlikely for a K8s node to have an empty hostname
- hostnames = append(hostnames, node.Labels["kubernetes.io/hostname"])
- }
- // 2. Fetch BD's from the eligible/valid nodes by hostname labels
- bds, err := c.GetBDs(nil, "kubernetes.io/hostname in ("+strings.Join(hostnames, ",")+")")
- if err != nil || len(bds.Items) == 0 {
- return nil, "", fmt.Errorf("no blockdevices found in nodes with %v hostnames", hostnames)
- }
- _, err = filterCStorCompatible(bds, minSize)
- if err != nil {
- return nil, "", fmt.Errorf("(server error) unable to fetch bds from %v nodes", nodes)
- }
- // 3. Choose devices at the valid BDs by hostname
- hostToBD := make(map[string][]v1alpha1.BlockDevice)
- for _, bd := range bds.Items {
- hostToBD[bd.Labels["kubernetes.io/hostname"]] = append(hostToBD[bd.Labels["kubernetes.io/hostname"]], bd)
- }
- // 4. Select disks and create the PoolSpec
- p, err := makePools(poolType, devs, hostToBD, nodes, hostnames, minSize)
- if err != nil {
- return nil, "", err
- }
-
- // 5. Write the cspc object with a dummy name
- cspc := cstorv1.CStorPoolCluster{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolCluster", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{Namespace: cstorNS, GenerateName: "cstor"},
- Spec: cstorv1.CStorPoolClusterSpec{
- Pools: *p,
- },
- }
- // 6. Unmarshall it into a string
- y, err := yaml.Marshal(cspc)
- if err != nil {
- fmt.Printf("err: %v\n", err)
- return nil, "", err
- }
- specStr := string(y)
- // 7. removing status and versionDetails field
- specStr = specStr[:strings.Index(specStr, "status: {}")]
- // 8. Split the string by the newlines/carriage returns and insert the BD's link
- specStr = addBDDetailComments(specStr, bds)
- return &cspc, specStr, nil
-}
-
-// addBDDetailComments adds more information about the blockdevice in a cspc YAML string
-func addBDDetailComments(yaml string, bdList *v1alpha1.BlockDeviceList) string {
- finalYaml := ""
- for _, l := range strings.Split(yaml, "\n") {
- if strings.Contains(l, "- blockDeviceName:") {
- name := strings.Trim(strings.Split(l, ":")[1], " ")
- finalYaml = finalYaml + getBDComment(name, bdList) + "\n"
- }
- finalYaml = finalYaml + l + "\n"
- }
- return finalYaml
-}
-
-// getBDComment returns information about a blockdevice, with fixed whitespace
-// to match the identation level
-func getBDComment(name string, bdList *v1alpha1.BlockDeviceList) string {
- for _, bd := range bdList.Items {
- if bd.Name == name {
- return " # " + bd.Spec.Path + " " + util.ConvertToIBytes(strconv.FormatUint(bd.Spec.Capacity.Storage, 10))
- }
- }
- return ""
-}
-
-// makePools creates a poolSpec based on the poolType, number of devices per
-// pool instance and a collection of blockdevices by nodes
-func makePools(poolType string, nDevices int, bd map[string][]v1alpha1.BlockDevice,
- nodes []string, hosts []string, minsize resource.Quantity) (*[]cstorv1.PoolSpec, error) {
- // IMPORTANT: User is more likely to see the nodeNames, so the errors
- // should preferably be shown in terms of nodeNames and not hostNames
- var spec []cstorv1.PoolSpec
- switch poolType {
- case string(cstorv1.PoolStriped):
- // always single RAID-group with nDevices patched together, cannot disk replace,
- // no redundancy in a pool, redundancy possible across pool instances
-
- // for each eligible set of BDs from each eligible nodes with hostname
- // "host", take nDevices number of BDs
- for i, host := range hosts {
- bds, ok := bd[host]
- if !ok {
- // DOUBT: Do 0 or lesser number of BDs demand a separate error string?
- // I can ask to create a stripe pool with 1 disk and my
- // choice of node might not have eligible BDs
- return nil, fmt.Errorf("no eligible blockdevices found in node %s", nodes[i])
- }
- if len(bds) < nDevices {
- // the node might have lesser number of BDs
- return nil, fmt.Errorf("not enough blockdevices found on node %s, want %d, found %d", nodes[i], nDevices, len(bds))
- }
- var raids []cstorv1.CStorPoolInstanceBlockDevice
- for d := 0; d < nDevices; d++ {
- raids = append(raids, cstorv1.CStorPoolInstanceBlockDevice{BlockDeviceName: bds[d].Name})
- }
- spec = append(spec, cstorv1.PoolSpec{
- NodeSelector: map[string]string{"kubernetes.io/hostname": host},
- DataRaidGroups: []cstorv1.RaidGroup{{CStorPoolInstanceBlockDevices: raids}},
- PoolConfig: cstorv1.PoolConfig{
- DataRaidGroupType: string(cstorv1.PoolStriped),
- },
- })
- }
- return &spec, nil
- case string(cstorv1.PoolMirrored), string(cstorv1.PoolRaidz), string(cstorv1.PoolRaidz2):
- min := minCount()[poolType]
- if nDevices%min != 0 {
- // there must be min number of devices per RaidGroup
- return nil, fmt.Errorf("number of devices must be a multiple of %d", min)
- }
- if min > nDevices {
- return nil, fmt.Errorf("insufficient blockdevices require minimum %d devices for %s", min, poolType)
- }
- // 1. Start filling in the devices in their RAID-groups per the hostnames
- for i, host := range hosts {
- var raidGroups []cstorv1.RaidGroup
- // add all BDs to a CSPCs CSPI spec
- bds := bd[host]
- if len(bds) < nDevices {
- return nil, fmt.Errorf("not enough eligible blockdevices found on node %s, want %d, found %d", nodes[i], nDevices, len(bds))
- }
- // 1. sort the BDs by increasing order
- sort.Slice(bds, func(i, j int) bool {
- // sort by increasing order
- return bds[i].Spec.Capacity.Storage < bds[j].Spec.Capacity.Storage
- })
- // 2. Check if close to the desired capacity of the pool can be achieved by minimising disk wastage
- // 3. Suggest the start and end index for the BDs to be used for the raid group
- maxIndex := len(bds)
- if maxIndex < nDevices {
- return nil, fmt.Errorf("not enough eligible blockdevices found on node %s, want %d, found %d", nodes[i], min, maxIndex)
- }
- devices := Generate(v1alpha1.BlockDeviceList{Items: bds})
- for d := 0; d < nDevices/min; d++ {
- var raids []cstorv1.CStorPoolInstanceBlockDevice
- d, thisRaidGroup, err := devices.Select(minsize, min)
- // re-assign the head node of the linked-list for next iteration
- // pinning the new head to the variable declared above for upcoming usage as required
- devices = d
- if err != nil {
- return nil, err
- }
- for j := 0; j < min; j++ {
- // each RaidGroup has min number of devices
- raids = append(raids, cstorv1.CStorPoolInstanceBlockDevice{BlockDeviceName: thisRaidGroup[j].ObjectMeta.Name})
- }
- raidGroups = append(raidGroups, cstorv1.RaidGroup{CStorPoolInstanceBlockDevices: raids})
- }
- // add the CSPI BD spec inside cspc to a PoolSpec
- spec = append(spec, cstorv1.PoolSpec{
- NodeSelector: map[string]string{"kubernetes.io/hostname": host},
- DataRaidGroups: raidGroups,
- PoolConfig: cstorv1.PoolConfig{
- DataRaidGroupType: poolType,
- },
- })
- }
- return &spec, nil
- default:
- return nil, fmt.Errorf("unknown pool-type %s", poolType)
- }
-}
-
-// minCount states the minimum number of BDs for a pool type in a RAID-group
-// this is an example of an immutable map
-func minCount() map[string]int {
- return map[string]int{
- string(cstorv1.PoolStriped): 1,
- // mirror: data is mirrored across even no of disks
- string(cstorv1.PoolMirrored): 2,
- // raidz: data is spread across even no of disks and one disk is for parity^
- // ^recovery information, metadata, etc
- // can handle one device failing
- string(cstorv1.PoolRaidz): 3,
- // raidz2: data is spread across even no of disks and two disks are for parity
- // can handle two devices failing
- string(cstorv1.PoolRaidz2): 6,
- }
-}
-
-// filterCStorCompatible takes a list of BDs and gives out a list of BDs which can be used to provision a pool
-func filterCStorCompatible(bds *v1alpha1.BlockDeviceList, minLimit resource.Quantity) (*v1alpha1.BlockDeviceList, error) {
- // TODO: Optionally reject sparse-disks depending on configs
- var final []v1alpha1.BlockDevice
- for _, bd := range bds.Items {
- // an eligible blockdevice is in active+unclaimed state and lacks a file-system
- if bd.Status.State == v1alpha1.BlockDeviceActive &&
- bd.Status.ClaimState == v1alpha1.BlockDeviceUnclaimed &&
- bd.Spec.FileSystem.Type == "" &&
- // BD's capacity >=64 MiB
- bd.Spec.Capacity.Storage >= uint64(minLimit.Value()) {
- final = append(final, bd)
- }
- }
- bds.Items = final
- if len(final) == 0 {
- return nil, fmt.Errorf("found no eligble blockdevices of size %s", minLimit.String())
- }
- return bds, nil
-}
diff --git a/pkg/generate/cspc_test.go b/pkg/generate/cspc_test.go
deleted file mode 100644
index cf1561a7..00000000
--- a/pkg/generate/cspc_test.go
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package generate
-
-import (
- "testing"
-
- cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- cstorfake "github.com/openebs/api/v2/pkg/client/clientset/versioned/fake"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/stretchr/testify/assert"
- "k8s.io/apimachinery/pkg/api/resource"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-func TestCSPC(t *testing.T) {
- type args struct {
- c *client.K8sClient
- nodes []string
- devs int
- GB int
- poolType string
- }
-
- tests := []struct {
- name string
- args args
- want *cstorv1.CStorPoolCluster
- str string
- wantErr bool
- }{
- {
- "no cstor installation present",
- args{
- c: &client.K8sClient{Ns: "", K8sCS: fake.NewSimpleClientset(), OpenebsCS: cstorfake.NewSimpleClientset()},
- nodes: []string{"node1"}, devs: 1, poolType: ""}, nil,
- "", true,
- },
- {
- "cstor present, no suggested nodes present",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod), OpenebsCS: cstorfake.NewSimpleClientset()},
- nodes: []string{"node1"}, devs: 1, poolType: ""}, nil,
- "", true,
- },
- {
- "cstor present, suggested nodes present, blockdevices absent",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1), OpenebsCS: cstorfake.NewSimpleClientset()},
- nodes: []string{"node1"}, devs: 1, poolType: ""}, nil,
- "", true,
- },
- {
- "cstor present, suggested nodes present, blockdevices present but incompatible",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1),
- OpenebsCS: cstorfake.NewSimpleClientset(&activeBDwEXT4, &inactiveBDwEXT4)},
- nodes: []string{"node1"}, devs: 1, poolType: ""}, nil,
- "", true,
- },
- {
- "cstor present, suggested nodes present, blockdevices present and compatible",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1),
- OpenebsCS: cstorfake.NewSimpleClientset(&activeUnclaimedUnforattedBD)},
- nodes: []string{"node1"}, devs: 1, poolType: "stripe"}, &cspc1Struct, cspc1, false,
- },
- {
- "all good config, CSTOR_NAMESPACE is correctly identified each time",
- args{
- c: &client.K8sClient{Ns: "randomNamespaceWillGetReplaced", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1),
- OpenebsCS: cstorfake.NewSimpleClientset(&activeUnclaimedUnforattedBD)},
- nodes: []string{"node1"}, devs: 1, poolType: "stripe"}, &cspc1Struct, cspc1, false,
- },
- {
- "all good config, 2 disk stripe pool for 3 nodes",
- args{
- c: &client.K8sClient{Ns: "", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1, &node2, &node3),
- OpenebsCS: cstorfake.NewSimpleClientset(&goodBD1N1, &goodBD1N2, &goodBD1N3, &goodBD2N1, &goodBD2N2, &goodBD2N3)},
- // Stripe pools can have only one RaidGroup per instance, i.e.
- nodes: []string{"node1", "node2", "node3"}, devs: 2, poolType: "stripe"}, &threeNodeTwoDevCSPC, StripeThreeNodeTwoDev, false,
- },
- {
- "good config, no BDs",
- args{
- c: &client.K8sClient{Ns: "randomNamespaceWillGetReplaced", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1),
- OpenebsCS: cstorfake.NewSimpleClientset(&inactiveBDwEXT4)},
- nodes: []string{"node1"}, devs: 5, poolType: "stripe"}, nil, "", true,
- },
- {
- "all good mirror pool gets provisioned, 2 nodes of same size on 3 nodes",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1, &node2, &node3),
- OpenebsCS: cstorfake.NewSimpleClientset(&goodBD1N1, &goodBD2N1, &goodBD1N2, &goodBD2N2, &goodBD1N3, &goodBD2N3)},
- nodes: []string{"node1", "node2", "node3"}, devs: 2, poolType: "mirror"}, &mirrorCSPC, mirrorCSPCstr, false,
- },
- {
- "all good raidz pool gets provisioned, 3 nodes of same size on 2 nodes",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1, &node2),
- OpenebsCS: cstorfake.NewSimpleClientset(&goodBD1N1, &goodBD2N1, &goodBD3N1, &goodBD1N2, &goodBD2N2, &goodBD3N2)},
- nodes: []string{"node1", "node2"}, devs: 3, poolType: "raidz"}, &raidzCSPCThreeBDTwoNode, raidzCSPCstr, false,
- },
- {
- "all good raidz2 pool does not gets provisioned, insufficient BDs 3 nodes of same size on 2 nodes",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1, &node2),
- OpenebsCS: cstorfake.NewSimpleClientset(&goodBD1N1,
- &goodBD2N1, &goodBD3N1, &goodBD4N1, &goodBD1N2,
- &goodBD2N2, &goodBD3N2, &goodBD4N2)},
- nodes: []string{"node1", "node2"}, devs: 3, poolType: "raidz2"}, nil, "", true,
- },
- {
- "raidz2 pool provisioned, 2 nodes, 6 BDs",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1, &node2),
- OpenebsCS: cstorfake.NewSimpleClientset(&goodBD1N1,
- &goodBD2N1, &goodBD3N1, &goodBD4N1, &goodBD5N1,
- &goodBD6N1, &goodBD1N2, &goodBD2N2, &goodBD3N2,
- &goodBD4N2, &goodBD5N2, &goodBD6N2)},
- nodes: []string{"node1", "node2"}, devs: 6, poolType: "raidz2"}, &raidz2CSPCSixBDTwoNode, raidz2CSPCstr, false,
- },
- {
- "raidz2 not provisioned, requires 2 more BDs",
- args{
- c: &client.K8sClient{Ns: "openebs", K8sCS: fake.NewSimpleClientset(&cstorCSIpod, &node1, &node2),
- OpenebsCS: cstorfake.NewSimpleClientset(&goodBD1N1,
- &goodBD2N1, &goodBD3N1, &goodBD4N1, &goodBD1N2,
- &goodBD2N2, &goodBD3N2, &goodBD4N2)},
- nodes: []string{"node1", "node2"}, devs: 4, poolType: "raidz2"}, nil, "", true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- // tt.args.GB,
- got, got1, err := cspc(tt.args.c, tt.args.nodes, tt.args.devs, tt.args.poolType, resource.MustParse("1Gi"))
- if (err != nil) != tt.wantErr {
- t.Errorf("cspc() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- assert.YAMLEq(t, tt.str, got1, "stringified YAML is not the same as expected")
- assert.Exactlyf(t, got, tt.want, "struct is not same")
- })
- }
-}
-
-func Test_isPoolTypeValid(t *testing.T) {
- tests := []struct {
- name string
- poolNames []string
- want bool
- }{
- {name: "valid pools", poolNames: []string{"stripe", "mirror", "raidz", "raidz2"}, want: true},
- {name: "invalid pools", poolNames: []string{"striped", "mirrored", "raid-z", "raid-z2", "lvm", "raidz1", "raidz0"}, want: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- for _, poolType := range tt.poolNames {
- if got := isPoolTypeValid(poolType); got != tt.want {
- t.Errorf("isPoolTypeValid() = %v, want %v", got, tt.want)
- }
- }
- })
- }
-}
-
-func Test_makePools(t *testing.T) {
- type args struct {
- poolType string
- nDevices int
- bd map[string][]v1alpha1.BlockDevice
- nodes []string
- hosts []string
- minSize resource.Quantity
- }
- tests := []struct {
- name string
- args args
- want *[]cstorv1.PoolSpec
- wantErr bool
- }{
- {"stripe, three node, two disks", args{"stripe", 2,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1},
- "node2": {goodBD1N2, goodBD2N2}, "node3": {goodBD1N3, goodBD2N3}},
- []string{"node1", "node2", "node3"}, []string{"node1", "node2", "node3"}, resource.MustParse("1Gi")}, &threeNodeTwoDevCSPC.Spec.Pools, false},
- {"stripe, three node, two disks, one node lacking disks", args{"stripe", 2,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1},
- "node2": {goodBD1N2, goodBD2N2}},
- []string{"node1", "node2", "node3"}, []string{"node1", "node2", "node3"}, resource.MustParse("1Gi")}, nil, true},
- {"stripe, three node, two disks, one node lacking required disks", args{"stripe", 2,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1},
- "node2": {goodBD1N2}, "node3": {goodBD1N3, goodBD2N2}}, []string{"node1", "node2", "node3"},
- []string{"node1", "node2", "node3"}, resource.MustParse("1Gi")}, nil, true},
- {"raidz, three node, three disks but only two disks present in node3", args{"raidz", 3,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1, goodBD3N1},
- "node2": {goodBD1N2, goodBD2N2, goodBD3N2}, "node3": {goodBD1N3, goodBD2N3}},
- []string{"node1", "node2", "node3"}, []string{"node1", "node2", "node3"}, resource.MustParse("1Gi")}, nil, true},
- {"unknown pool, three node, two disks", args{"randompoolwhichmakesnosense", 2,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1},
- "node2": {goodBD1N2, goodBD2N2}, "node3": {goodBD1N3, goodBD2N3}},
- []string{"node1", "node2", "node3"}, []string{"node1", "node2", "node3"}, resource.MustParse("1Gi")}, nil, true},
- {"mirror, three node, two disks", args{"mirror", 2,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1},
- "node2": {goodBD1N2, goodBD2N2}, "node3": {goodBD1N3, goodBD2N3}},
- []string{"node1", "node2", "node3"}, []string{"node1", "node2", "node3"}, resource.MustParse("1Gi")}, &mirrorCSPC.Spec.Pools, false},
- {"mirror, two node, four disks", args{"mirror", 4,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1, goodBD3N1, goodBD4N1},
- "node2": {goodBD1N2, goodBD2N2, goodBD3N2, goodBD4N2}, "node3": {goodBD1N3, goodBD2N3}},
- // in the above example, presence of node3 BDs don't matter
- []string{"node1", "node2"}, []string{"node1", "node2"}, resource.MustParse("1Gi")}, &mirrorCSPCFourBDs.Spec.Pools, false},
- {"mirror, three node, one disk", args{"mirror", 1,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1},
- "node2": {goodBD1N2, goodBD2N2}, "node3": {goodBD1N3, goodBD2N3}},
- // one cannot create a mirror pool with just one disk per node
- []string{"node1", "node2", "node3"}, []string{"node1", "node2", "node3"}, resource.MustParse("1Gi")}, nil, true},
- {"raidz, two node, three disk", args{"raidz", 3,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1, goodBD3N1}, "node2": {goodBD1N2, goodBD2N2, goodBD3N2}},
- []string{"node1", "node2"}, []string{"node1", "node2"}, resource.MustParse("1Gi")}, &raidzCSPCThreeBDTwoNode.Spec.Pools, false},
- {"raidz2, two node, three disk", args{"raidz2", 6,
- map[string][]v1alpha1.BlockDevice{"node1": {goodBD1N1, goodBD2N1, goodBD3N1, goodBD4N1, goodBD5N1, goodBD6N1}, "node2": {goodBD1N2, goodBD2N2, goodBD3N2, goodBD4N2, goodBD5N2, goodBD6N2}},
- []string{"node1", "node2"}, []string{"node1", "node2"}, resource.MustParse("1Gi")}, &raidz2CSPCSixBDTwoNode.Spec.Pools, false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := makePools(tt.args.poolType, tt.args.nDevices, tt.args.bd, tt.args.nodes, tt.args.hosts, tt.args.minSize)
- if (err != nil) != tt.wantErr {
- t.Errorf("makePools() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- assert.Equal(t, tt.want, got, "pool specs differ for %s", tt.name)
- })
- }
-}
diff --git a/pkg/generate/sort.go b/pkg/generate/sort.go
deleted file mode 100644
index c68819f1..00000000
--- a/pkg/generate/sort.go
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package generate
-
-import (
- "fmt"
-
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- "k8s.io/apimachinery/pkg/api/resource"
-)
-
-// DeviceList is a LinkedList of BlockDevices
-type DeviceList struct {
- item *v1alpha1.BlockDevice
- next *DeviceList
-}
-
-// New returns a new DeviceList node with a bd device
-func New(bd v1alpha1.BlockDevice) *DeviceList {
- return &DeviceList{&bd, nil}
-}
-
-// Generate returns a new initialized *DeviceList(linked list) with the list of Blockdevices
-func Generate(list v1alpha1.BlockDeviceList) *DeviceList {
- if len(list.Items) == 0 {
- return nil
- }
- var head *DeviceList
- curr := head
- for _, bd := range list.Items {
- if curr == nil {
- head = New(bd)
- curr = head
- } else {
- curr.next = New(bd)
- curr = curr.next
- }
- }
- return head
-}
-
-// Select returns count number of Blockdevices from the DeviceList LinkedList
-func (head *DeviceList) Select(size resource.Quantity, count int) (*DeviceList, []v1alpha1.BlockDevice, error) {
- if count == 1 {
- // there's only one way of selecting one disk such that losses are
- // minimized in a single RaidGroup
- curr := head
- head = head.next
- return head, []v1alpha1.BlockDevice{*curr.item}, nil
- }
- curr := head
- fakeHead := &DeviceList{item: &v1alpha1.BlockDevice{}, next: head}
- prev := fakeHead
- results := []v1alpha1.BlockDevice{}
- // ahead is count nodes ahead of curr
- ahead := head
- for i := 1; i < count; i++ {
- if ahead == nil {
- return head, nil, fmt.Errorf("wanted %d blockdevices, have %d to pick", count, i)
- }
- ahead = ahead.next
- }
- for ahead != nil {
- capFirst := resource.MustParse(fmt.Sprintf("%d", curr.item.Spec.Capacity.Storage))
- capLast := resource.MustParse(fmt.Sprintf("%d", ahead.item.Spec.Capacity.Storage))
- if capFirst.Cmp(capLast) == 0 {
- // add all the devices in the same group
- for curr != ahead {
- results = append(results, *curr.item)
- curr = curr.next
- }
- results = append(results, *curr.item)
- // 1. Remove the set of BDs from the LinkedList
- prev.next = ahead.next
- if len(results) == count {
- break
- }
- }
- prev = curr
- curr = curr.next
- ahead = ahead.next
- }
- head = fakeHead.next
- if len(results) != count {
- return head, nil, fmt.Errorf("wanted %d blockdevices, have %d to pick", count, len(results))
- }
- return head, results, nil
-}
diff --git a/pkg/generate/sort_test.go b/pkg/generate/sort_test.go
deleted file mode 100644
index 8137b90a..00000000
--- a/pkg/generate/sort_test.go
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package generate
-
-import (
- "fmt"
- "reflect"
- "testing"
-
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- "github.com/openebs/api/v2/pkg/apis/types"
- "github.com/stretchr/testify/assert"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestGenerate(t *testing.T) {
- type args struct {
- list v1alpha1.BlockDeviceList
- }
- tests := []struct {
- name string
- args args
- want *DeviceList
- }{
- {"empty node LinkedList",
- args{list: v1alpha1.BlockDeviceList{Items: []v1alpha1.BlockDevice{}}}, nil},
- {"single node LinkedList",
- args{list: v1alpha1.BlockDeviceList{Items: []v1alpha1.BlockDevice{goodBD1N1}}},
- &DeviceList{&goodBD1N1, nil},
- },
- {
- "two node LinkedList",
- args{list: v1alpha1.BlockDeviceList{Items: []v1alpha1.BlockDevice{goodBD1N1, goodBD1N2}}},
- &DeviceList{&goodBD1N1, &DeviceList{&goodBD1N2, nil}},
- },
- {
- "four node LinkedList",
- args{list: v1alpha1.BlockDeviceList{Items: []v1alpha1.BlockDevice{goodBD1N1, goodBD1N2, goodBD1N3, goodBD2N1}}},
- &DeviceList{&goodBD1N1, &DeviceList{&goodBD1N2, &DeviceList{&goodBD1N3,
- &DeviceList{&goodBD2N1, nil}}}},
- },
- {
- "five node LinkedList",
- args{list: v1alpha1.BlockDeviceList{Items: []v1alpha1.BlockDevice{goodBD1N1, goodBD1N2, goodBD1N3, goodBD2N1, goodBD3N1}}},
- &DeviceList{&goodBD1N1, &DeviceList{&goodBD1N2, &DeviceList{&goodBD1N3,
- &DeviceList{&goodBD2N1, &DeviceList{&goodBD3N1, nil}}}}},
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- assert.Equalf(t, tt.want, Generate(tt.args.list), "Generate(%v)", tt.args.list)
- })
- }
-}
-
-func TestDeviceList_Select(t *testing.T) {
- type args struct {
- head *DeviceList
- size resource.Quantity
- count int
- }
- tests := []struct {
- name string
- args args
- want []v1alpha1.BlockDevice
- wantErr bool
- }{
- {"one node LinkedList", args{&DeviceList{&goodBD1N1, nil}, resource.MustParse("0Ki"), 1}, []v1alpha1.BlockDevice{goodBD1N1}, false},
- {"single node LinkedList", args{&DeviceList{&goodBD1N1, nil}, resource.MustParse("1Gi"), 1},
- []v1alpha1.BlockDevice{goodBD1N1}, false},
- {"two node LinkedList, one BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1, nil}},
- resource.MustParse("1Gi"), 1}, []v1alpha1.BlockDevice{goodBD1N1}, false},
- {"two node LinkedList, four BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1, nil}},
- resource.MustParse("1Gi"), 4}, nil, true},
- {"two node LinkedList, two BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1, nil}},
- resource.MustParse("1Gi"), 2}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1}, false},
- {"three node LinkedList, one BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1, &DeviceList{&goodBD3N1, nil}}},
- resource.MustParse("1Gi"), 1}, []v1alpha1.BlockDevice{goodBD1N1}, false},
- {"three node LinkedList, two BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1, &DeviceList{&goodBD3N1, nil}}},
- resource.MustParse("1Gi"), 2}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1}, false},
- {"three node LinkedList, three BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1, &DeviceList{&goodBD3N1, nil}}},
- resource.MustParse("1Gi"), 3}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1, goodBD3N1}, false},
- {"four node LinkedList, four BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1,
- &DeviceList{&goodBD3N1, &DeviceList{&goodBD4N1, nil}}}},
- resource.MustParse("1Gi"), 4}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1, goodBD3N1, goodBD4N1}, false},
- {"four node LinkedList, three BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1,
- &DeviceList{&goodBD3N1, &DeviceList{&goodBD4N1, nil}}}},
- resource.MustParse("1Gi"), 3}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1, goodBD3N1}, false},
- {"five node LinkedList, five BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1,
- &DeviceList{&goodBD3N1, &DeviceList{&goodBD4N1, &DeviceList{&goodBD5N1, nil}}}}},
- resource.MustParse("1Gi"), 5}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1, goodBD3N1, goodBD4N1, goodBD5N1}, false},
- {"six node LinkedList, four BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1,
- &DeviceList{&goodBD3N1, &DeviceList{&goodBD4N1, &DeviceList{&goodBD5N1, &DeviceList{&goodBD6N1, nil}}}}}},
- resource.MustParse("1Gi"), 4}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1, goodBD3N1, goodBD4N1}, false},
- {"six node LinkedList, five BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1,
- &DeviceList{&goodBD3N1, &DeviceList{&goodBD4N1, &DeviceList{&goodBD5N1, &DeviceList{&goodBD6N1, nil}}}}}},
- resource.MustParse("1Gi"), 5}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1, goodBD3N1, goodBD4N1, goodBD5N1}, false},
- {"six node LinkedList, six BD required", args{&DeviceList{&goodBD1N1, &DeviceList{&goodBD2N1,
- &DeviceList{&goodBD3N1, &DeviceList{&goodBD4N1, &DeviceList{&goodBD5N1, &DeviceList{&goodBD6N1, nil}}}}}},
- resource.MustParse("1Gi"), 6}, []v1alpha1.BlockDevice{goodBD1N1, goodBD2N1, goodBD3N1, goodBD4N1, goodBD5N1, goodBD6N1}, false},
- {"six node LinkedList, two BD required of 1G", args{bdLinkedList(6, []int{1, 2, 3, 4, 5, 6}), resource.MustParse("1G"), 2}, nil, true},
- {"six node LinkedList, two BD required of 1G", args{bdLinkedList(6, []int{1, 2, 3, 4, 6, 6}), resource.MustParse("1G"), 2},
- []v1alpha1.BlockDevice{bdGen(5, 6), bdGen(6, 6)}, false},
- {"six node LinkedList, two BD required of 1G", args{bdLinkedList(6, []int{1, 2, 4, 4, 6, 6}), resource.MustParse("1G"), 2},
- []v1alpha1.BlockDevice{bdGen(3, 4), bdGen(4, 4)}, false},
- {"six node LinkedList, three BD required of 1G", args{bdLinkedList(6, []int{1, 4, 4, 4, 6, 6}), resource.MustParse("1G"), 3},
- []v1alpha1.BlockDevice{bdGen(2, 4), bdGen(3, 4), bdGen(4, 4)}, false},
- {"six node LinkedList, three BD required of 1G", args{bdLinkedList(6, []int{5, 10, 15, 20, 25, 30}), resource.MustParse("1G"), 3},
- nil, true},
- {"six node LinkedList, two BD required of 1G", args{bdLinkedList(6, []int{1, 1, 10, 20, 25, 30}), resource.MustParse("1G"), 2},
- []v1alpha1.BlockDevice{bdGen(1, 1), bdGen(2, 1)}, false},
- {"six node LinkedList, two BD required of 6G", args{bdLinkedList(6, []int{5, 10, 10, 20, 25, 30}), resource.MustParse("6G"), 2},
- []v1alpha1.BlockDevice{bdGen(2, 10), bdGen(3, 10)}, false},
- {"six node LinkedList with unsorted BD sizes, two BD required of 1G", args{bdLinkedList(6, []int{25, 30, 6, 10, 20, 6}), resource.MustParse("1G"), 2},
- nil, true},
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- newHead, got, err := tt.args.head.Select(tt.args.size, tt.args.count)
- if (err != nil) != tt.wantErr {
- t.Fatalf("Select() error = %v, wantErr %v", err, tt.wantErr)
- }
- _ = newHead
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("Select(...), got %v, want %v", len(got), len(tt.want))
- }
- })
- }
-}
-
-func bdGen(bdSuffix int, GBsize int) v1alpha1.BlockDevice {
- parse := resource.MustParse(fmt.Sprintf("%d", GBsize) + "G")
- return v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{
- Kind: "BlockDevice",
- APIVersion: "openebs.io/v1alpha1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("bd-%d", bdSuffix),
- Namespace: "openebs",
- Labels: map[string]string{types.HostNameLabelKey: "node-X"},
- },
- Spec: v1alpha1.DeviceSpec{
- Capacity: v1alpha1.DeviceCapacity{Storage: uint64(parse.Value())},
- NodeAttributes: v1alpha1.NodeAttribute{NodeName: "node-X"},
- },
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive},
- }
-}
-
-func bdLinkedList(limit int, size []int) *DeviceList {
- if len(size) != limit {
- return nil
- }
- var head *DeviceList
- for i := limit - 1; i >= 0; i-- {
- tmp := New(bdGen(i+1, size[i]))
- tmp.next = head
- head = tmp
- }
- return head
-}
-
-func BenchmarkSelect(b *testing.B) {
- type args struct {
- head *DeviceList
- size resource.Quantity
- count int
- }
- benchmarks := []struct {
- name string
- args args
- want []v1alpha1.BlockDevice
- }{
- {"six node LinkedList, two BD required of 6G", args{bdLinkedList(6, []int{5, 10, 10, 20, 25, 30}), resource.MustParse("6G"), 2},
- []v1alpha1.BlockDevice{bdGen(2, 10), bdGen(3, 10)}},
- {"six node LinkedList with unsorted BD sizes, two BD required of 1G", args{bdLinkedList(6, []int{25, 30, 6, 10, 20, 6}), resource.MustParse("1G"), 2},
- nil},
- }
- for _, bm := range benchmarks {
- b.Run(bm.name, func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- _, _, _ = bm.args.head.Select(bm.args.size, bm.args.count)
- }
- })
- }
-}
diff --git a/pkg/generate/testdata_test.go b/pkg/generate/testdata_test.go
deleted file mode 100644
index c70a1e95..00000000
--- a/pkg/generate/testdata_test.go
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package generate
-
-import (
- cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var cstorCSIpod = corev1.Pod{
- TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
- ObjectMeta: metav1.ObjectMeta{Name: "fake-cstor-CSI", Namespace: "openebs",
- Labels: map[string]string{"openebs.io/version": "1.9.0", "openebs.io/component-name": "openebs-cstor-csi-controller"}},
- Status: corev1.PodStatus{Phase: corev1.PodRunning},
-}
-
-var node1 = corev1.Node{
- TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Status: corev1.NodeStatus{Phase: corev1.NodeRunning}}
-
-var node2 = corev1.Node{
- TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}},
- Status: corev1.NodeStatus{Phase: corev1.NodeRunning}}
-
-var node3 = corev1.Node{
- TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}},
- Status: corev1.NodeStatus{Phase: corev1.NodeRunning}}
-
-var activeBDwEXT4 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd1", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "ext4", Mountpoint: "/dev/sda"}},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var inactiveBDwEXT4 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd1-inactive", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "ext4", Mountpoint: "/dev/sda"}, Capacity: v1alpha1.DeviceCapacity{Storage: 6711000}},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceInactive}}
-
-var activeUnclaimedUnforattedBD = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd1", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/dev/sda"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD1N1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd1-n1", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd1n1"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD2N1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd2-n1", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd2n1"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD3N1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd3-n1", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd3n1"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD4N1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd4-n1", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd4n1"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD5N1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd5-n1", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd5n1"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD6N1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd6-n1", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node1"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd6n1"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-var goodBD1N2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd1-n2", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node2"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/dev/sda"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD2N2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd2-n2", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node2"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/dev/sda"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD3N2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd3-n2", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node2"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd3n2"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD4N2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd4-n2", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node2"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd4n2"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD5N2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd5-n2", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node2"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd5n2"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD6N2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd6-n2", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node2"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/mnt/bd6n2"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD1N3 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd1-n3", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node3"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/dev/sdc"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var goodBD2N3 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "Blockdevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd2-n3", Namespace: "openebs",
- Labels: map[string]string{"kubernetes.io/hostname": "node3"}},
- Spec: v1alpha1.DeviceSpec{FileSystem: v1alpha1.FileSystemInfo{Type: "", Mountpoint: "/dev/sdc"}, Capacity: v1alpha1.DeviceCapacity{Storage: 1074000000},
- Path: "/dev/sda"},
- Status: v1alpha1.DeviceStatus{ClaimState: v1alpha1.BlockDeviceUnclaimed, State: v1alpha1.BlockDeviceActive}}
-
-var mirrorCSPC = cstorv1.CStorPoolCluster{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolCluster", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{GenerateName: "cstor", Namespace: "openebs"},
- Spec: cstorv1.CStorPoolClusterSpec{Pools: []cstorv1.PoolSpec{
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"},
- DataRaidGroups: []cstorv1.RaidGroup{{CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{
- BlockDeviceName: "bd1-n1"}, {BlockDeviceName: "bd2-n1"}}}}, PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolMirrored)}},
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"},
- DataRaidGroups: []cstorv1.RaidGroup{{CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{
- BlockDeviceName: "bd1-n2"}, {BlockDeviceName: "bd2-n2"}}}}, PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolMirrored)}},
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node3"},
- DataRaidGroups: []cstorv1.RaidGroup{{CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{
- BlockDeviceName: "bd1-n3"}, {BlockDeviceName: "bd2-n3"}}}}, PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolMirrored)}}}}}
-
-var mirrorCSPCFourBDs = cstorv1.CStorPoolCluster{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolCluster", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{GenerateName: "cstor", Namespace: "openebs"},
- Spec: cstorv1.CStorPoolClusterSpec{Pools: []cstorv1.PoolSpec{
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"},
- DataRaidGroups: []cstorv1.RaidGroup{{CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n1"}, {BlockDeviceName: "bd2-n1"}}},
- {CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd3-n1"}, {BlockDeviceName: "bd4-n1"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolMirrored)}},
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"},
- DataRaidGroups: []cstorv1.RaidGroup{{CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n2"}, {BlockDeviceName: "bd2-n2"}}},
- {CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd3-n2"}, {BlockDeviceName: "bd4-n2"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolMirrored)}}}}}
-
-var mirrorCSPCstr = `apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- creationTimestamp: null
- generateName: cstor
- namespace: openebs
-spec:
- pools:
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n1
- nodeSelector:
- kubernetes.io/hostname: node1
- poolConfig:
- dataRaidGroupType: mirror
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n2
- nodeSelector:
- kubernetes.io/hostname: node2
- poolConfig:
- dataRaidGroupType: mirror
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n3
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n3
- nodeSelector:
- kubernetes.io/hostname: node3
- poolConfig:
- dataRaidGroupType: mirror
-
-`
-var raidzCSPCThreeBDTwoNode = cstorv1.CStorPoolCluster{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolCluster", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{GenerateName: "cstor", Namespace: "openebs"},
- Spec: cstorv1.CStorPoolClusterSpec{Pools: []cstorv1.PoolSpec{
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"},
- DataRaidGroups: []cstorv1.RaidGroup{
- {CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n1"}, {BlockDeviceName: "bd2-n1"}, {BlockDeviceName: "bd3-n1"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolRaidz)}},
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"},
- DataRaidGroups: []cstorv1.RaidGroup{
- {CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n2"}, {BlockDeviceName: "bd2-n2"}, {BlockDeviceName: "bd3-n2"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolRaidz)}}}}}
-
-var raidzCSPCstr = `apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- creationTimestamp: null
- generateName: cstor
- namespace: openebs
-spec:
- pools:
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd3-n1
- nodeSelector:
- kubernetes.io/hostname: node1
- poolConfig:
- dataRaidGroupType: raidz
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd3-n2
- nodeSelector:
- kubernetes.io/hostname: node2
- poolConfig:
- dataRaidGroupType: raidz
-
-`
-var raidz2CSPCSixBDTwoNode = cstorv1.CStorPoolCluster{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolCluster", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{GenerateName: "cstor", Namespace: "openebs"},
- Spec: cstorv1.CStorPoolClusterSpec{Pools: []cstorv1.PoolSpec{
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"},
- DataRaidGroups: []cstorv1.RaidGroup{
- {CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n1"},
- {BlockDeviceName: "bd2-n1"}, {BlockDeviceName: "bd3-n1"}, {BlockDeviceName: "bd4-n1"}, {BlockDeviceName: "bd5-n1"}, {BlockDeviceName: "bd6-n1"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolRaidz2)}},
- {NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"},
- DataRaidGroups: []cstorv1.RaidGroup{
- {CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n2"},
- {BlockDeviceName: "bd2-n2"}, {BlockDeviceName: "bd3-n2"}, {BlockDeviceName: "bd4-n2"}, {BlockDeviceName: "bd5-n2"}, {BlockDeviceName: "bd6-n2"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolRaidz2)}}}}}
-
-var raidz2CSPCstr = `apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- creationTimestamp: null
- generateName: cstor
- namespace: openebs
-spec:
- pools:
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd3-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd4-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd5-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd6-n1
- nodeSelector:
- kubernetes.io/hostname: node1
- poolConfig:
- dataRaidGroupType: raidz2
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd3-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd4-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd5-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd6-n2
- nodeSelector:
- kubernetes.io/hostname: node2
- poolConfig:
- dataRaidGroupType: raidz2
-
-`
-
-var cspc1 = `apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- creationTimestamp: null
- generateName: cstor
- namespace: openebs
-spec:
- pools:
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1
- nodeSelector:
- kubernetes.io/hostname: node1
- poolConfig:
- dataRaidGroupType: stripe
-
-`
-var StripeThreeNodeTwoDev = `apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- creationTimestamp: null
- generateName: cstor
- namespace: openebs
-spec:
- pools:
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n1
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n1
- nodeSelector:
- kubernetes.io/hostname: node1
- poolConfig:
- dataRaidGroupType: stripe
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n2
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n2
- nodeSelector:
- kubernetes.io/hostname: node2
- poolConfig:
- dataRaidGroupType: stripe
- - dataRaidGroups:
- - blockDevices:
- # /dev/sda 1.0GiB
- - blockDeviceName: bd1-n3
- # /dev/sda 1.0GiB
- - blockDeviceName: bd2-n3
- nodeSelector:
- kubernetes.io/hostname: node3
- poolConfig:
- dataRaidGroupType: stripe
-
-`
-var threeNodeTwoDevCSPC = cstorv1.CStorPoolCluster{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolCluster", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{GenerateName: "cstor", Namespace: "openebs"},
- Spec: cstorv1.CStorPoolClusterSpec{Pools: []cstorv1.PoolSpec{{
- NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"},
- DataRaidGroups: []cstorv1.RaidGroup{{
- CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n1"}, {BlockDeviceName: "bd2-n1"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolStriped)}},
- {
- NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"},
- DataRaidGroups: []cstorv1.RaidGroup{{
- CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n2"}, {BlockDeviceName: "bd2-n2"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolStriped)}},
- {
- NodeSelector: map[string]string{"kubernetes.io/hostname": "node3"},
- DataRaidGroups: []cstorv1.RaidGroup{{
- CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1-n3"}, {BlockDeviceName: "bd2-n3"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolStriped)}}}},
-}
-
-var cspc1Struct = cstorv1.CStorPoolCluster{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolCluster", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{GenerateName: "cstor", Namespace: "openebs"},
- Spec: cstorv1.CStorPoolClusterSpec{Pools: []cstorv1.PoolSpec{{
- NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"},
- DataRaidGroups: []cstorv1.RaidGroup{{
- CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd1"}}}},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: string(cstorv1.PoolStriped)}}}},
-}
diff --git a/pkg/persistentvolumeclaim/cstor.go b/pkg/persistentvolumeclaim/cstor.go
deleted file mode 100644
index 0e92b413..00000000
--- a/pkg/persistentvolumeclaim/cstor.go
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package persistentvolumeclaim
-
-import (
- "fmt"
- "time"
-
- cstortypes "github.com/openebs/api/v2/pkg/apis/types"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/cli-runtime/pkg/printers"
-)
-
-const (
- cstorPvcInfoTemplate = `
-{{.Name}} Details :
-------------------
-NAME : {{.Name}}
-NAMESPACE : {{.Namespace}}
-CAS TYPE : {{.CasType}}
-BOUND VOLUME : {{.BoundVolume}}
-ATTACHED TO NODE : {{.AttachedToNode}}
-POOL : {{.Pool}}
-STORAGE CLASS : {{.StorageClassName}}
-SIZE : {{.Size}}
-USED : {{.Used}}
-CV STATUS : {{.CVStatus}}
-PV STATUS : {{.PVStatus}}
-MOUNTED BY : {{.MountPods}}
-`
-
- detailsFromCVC = `
-Additional Details from CVC :
------------------------------
-NAME : {{ .metadata.name }}
-REPLICA COUNT : {{ .spec.provision.replicaCount }}
-POOL INFO : {{ .status.poolInfo}}
-VERSION : {{ .versionDetails.status.current}}
-UPGRADING : {{if eq .versionDetails.status.current .versionDetails.desired}}false{{else}}true{{end}}
-`
-)
-
-// DescribeCstorVolumeClaim describes a cstor storage engine PersistentVolumeClaim
-func DescribeCstorVolumeClaim(c *client.K8sClient, pvc *corev1.PersistentVolumeClaim, pv *corev1.PersistentVolume, mountPods string) error {
- // Create Empty template objects and fill gradually when underlying sub CRs are identified.
- pvcInfo := util.CstorPVCInfo{}
-
- pvcInfo.Name = pvc.Name
- pvcInfo.Namespace = pvc.Namespace
- pvcInfo.BoundVolume = pvc.Spec.VolumeName
- pvcInfo.CasType = util.CstorCasType
- pvcInfo.StorageClassName = *pvc.Spec.StorageClassName
- pvcInfo.MountPods = mountPods
-
- if pv != nil {
- pvcInfo.PVStatus = pv.Status.Phase
- }
-
- // fetching the underlying CStorVolume for the PV, to get the phase and size and notify the user
- // if the CStorVolume is not found.
- cv, err := c.GetCV(pvc.Spec.VolumeName)
- if err != nil {
- fmt.Println("Underlying CstorVolume is not found for: ", pvc.Name)
- } else {
- pvcInfo.Size = util.ConvertToIBytes(cv.Spec.Capacity.String())
- pvcInfo.CVStatus = cv.Status.Phase
- }
-
- // fetching the underlying CStorVolumeConfig for the PV, to get the cvc info and Pool Name and notify the user
- // if the CStorVolumeConfig is not found.
- cvc, err := c.GetCVC(pvc.Spec.VolumeName)
- if err != nil {
- fmt.Println("Underlying CstorVolumeConfig is not found for: ", pvc.Name)
- } else {
- pvcInfo.Pool = cvc.Labels[cstortypes.CStorPoolClusterLabelKey]
- }
-
- // fetching the underlying CStorVolumeAttachment for the PV, to get the attached to node and notify the user
- // if the CStorVolumeAttachment is not found.
- cva, err := c.GetCVA(util.CVAVolnameKey + "=" + pvc.Spec.VolumeName)
- if err != nil {
- pvcInfo.AttachedToNode = util.NotAvailable
- fmt.Println("Underlying CstorVolumeAttachment is not found for: ", pvc.Name)
- } else {
- pvcInfo.AttachedToNode = cva.Spec.Volume.OwnerNodeID
- }
-
- // fetching the underlying CStorVolumeReplicas for the PV, to list their details and notify the user
- // none of the replicas are running if the CStorVolumeReplicas are not found.
- cvrs, err := c.GetCVRs(cstortypes.PersistentVolumeLabelKey + "=" + pvc.Spec.VolumeName)
- if err == nil && len(cvrs.Items) > 0 {
- pvcInfo.Used = util.ConvertToIBytes(util.GetUsedCapacityFromCVR(cvrs))
- }
-
- // Printing the Filled Details of the Cstor PVC
- _ = util.PrintByTemplate("pvc", cstorPvcInfoTemplate, pvcInfo)
-
- // fetching the underlying TargetPod for the PV, to display its relevant details and notify the user
- // if the TargetPod is not found.
- tgtPod, err := c.GetCVTargetPod(pvc.Name, pvc.Spec.VolumeName)
- if err == nil {
- fmt.Printf("\nTarget Details :\n----------------\n")
- var rows []metav1.TableRow
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- tgtPod.Namespace, tgtPod.Name,
- util.GetReadyContainers(tgtPod.Status.ContainerStatuses),
- tgtPod.Status.Phase, util.Duration(time.Since(tgtPod.ObjectMeta.CreationTimestamp.Time)),
- tgtPod.Status.PodIP, tgtPod.Spec.NodeName}})
- util.TablePrinter(util.PodDetailsColumnDefinations, rows, printers.PrintOptions{Wide: true})
- } else {
- fmt.Printf("\nTarget Details :\n----------------\nNo target pod exists for the CstorVolume\n")
- }
-
- // If CVRs are found list them and show relevant details else notify the user none of the replicas are
- // running if not found
- if cvrs != nil && len(cvrs.Items) > 0 {
- fmt.Printf("\nReplica Details :\n-----------------\n")
- var rows []metav1.TableRow
- for _, cvr := range cvrs.Items {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{cvr.Name,
- util.ConvertToIBytes(cvr.Status.Capacity.Total),
- util.ConvertToIBytes(cvr.Status.Capacity.Used),
- cvr.Status.Phase,
- util.Duration(time.Since(cvr.ObjectMeta.CreationTimestamp.Time))}})
- }
- util.TablePrinter(util.CstorReplicaColumnDefinations, rows, printers.PrintOptions{Wide: true})
- } else {
- fmt.Printf("\nReplica Details :\n-----------------\nNo running replicas found\n")
- }
-
- if cvc != nil {
- util.TemplatePrinter(detailsFromCVC, cvc)
- }
-
- return nil
-}
diff --git a/pkg/persistentvolumeclaim/cstor_test.go b/pkg/persistentvolumeclaim/cstor_test.go
deleted file mode 100644
index f3d0d61f..00000000
--- a/pkg/persistentvolumeclaim/cstor_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package persistentvolumeclaim
-
-import (
- "testing"
-
- openebsFakeClientset "github.com/openebs/api/v2/pkg/client/clientset/versioned/fake"
- "github.com/openebs/openebsctl/pkg/client"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-func TestDescribeCstorVolumeClaim(t *testing.T) {
- type args struct {
- c *client.K8sClient
- pvc *corev1.PersistentVolumeClaim
- pv *corev1.PersistentVolume
- mountPods string
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- name: "All Valid Values",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor, &cstorTargetPod),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pv: &cstorPV1,
- pvc: &cstorPVC1,
- mountPods: "",
- },
- wantErr: false,
- },
- {
- name: "PV missing",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pv: nil,
- pvc: &cstorPVC1,
- mountPods: "",
- },
- wantErr: false,
- },
- {
- name: "CV missing",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv2, &cva1, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pv: &cstorPV1,
- pvc: &cstorPVC1,
- mountPods: "",
- },
- wantErr: false,
- },
- {
- name: "CVC missing",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pv: &cstorPV1,
- pvc: &cstorPVC1,
- mountPods: "",
- },
- wantErr: false,
- },
- {
- name: "CVA missing",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pv: &cstorPV1,
- pvc: &cstorPVC1,
- mountPods: "",
- },
- wantErr: false,
- },
- {
- name: "CVRs missing",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva2, &cvc1, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pv: &cstorPV1,
- pvc: &cstorPVC1,
- mountPods: "",
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := DescribeCstorVolumeClaim(tt.args.c, tt.args.pvc, tt.args.pv, tt.args.mountPods); (err != nil) != tt.wantErr {
- t.Errorf("DescribeCstorVolumeClaim() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
diff --git a/pkg/persistentvolumeclaim/debug.go b/pkg/persistentvolumeclaim/debug.go
deleted file mode 100644
index d508ec74..00000000
--- a/pkg/persistentvolumeclaim/debug.go
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package persistentvolumeclaim
-
-import (
- "fmt"
-
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/pkg/errors"
- corev1 "k8s.io/api/core/v1"
-)
-
-// Debug manages various implementations of PersistentVolumeClaim Describing
-func Debug(pvcs []string, namespace string, openebsNs string) error {
- if len(pvcs) == 0 || pvcs == nil {
- return errors.New("please provide atleast one pvc name to describe")
- }
- // Clienset creation
- k := client.NewK8sClient(openebsNs)
-
- // 1. Get a list of required PersistentVolumeClaims
- var pvcList *corev1.PersistentVolumeClaimList
- pvcList, err := k.GetPVCs(namespace, pvcs, "")
- if len(pvcList.Items) == 0 || err != nil {
- return errors.New("no pvcs found corresponding to the names")
- }
- // 2. Get the namespaces
- nsMap, _ := k.GetOpenEBSNamespaceMap()
- // 3. Range over the list of PVCs
- for _, pvc := range pvcList.Items {
- // 4. Fetch the storage class, used to get the cas-type
- sc, _ := k.GetSC(*pvc.Spec.StorageClassName)
- pv, _ := k.GetPV(pvc.Spec.VolumeName)
- // 5. Get cas type
- casType := util.GetCasType(pv, sc)
- // 6. Assign a namespace corresponding to the engine
- if openebsNs == "" {
- if val, ok := nsMap[casType]; ok {
- k.Ns = val
- }
- }
- // 7. Debug the volume based on its casType
- if desc, ok := CasDebugMap()[casType]; ok {
- err = desc(k, &pvc, pv)
- if err != nil {
- continue
- }
- } else {
- fmt.Printf("Debugging is currently not supported for %s Cas Type PVCs\n", casType)
- }
- }
- return nil
-}
-
-// CasDebugMap returns a map cas-types to functions for persistentvolumeclaim debugging
-func CasDebugMap() map[string]func(*client.K8sClient, *corev1.PersistentVolumeClaim, *corev1.PersistentVolume) error {
- // a good hack to implement immutable maps in Golang & also write tests for it
- return map[string]func(*client.K8sClient, *corev1.PersistentVolumeClaim, *corev1.PersistentVolume) error{
- util.CstorCasType: DebugCstorVolumeClaim,
- }
-}
diff --git a/pkg/persistentvolumeclaim/debug_cstor.go b/pkg/persistentvolumeclaim/debug_cstor.go
deleted file mode 100644
index 043a67ed..00000000
--- a/pkg/persistentvolumeclaim/debug_cstor.go
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package persistentvolumeclaim
-
-import (
- "errors"
- "fmt"
-
- cstortypes "github.com/openebs/api/v2/pkg/apis/types"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/cli-runtime/pkg/printers"
-)
-
-// DebugCstorVolumeClaim is used to debug a cstor volume by calling various modules
-func DebugCstorVolumeClaim(k *client.K8sClient, pvc *corev1.PersistentVolumeClaim, pv *corev1.PersistentVolume) error {
- // 1. Main Struture Creation which contains all cstor CRs, this structure will be passed across all modules.
- var cstorResources util.CstorVolumeResources
- cstorResources.PVC = pvc
- cstorResources.PV = pv
- // 2. Fill in the available CRs
- if pv != nil {
- cv, _ := k.GetCV(pv.Name)
- cstorResources.CV = cv
- cvc, _ := k.GetCVC(pv.Name)
- cstorResources.CVC = cvc
- cva, _ := k.GetCVA(util.CVAVolnameKey + "=" + pv.Name)
- cstorResources.CVA = cva
- cvrs, _ := k.GetCVRs(cstortypes.PersistentVolumeLabelKey + "=" + pv.Name)
- cstorResources.CVRs = cvrs
- }
- sc, _ := k.GetSC(*pvc.Spec.StorageClassName)
- // 3. Fill in the Pool and Blockdevice Details
- if sc != nil {
- cspc, _ := k.GetCSPC(sc.Parameters["cstorPoolCluster"])
- cstorResources.CSPC = cspc
- if cspc != nil {
- cspis, _ := k.GetCSPIs(nil, "openebs.io/cas-type=cstor,openebs.io/cstor-pool-cluster="+cspc.Name)
- cstorResources.CSPIs = cspis
- expectedBlockDevicesInPool := make(map[string]bool)
- // This map contains the list of BDs we specified at the time of Pool Creation
- for _, pool := range cspc.Spec.Pools {
- dataRaidGroups := pool.DataRaidGroups
- for _, dataRaidGroup := range dataRaidGroups {
- for _, bdName := range dataRaidGroup.GetBlockDevices() {
- expectedBlockDevicesInPool[bdName] = false
- }
- }
- }
- // This list contains the list of BDs which are actually present in the system.
- var presentBlockDevicesInPool []string
- for _, pool := range cspis.Items {
- raidGroupsInPool := pool.GetAllRaidGroups()
- for _, item := range raidGroupsInPool {
- presentBlockDevicesInPool = append(presentBlockDevicesInPool, item.GetBlockDevices()...)
- }
- }
-
- // Mark the present BDs are true.
- cstorResources.PresentBDs, _ = k.GetBDs(presentBlockDevicesInPool, "")
- for _, item := range cstorResources.PresentBDs.Items {
- if _, ok := expectedBlockDevicesInPool[item.Name]; ok {
- expectedBlockDevicesInPool[item.Name] = true
- }
- }
- cstorResources.ExpectedBDs = expectedBlockDevicesInPool
- cstorResources.BDCs, _ = k.GetBDCs(nil, "openebs.io/cstor-pool-cluster="+cspc.Name)
-
- }
- }
- // 4. Call the resource showing module
- _ = resourceStatus(cstorResources)
- _ = displayPVCEvents(*k, cstorResources)
- _ = displayCVCEvents(*k, cstorResources)
- _ = displayCVREvents(*k, cstorResources)
- _ = displayCSPIEvents(*k, cstorResources)
- _ = displayCSPCEvents(*k, cstorResources)
- _ = displayBDCEvents(*k, cstorResources)
- return nil
-}
-
-func resourceStatus(crs util.CstorVolumeResources) error {
- // 1. Fetch the total and usage details and humanize them
- var totalCapacity, usedCapacity, availableCapacity string
- totalCapacity = util.ConvertToIBytes(crs.PVC.Spec.Resources.Requests.Storage().String())
- if crs.CVRs != nil {
- usedCapacity = util.ConvertToIBytes(util.GetUsedCapacityFromCVR(crs.CVRs))
- }
- // 2. Calculate the available capacity and usage percentage is used capacity is available
- if usedCapacity != "" {
- availableCapacity = util.GetAvailableCapacity(totalCapacity, usedCapacity)
- percentage := util.GetUsedPercentage(totalCapacity, usedCapacity)
- if percentage >= 80.00 {
- availableCapacity = util.ColorText(availableCapacity, util.Red)
- } else {
- availableCapacity = util.ColorText(availableCapacity, util.Green)
- }
- }
- // 3. Display the usage status
- fmt.Println("Volume Usage Stats:")
- fmt.Println("-------------------")
-
- util.TablePrinter(util.VolumeTotalAndUsageDetailColumnDefinitions, []metav1.TableRow{{Cells: []interface{}{totalCapacity, usedCapacity, availableCapacity}}}, printers.PrintOptions{})
-
- fmt.Println()
- fmt.Println("Related CR Statuses:")
- fmt.Println("--------------------")
-
- var crStatusRows []metav1.TableRow
- if crs.PV != nil {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"PersistentVolume", crs.PV.Name, util.ColorStringOnStatus(string(crs.PV.Status.Phase))}})
- } else {
- crStatusRows = append(
- crStatusRows,
- metav1.TableRow{Cells: []interface{}{"PersistentVolume", "", util.ColorStringOnStatus(util.NotFound)}},
- )
- }
- if crs.CV != nil {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"CstorVolume", crs.CV.Name, util.ColorStringOnStatus(string(crs.CV.Status.Phase))}})
- } else {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"CstorVolume", "", util.ColorStringOnStatus(util.NotFound)}})
- }
- if crs.CVC != nil {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"CstorVolumeConfig", crs.CVC.Name, util.ColorStringOnStatus(string(crs.CVC.Status.Phase))}})
- } else {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"CstorVolumeConfig", "", util.ColorStringOnStatus(util.NotFound)}})
- }
- if crs.CVA != nil {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"CstorVolumeAttachment", crs.CVA.Name, util.ColorStringOnStatus(util.Attached)}})
- } else {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"CstorVolumeAttachment", "", util.ColorStringOnStatus(util.CVANotAttached)}})
- }
- // 4. Display the CRs statuses
- util.TablePrinter(util.CstorVolumeCRStatusColumnDefinitions, crStatusRows, printers.PrintOptions{})
-
- fmt.Println()
- fmt.Println("Replica Statuses:")
- fmt.Println("-----------------")
- crStatusRows = []metav1.TableRow{}
- if crs.CVRs != nil {
- for _, item := range crs.CVRs.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{item.Kind, item.Name, util.ColorStringOnStatus(string(item.Status.Phase))}})
- }
- }
- // 5. Display the CRs statuses
- util.TablePrinter(util.CstorVolumeCRStatusColumnDefinitions, crStatusRows, printers.PrintOptions{})
-
- fmt.Println()
- fmt.Println("BlockDevice and BlockDeviceClaim Statuses:")
- fmt.Println("------------------------------------------")
- crStatusRows = []metav1.TableRow{}
- if crs.PresentBDs != nil {
- for _, item := range crs.PresentBDs.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{item.Kind, item.Name, util.ColorStringOnStatus(string(item.Status.State))}})
- }
- for key, val := range crs.ExpectedBDs {
- if !val {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"BlockDevice", key, util.ColorStringOnStatus(util.NotFound)}})
- }
- }
- }
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{"", "", ""}})
- if crs.BDCs != nil {
- for _, item := range crs.BDCs.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{item.Kind, item.Name, util.ColorStringOnStatus(string(item.Status.Phase))}})
- }
- }
- // 6. Display the BDs and BDCs statuses
- util.TablePrinter(util.CstorVolumeCRStatusColumnDefinitions, crStatusRows, printers.PrintOptions{})
-
- fmt.Println()
- fmt.Println("Pool Instance Statuses:")
- fmt.Println("-----------------------")
- crStatusRows = []metav1.TableRow{}
- if crs.CSPIs != nil {
- for _, item := range crs.CSPIs.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{item.Kind, item.Name, util.ColorStringOnStatus(string(item.Status.Phase))}})
- }
- }
- // 7. Display the Pool statuses
- util.TablePrinter(util.CstorVolumeCRStatusColumnDefinitions, crStatusRows, printers.PrintOptions{})
-
- return nil
-}
-
-func displayPVCEvents(k client.K8sClient, crs util.CstorVolumeResources) error {
- // 1. Set the namespace of the resource to the client
- k.Ns = crs.PVC.Namespace
- // 2. Fetch the events of the concerned PVC.
- // The PVCs donot have the Kind filled, thus we have hardcoded here.
- events, err := k.GetEvents(fmt.Sprintf("involvedObject.name=%s,involvedObject.kind=PersistentVolumeClaim", crs.PVC.Name))
- // 3. Display the events
- fmt.Println()
- if err == nil && len(events.Items) != 0 {
- fmt.Println("Checking PVC Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCross, len(events.Items)), util.Red), "-------->")
- var crStatusRows []metav1.TableRow
- for _, event := range events.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{event.InvolvedObject.Name, event.Action, event.Reason, event.Message, util.ColorStringOnStatus(event.Type)}})
- }
- util.TablePrinter(util.EventsColumnDefinitions, crStatusRows, printers.PrintOptions{})
- return nil
- } else if err == nil && len(events.Items) == 0 {
- fmt.Println("Checking PVC Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCheck, len(events.Items)), util.Green), "-------->")
- return nil
- } else {
- return err
- }
-}
-
-func displayBDCEvents(k client.K8sClient, crs util.CstorVolumeResources) error {
- if crs.BDCs != nil && len(crs.BDCs.Items) != 0 {
- // 1. Set the namespace of the resource to the client
- k.Ns = crs.BDCs.Items[0].Namespace
- // 2. Fetch the events of the concerned BDC
- fmt.Println()
- var crStatusRows []metav1.TableRow
- for _, BDC := range crs.BDCs.Items {
- events, err := k.GetEvents(fmt.Sprintf("involvedObject.name=%s,involvedObject.kind=BlockDeviceClaim", BDC.Name))
- // 3. Display the events
- if err == nil && len(events.Items) != 0 {
- for _, event := range events.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{event.InvolvedObject.Name, event.Action, event.Reason, event.Message, util.ColorStringOnStatus(event.Type)}})
- }
- }
- }
- if len(crStatusRows) == 0 {
- fmt.Println("Checking BDC Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCheck, len(crStatusRows)), util.Green), "-------->")
- return nil
- } else {
- fmt.Println("Checking BDC Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCross, len(crStatusRows)), util.Red), "-------->")
- util.TablePrinter(util.EventsColumnDefinitions, crStatusRows, printers.PrintOptions{})
- return nil
- }
- }
- return errors.New("no BDC present to display events")
-}
-
-func displayCVCEvents(k client.K8sClient, crs util.CstorVolumeResources) error {
- if crs.CVC != nil {
- // 1. Set the namespace of the resource to the client
- k.Ns = crs.CVC.Namespace
- // 2. Fetch the events of the concerned CVC
- events, err := k.GetEvents(fmt.Sprintf("involvedObject.name=%s,involvedObject.kind=CStorVolumeConfig", crs.CVC.Name))
- // 3. Display the events
- fmt.Println()
- if err == nil && len(events.Items) != 0 {
- fmt.Println("Checking CVC Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCross, len(events.Items)), util.Red), "-------->")
- var crStatusRows []metav1.TableRow
- for _, event := range events.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{event.InvolvedObject.Name, event.Action, event.Reason, event.Message, util.ColorStringOnStatus(event.Type)}})
- }
- defer util.TablePrinter(util.EventsColumnDefinitions, crStatusRows, printers.PrintOptions{})
- return nil
- } else if err == nil && len(events.Items) == 0 {
- fmt.Println("Checking CVC Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCheck, len(events.Items)), util.Green), "-------->")
- return nil
- } else {
- return err
- }
- } else {
- return errors.New("no CVC present to display events")
- }
-}
-
-func displayCSPCEvents(k client.K8sClient, crs util.CstorVolumeResources) error {
- if crs.CSPC != nil {
- // 1. Set the namespace of the resource to the client
- k.Ns = crs.CSPC.Namespace
- // 2. Fetch the events of the concerned PVC.
- // The PVCs donot have the Kind filled, thus we have hardcoded here.
- events, err := k.GetEvents(fmt.Sprintf("involvedObject.name=%s,involvedObject.kind=CStorPoolCluster", crs.CSPC.Name))
- // 3. Display the events
- fmt.Println()
- if err == nil && len(events.Items) != 0 {
- fmt.Println("Checking CSPC Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCross, len(events.Items)), util.Red), "-------->")
- var crStatusRows []metav1.TableRow
- for _, event := range events.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{event.InvolvedObject.Name, event.Action, event.Reason, event.Message, util.ColorStringOnStatus(event.Type)}})
- }
- util.TablePrinter(util.EventsColumnDefinitions, crStatusRows, printers.PrintOptions{})
- return nil
- } else if err == nil && len(events.Items) == 0 {
- fmt.Println("Checking CSPC Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCheck, len(events.Items)), util.Green), "-------->")
- return nil
- } else {
- return err
- }
- } else {
- return errors.New("no CSPC present to display events")
- }
-}
-
-func displayCSPIEvents(k client.K8sClient, crs util.CstorVolumeResources) error {
- if crs.CSPIs != nil && len(crs.CSPIs.Items) != 0 {
- // 1. Set the namespace of the resource to the client
- k.Ns = crs.CSPIs.Items[0].Namespace
- // 2. Fetch the events of the concerned CSPIs
- fmt.Println()
- var crStatusRows []metav1.TableRow
- for _, CSPI := range crs.CSPIs.Items {
- events, err := k.GetEvents(fmt.Sprintf("involvedObject.name=%s,involvedObject.kind=CStorPoolInstance", CSPI.Name))
- // 3. Display the events
- if err == nil && len(events.Items) != 0 {
- for _, event := range events.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{event.InvolvedObject.Name, event.Action, event.Reason, event.Message, util.ColorStringOnStatus(event.Type)}})
- }
- }
- }
- if len(crStatusRows) == 0 {
- fmt.Println("Checking CSPI Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCheck, len(crStatusRows)), util.Green), "-------->")
- return nil
- } else {
- fmt.Println("Checking CSPI Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCross, len(crStatusRows)), util.Red), "-------->")
- util.TablePrinter(util.EventsColumnDefinitions, crStatusRows, printers.PrintOptions{})
- return nil
- }
- }
- return errors.New("no CSPIs present to display events")
-}
-
-func displayCVREvents(k client.K8sClient, crs util.CstorVolumeResources) error {
- if crs.CVRs != nil && len(crs.CVRs.Items) != 0 {
- // 1. Set the namespace of the resource to the client
- k.Ns = crs.CVRs.Items[0].Namespace
- // 2. Fetch the events of the concerned CVRs
- fmt.Println()
- var crStatusRows []metav1.TableRow
- for _, CVR := range crs.CVRs.Items {
- events, err := k.GetEvents(fmt.Sprintf("involvedObject.name=%s,involvedObject.kind=CStorVolumeReplica", CVR.Name))
- // 3. Display the events
- if err == nil && len(events.Items) != 0 {
- for _, event := range events.Items {
- crStatusRows = append(crStatusRows, metav1.TableRow{Cells: []interface{}{event.InvolvedObject.Name, event.Action, event.Reason, event.Message, util.ColorStringOnStatus(event.Type)}})
- }
- }
- }
- if len(crStatusRows) == 0 {
- fmt.Println("Checking CVR Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCheck, len(crStatusRows)), util.Green), "-------->")
- return nil
- } else {
- fmt.Println("Checking CVR Events:", util.ColorText(fmt.Sprintf(" %s %d! ", util.UnicodeCross, len(crStatusRows)), util.Red), "-------->")
- util.TablePrinter(util.EventsColumnDefinitions, crStatusRows, printers.PrintOptions{})
- return nil
- }
- }
- return errors.New("no CVRs present to display events")
-}
diff --git a/pkg/persistentvolumeclaim/debug_cstor_test.go b/pkg/persistentvolumeclaim/debug_cstor_test.go
deleted file mode 100644
index c3cc776a..00000000
--- a/pkg/persistentvolumeclaim/debug_cstor_test.go
+++ /dev/null
@@ -1,964 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package persistentvolumeclaim
-
-import (
- "fmt"
- "testing"
- "time"
-
- v1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- cstortypes "github.com/openebs/api/v2/pkg/apis/types"
- openebsFakeClientset "github.com/openebs/api/v2/pkg/client/clientset/versioned/fake"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/kubernetes/fake"
- fake2 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
- k8stest "k8s.io/client-go/testing"
-)
-
-func TestDebugCstorVolumeClaim(t *testing.T) {
- type args struct {
- k *client.K8sClient
- pvc *corev1.PersistentVolumeClaim
- pv *corev1.PersistentVolume
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with all valid values",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &cspi1, &cspi2, &bd1, &bd2, &bdc1, &bdc2, &cv1, &cva1, &cvc1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- {
- "Test with PV missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &cspi1, &cspi2, &bd1, &bd2, &bdc1, &bdc2, &cva1, &cvc1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: nil,
- },
- false,
- },
- {
- "Test with CV missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &cspi1, &cspi2, &bd1, &bd2, &bdc1, &bdc2, &cva1, &cvc1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- {
- "Test with CVC Missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &cspi1, &cspi2, &bd1, &bd2, &bdc1, &bdc2, &cv1, &cva1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- {
- "Test with CVA missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &cspi1, &cspi2, &bd1, &bd2, &bdc1, &bdc2, &cv1, &cvc1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- {
- "Test with CVRs missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &cspi1, &cspi2, &bd1, &bd2, &bdc1, &bdc2, &cv1, &cva1, &cvc1),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- {
- "Test with cspc missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspi1, &cspi2, &bd1, &bd2, &bdc1, &bdc2, &cv1, &cva1, &cvc1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- {
- "Test with cspis missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &bd1, &bd2, &bdc1, &bdc2, &cv1, &cva1, &cvc1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- {
- "Test with bds missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &cspi1, &cspi2, &bdc1, &bdc2, &cv1, &cva1, &cvc1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- {
- "Test with bdcs missing",
- args{
- k: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPVC1, &cstorSc),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cspc, &cspi1, &cspi2, &bd1, &bd2, &cv1, &cva1, &cvc1, &cvr1, &cvr2),
- },
- pvc: &cstorPVC1,
- pv: &cstorPV1,
- },
- false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := DebugCstorVolumeClaim(tt.args.k, tt.args.pvc, tt.args.pv); (err != nil) != tt.wantErr {
- t.Errorf("DebugCstorVolumeClaim() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func Test_displayBDCEvents(t *testing.T) {
- type args struct {
- k client.K8sClient
- crs util.CstorVolumeResources
- bdcFunc func(*client.K8sClient, map[string]corev1.EventList)
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with valid values and events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&bdcEvent1, &bdcEvent2),
- },
- crs: util.CstorVolumeResources{
- BDCs: &bdcList,
- },
- bdcFunc: eventFunc,
- },
- false,
- },
- {
- "Test with valid values with no events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- BDCs: &bdcList,
- },
- bdcFunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no BDCs",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- BDCs: nil,
- },
- bdcFunc: nil,
- },
- true,
- },
- {
- "Test with valid values with no BDCList as empty",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- BDCs: &v1alpha1.BlockDeviceClaimList{
- Items: []v1alpha1.BlockDeviceClaim{},
- },
- },
- bdcFunc: nil,
- },
- true,
- },
- {
- "Test with valid values with no events errored out",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- BDCs: &v1alpha1.BlockDeviceClaimList{
- Items: []v1alpha1.BlockDeviceClaim{},
- },
- },
- bdcFunc: noEventFunc,
- },
- true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.args.bdcFunc != nil {
- tt.args.bdcFunc(&tt.args.k, map[string]corev1.EventList{
- "bdc-1": {Items: []corev1.Event{bdcEvent1}},
- "bdc-2": {Items: []corev1.Event{bdcEvent2}},
- })
- }
- if err := displayBDCEvents(tt.args.k, tt.args.crs); (err != nil) != tt.wantErr {
- t.Errorf("displayBDCEvents() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func Test_displayCSPCEvents(t *testing.T) {
- type args struct {
- k client.K8sClient
- crs util.CstorVolumeResources
- cspcFunc func(*client.K8sClient, map[string]corev1.EventList)
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with valid values and events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cspcEvent),
- },
- crs: util.CstorVolumeResources{
- CSPC: &cspc,
- },
- cspcFunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CSPC: &cspc,
- },
- cspcFunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no CSPC",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CSPC: nil,
- },
- cspcFunc: nil,
- },
- true,
- },
- {
- "Test with valid values with no events errored out",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CSPC: &cspc,
- },
- cspcFunc: noEventFunc,
- },
- true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.args.cspcFunc != nil {
- tt.args.cspcFunc(&tt.args.k, map[string]corev1.EventList{})
- }
- if err := displayCSPCEvents(tt.args.k, tt.args.crs); (err != nil) != tt.wantErr {
- t.Errorf("displayCSPCEvents() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func Test_displayCSPIEvents(t *testing.T) {
- type args struct {
- k client.K8sClient
- crs util.CstorVolumeResources
- cspiFunc func(*client.K8sClient, map[string]corev1.EventList)
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with valid values and events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cspiEvent1, &cspiEvent2),
- },
- crs: util.CstorVolumeResources{
- CSPIs: &cspiList,
- },
- cspiFunc: eventFunc,
- },
- false,
- },
- {
- "Test with valid values with no events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CSPIs: &cspiList,
- },
- cspiFunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no CSPIs",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CSPIs: nil,
- },
- cspiFunc: nil,
- },
- true,
- },
- {
- "Test with valid values with no CSPIList as empty",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CSPIs: &v1.CStorPoolInstanceList{
- Items: []v1.CStorPoolInstance{},
- },
- },
- cspiFunc: nil,
- },
- true,
- },
- {
- "Test with valid values with no events errored out",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CSPIs: &v1.CStorPoolInstanceList{
- Items: []v1.CStorPoolInstance{},
- },
- },
- cspiFunc: noEventFunc,
- },
- true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.args.cspiFunc != nil {
- tt.args.cspiFunc(&tt.args.k, map[string]corev1.EventList{
- "cspc-1": {Items: []corev1.Event{cspiEvent1}},
- "cspc-2": {Items: []corev1.Event{cspiEvent2}},
- })
- }
- if err := displayCSPIEvents(tt.args.k, tt.args.crs); (err != nil) != tt.wantErr {
- t.Errorf("displayCSPIEvents() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func Test_displayCVCEvents(t *testing.T) {
- type args struct {
- k client.K8sClient
- crs util.CstorVolumeResources
- cvcFunc func(*client.K8sClient, map[string]corev1.EventList)
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with valid values and events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cvcEvent1, &cvcEvent2),
- },
- crs: util.CstorVolumeResources{
- CVC: &cvc1,
- },
- cvcFunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CVC: &cvc1,
- },
- cvcFunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no CVC",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CVC: nil,
- },
- cvcFunc: nil,
- },
- true,
- },
- {
- "Test with valid values with no events errored out",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CVC: &cvc1,
- },
- cvcFunc: noEventFunc,
- },
- true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.args.cvcFunc != nil {
- tt.args.cvcFunc(&tt.args.k, map[string]corev1.EventList{})
- }
- if err := displayCVCEvents(tt.args.k, tt.args.crs); (err != nil) != tt.wantErr {
- t.Errorf("displayCVCEvents() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func Test_displayCVREvents(t *testing.T) {
- type args struct {
- k client.K8sClient
- crs util.CstorVolumeResources
- cvrfunc func(*client.K8sClient, map[string]corev1.EventList)
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with valid values and events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cvrEvent1, &cvrEvent2),
- },
- crs: util.CstorVolumeResources{
- CVRs: &cvrList,
- },
- cvrfunc: eventFunc,
- },
- false,
- },
- {
- "Test with valid values with no events",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CVRs: &cvrList,
- },
- cvrfunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no CVRs",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CVRs: nil,
- },
- cvrfunc: nil,
- },
- true,
- },
- {
- "Test with valid values with no CVRIList as empty",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CVRs: &v1.CStorVolumeReplicaList{
- Items: []v1.CStorVolumeReplica{},
- },
- },
- cvrfunc: nil,
- },
- true,
- },
- {
- "Test with valid values with no events errored out",
- args{
- k: client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- CVRs: &v1.CStorVolumeReplicaList{
- Items: []v1.CStorVolumeReplica{},
- },
- },
- cvrfunc: noEventFunc,
- },
- true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.args.cvrfunc != nil {
- tt.args.cvrfunc(&tt.args.k, map[string]corev1.EventList{
- "pvc-1-rep-1": {Items: []corev1.Event{cvrEvent1}},
- "pvc-1-rep-2": {Items: []corev1.Event{cvrEvent2}},
- })
- }
- if err := displayCVREvents(tt.args.k, tt.args.crs); (err != nil) != tt.wantErr {
- t.Errorf("displayCVREvents() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func Test_displayPVCEvents(t *testing.T) {
- type args struct {
- k client.K8sClient
- crs util.CstorVolumeResources
- pvcFunc func(*client.K8sClient, map[string]corev1.EventList)
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with valid values and events",
- args{
- k: client.K8sClient{
- Ns: "default",
- K8sCS: fake.NewSimpleClientset(&pvcEvent1, &pvcEvent2),
- },
- crs: util.CstorVolumeResources{
- PVC: &cstorPVC1,
- },
- pvcFunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no events",
- args{
- k: client.K8sClient{
- Ns: "default",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- PVC: &cstorPVC1,
- },
- pvcFunc: nil,
- },
- false,
- },
- {
- "Test with valid values with no events errored out",
- args{
- k: client.K8sClient{
- Ns: "default",
- K8sCS: fake.NewSimpleClientset(),
- },
- crs: util.CstorVolumeResources{
- PVC: &cstorPVC1,
- },
- pvcFunc: noEventFunc,
- },
- true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.args.pvcFunc != nil {
- tt.args.pvcFunc(&tt.args.k, map[string]corev1.EventList{})
- }
- if err := displayPVCEvents(tt.args.k, tt.args.crs); (err != nil) != tt.wantErr {
- t.Errorf("displayPVCEvents() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func Test_resourceStatus(t *testing.T) {
- var cvrListWithMoreUsedCapacity = v1.CStorVolumeReplicaList{Items: []v1.CStorVolumeReplica{{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "3.923GiB",
- },
- Phase: v1.CVRStatusOnline,
- },
- }}}
- type args struct {
- crs util.CstorVolumeResources
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- "Test with all valid values",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: &cvrList,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all PV absent",
- args{crs: util.CstorVolumeResources{
- PV: nil,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: &cvrList,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all CV absent",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: nil,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: &cvrList,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all CVC absent",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: nil,
- CVA: &cva1,
- CVRs: &cvrList,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all CVA absent",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: nil,
- CVRs: &cvrList,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all CVRs absent",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: nil,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all BDs absent",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: &cvrList,
- PresentBDs: nil,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all BDCs absent",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: &cvrList,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: nil,
- CSPIs: &cspiList,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all CSPIs absent",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: &cvrList,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: nil,
- CSPC: &cspc,
- }},
- false,
- },
- {
- "Test with all CSPC absent",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: &cvrList,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: nil,
- }},
- false,
- },
- {
- "Test with all Used Capacity exceeding 80%",
- args{crs: util.CstorVolumeResources{
- PV: &cstorPV1,
- PVC: &cstorPVC1,
- CV: &cv1,
- CVC: &cvc1,
- CVA: &cva1,
- CVRs: &cvrListWithMoreUsedCapacity,
- PresentBDs: &bdList,
- ExpectedBDs: expectedBDs,
- BDCs: &bdcList,
- CSPIs: &cspiList,
- CSPC: nil,
- }},
- false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := resourceStatus(tt.args.crs); (err != nil) != tt.wantErr {
- t.Errorf("resourceStatus() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func eventFunc(c *client.K8sClient, eventMap map[string]corev1.EventList) {
- c.K8sCS.CoreV1().Events(c.Ns).(*fake2.FakeEvents).Fake.PrependReactor("*", "events", func(action k8stest.Action) (handled bool, ret runtime.Object, err error) {
- listOpts, ok := action.(k8stest.ListActionImpl)
- if ok {
- val, matched := listOpts.ListRestrictions.Fields.RequiresExactMatch("involvedObject.name")
- if matched {
- if events, present := eventMap[val]; present {
- return true, &events, nil
- } else {
- return true, nil, fmt.Errorf("invalid fieldSelector")
- }
- } else {
- return true, nil, fmt.Errorf("invalid fieldSelector")
- }
- } else {
- return true, nil, fmt.Errorf("invalid fieldSelector")
- }
- })
-}
-
-func noEventFunc(c *client.K8sClient, eventMap map[string]corev1.EventList) {
- c.K8sCS.CoreV1().Events(c.Ns).(*fake2.FakeEvents).Fake.PrependReactor("*", "events", func(action k8stest.Action) (handled bool, ret runtime.Object, err error) {
- return true, nil, fmt.Errorf("failed to list events")
- })
-}
diff --git a/pkg/persistentvolumeclaim/generic_test.go b/pkg/persistentvolumeclaim/generic_test.go
index 87f77d23..4f1ceaf0 100644
--- a/pkg/persistentvolumeclaim/generic_test.go
+++ b/pkg/persistentvolumeclaim/generic_test.go
@@ -37,8 +37,8 @@ func TestDescribeGenericVolumeClaim(t *testing.T) {
{
name: "All Valid Values",
args: args{
- pv: &cstorPV1,
- pvc: &cstorPVC1,
+ pv: &zfsPV1,
+ pvc: &zfsPVC1,
casType: "some-cas",
mountPods: "",
},
@@ -48,7 +48,7 @@ func TestDescribeGenericVolumeClaim(t *testing.T) {
name: "PV missing",
args: args{
pv: nil,
- pvc: &cstorPVC1,
+ pvc: &zfsPVC1,
casType: "some-cas",
mountPods: "",
},
diff --git a/pkg/persistentvolumeclaim/jiva.go b/pkg/persistentvolumeclaim/jiva.go
deleted file mode 100644
index 2fd82e87..00000000
--- a/pkg/persistentvolumeclaim/jiva.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package persistentvolumeclaim
-
-import (
- "fmt"
- "os"
- "strings"
- "time"
-
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/openebs/openebsctl/pkg/volume"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/cli-runtime/pkg/printers"
-)
-
-const (
- jivaPvcInfoTemplate = `
-{{.Name}} Details :
--------------------
-NAME : {{.Name}}
-NAMESPACE : {{.Namespace}}
-CAS TYPE : {{.CasType}}
-BOUND VOLUME : {{.BoundVolume}}
-ATTACHED TO NODE : {{.AttachedToNode}}
-JIVA VOLUME POLICY : {{.JVP}}
-STORAGE CLASS : {{.StorageClassName}}
-SIZE : {{.Size}}
-JV STATUS : {{.JVStatus}}
-PV STATUS : {{.PVStatus}}
-MOUNTED BY : {{.MountPods}}
-`
-)
-
-// DescribeJivaVolumeClaim describes a jiva storage engine PersistentVolumeClaim
-func DescribeJivaVolumeClaim(c *client.K8sClient, pvc *corev1.PersistentVolumeClaim, vol *corev1.PersistentVolume, mountPods string) error {
- // 1. Get the JivaVolume Corresponding to the pvc name
- jv, err := c.GetJV(pvc.Spec.VolumeName)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "failed to get JivaVolume for %s", pvc.Spec.VolumeName)
- fmt.Println()
- }
- // 2. Fill in Jiva Volume Claim related details
- jivaPvcInfo := util.JivaPVCInfo{
- Name: pvc.Name,
- Namespace: pvc.Namespace,
- CasType: util.JivaCasType,
- BoundVolume: pvc.Spec.VolumeName,
- StorageClassName: *pvc.Spec.StorageClassName,
- Size: pvc.Spec.Resources.Requests.Storage().String(),
- MountPods: mountPods,
- }
- if jv != nil {
- jivaPvcInfo.AttachedToNode = jv.Labels["nodeID"]
- jivaPvcInfo.JVP = jv.Annotations["openebs.io/volume-policy"]
- jivaPvcInfo.JVStatus = jv.Status.Status
- }
- if vol != nil {
- jivaPvcInfo.PVStatus = vol.Status.Phase
- // 3. Print the Jiva Volume Claim information
- _ = util.PrintByTemplate("jivaPvcInfo", jivaPvcInfoTemplate, jivaPvcInfo)
- } else {
- _ = util.PrintByTemplate("jivaPvcInfo", jivaPvcInfoTemplate, jivaPvcInfo)
- _, _ = fmt.Fprintf(os.Stderr, "PersistentVolume %s, doesnot exist", pvc.Spec.VolumeName)
- fmt.Println()
- return nil
- }
- // 4. Print the Portal Information
- replicaPodIPAndModeMap := make(map[string]string)
- jvStatus := ""
- if jv != nil {
- util.TemplatePrinter(volume.JivaPortalTemplate, jv)
- // Create Replica IP to Mode Map
- if jv.Status.ReplicaStatuses != nil && len(jv.Status.ReplicaStatuses) != 0 {
- for _, replicaStatus := range jv.Status.ReplicaStatuses {
- replicaPodIPAndModeMap[strings.Split(replicaStatus.Address, ":")[1][2:]] = replicaStatus.Mode
- }
- }
- jvStatus = jv.Status.Status
- } else {
- fmt.Println()
- }
- // 5. Fetch the Jiva controller and replica pod details
- podList, err := c.GetJVTargetPod(vol.Name)
- if err == nil {
- fmt.Println("Controller and Replica Pod Details :")
- fmt.Println("-----------------------------------")
- var rows []metav1.TableRow
- for _, pod := range podList.Items {
- if !strings.Contains(pod.Name, "-ctrl-") {
- mode := ""
- // If the IP doesnot exist, keep the mode as empty
- if val, ok := replicaPodIPAndModeMap[pod.Status.PodIP]; ok {
- mode = val
- }
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- pod.Namespace, pod.Name, mode,
- pod.Spec.NodeName, pod.Status.Phase, pod.Status.PodIP,
- util.GetReadyContainers(pod.Status.ContainerStatuses),
- util.Duration(time.Since(pod.ObjectMeta.CreationTimestamp.Time))}})
- } else {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- pod.Namespace, pod.Name, jvStatus,
- pod.Spec.NodeName, pod.Status.Phase, pod.Status.PodIP,
- util.GetReadyContainers(pod.Status.ContainerStatuses),
- util.Duration(time.Since(pod.ObjectMeta.CreationTimestamp.Time))}})
- }
- }
- util.TablePrinter(util.JivaPodDetailsColumnDefinations, rows, printers.PrintOptions{Wide: true})
- } else {
- fmt.Printf("Controller and Replica Pod Details :")
- fmt.Println("-----------------------------------")
- fmt.Println("No Controller and Replica pod exists for the JivaVolume")
- }
- // 6. Fetch the replica PVCs and create rows for cli-runtime
- var rows []metav1.TableRow
- pvcList, err := c.GetPVCs(c.Ns, nil, "openebs.io/component=jiva-replica,openebs.io/persistent-volume="+vol.Name)
- if err != nil || len(pvcList.Items) == 0 {
- fmt.Printf("No replicas found for the JivaVolume %s", vol.Name)
- return nil
- }
- for _, pvc := range pvcList.Items {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- pvc.Name,
- pvc.Status.Phase,
- pvc.Spec.VolumeName,
- util.ConvertToIBytes(pvc.Spec.Resources.Requests.Storage().String()),
- *pvc.Spec.StorageClassName,
- util.Duration(time.Since(pvc.ObjectMeta.CreationTimestamp.Time)),
- pvc.Spec.VolumeMode}})
- }
- // 6. Print the replica details if present
- fmt.Println()
- fmt.Println("Replica Data Volume Details :")
- fmt.Println("-----------------------------")
- util.TablePrinter(util.JivaReplicaPVCColumnDefinations, rows, printers.PrintOptions{Wide: true})
- return nil
-}
diff --git a/pkg/persistentvolumeclaim/jiva_test.go b/pkg/persistentvolumeclaim/jiva_test.go
deleted file mode 100644
index 03739d08..00000000
--- a/pkg/persistentvolumeclaim/jiva_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package persistentvolumeclaim
-
-import (
- "testing"
-
- "github.com/openebs/openebsctl/pkg/client"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestDescribeJivaVolumeClaim(t *testing.T) {
- type args struct {
- c *client.K8sClient
- pvc *corev1.PersistentVolumeClaim
- vol *corev1.PersistentVolume
- mountPods string
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := DescribeJivaVolumeClaim(tt.args.c, tt.args.pvc, tt.args.vol, tt.args.mountPods); (err != nil) != tt.wantErr {
- t.Errorf("DescribeJivaVolumeClaim() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
diff --git a/pkg/persistentvolumeclaim/persistentvolumeclaim.go b/pkg/persistentvolumeclaim/persistentvolumeclaim.go
index fea3ec8d..12242658 100644
--- a/pkg/persistentvolumeclaim/persistentvolumeclaim.go
+++ b/pkg/persistentvolumeclaim/persistentvolumeclaim.go
@@ -84,14 +84,12 @@ func Describe(pvcs []string, namespace string, openebsNs string) error {
func CasDescribeMap() map[string]func(*client.K8sClient, *corev1.PersistentVolumeClaim, *corev1.PersistentVolume, string) error {
// a good hack to implement immutable maps in Golang & also write tests for it
return map[string]func(*client.K8sClient, *corev1.PersistentVolumeClaim, *corev1.PersistentVolume, string) error{
- util.JivaCasType: DescribeJivaVolumeClaim,
- util.CstorCasType: DescribeCstorVolumeClaim,
- util.LVMCasType: DescribeLVMVolumeClaim,
- util.ZFSCasType: DescribeZFSVolumeClaim,
+ util.LVMCasType: DescribeLVMVolumeClaim,
+ util.ZFSCasType: DescribeZFSVolumeClaim,
}
}
-//GetMountPods filters the array of Pods and returns an array of Pods that mount the PersistentVolumeClaim
+// GetMountPods filters the array of Pods and returns an array of Pods that mount the PersistentVolumeClaim
func GetMountPods(pvcName string, nsPods []corev1.Pod) []corev1.Pod {
var pods []corev1.Pod
for _, pod := range nsPods {
@@ -107,7 +105,7 @@ func GetMountPods(pvcName string, nsPods []corev1.Pod) []corev1.Pod {
return pods
}
-//SortPods sorts the array of Pods by name
+// SortPods sorts the array of Pods by name
func SortPods(pods []corev1.Pod) []corev1.Pod {
sort.Slice(pods, func(i, j int) bool {
cmpKey := func(pod corev1.Pod) string {
@@ -118,10 +116,10 @@ func SortPods(pods []corev1.Pod) []corev1.Pod {
return pods
}
-//PodsToString Flattens the array of Pods and returns a string fit to display in the output
+// PodsToString Flattens the array of Pods and returns a string fit to display in the output
func PodsToString(pods []corev1.Pod) string {
if len(pods) == 0 {
- return ""
+ return "none"
}
str := ""
for _, pod := range pods {
diff --git a/pkg/persistentvolumeclaim/testdata_test.go b/pkg/persistentvolumeclaim/testdata_test.go
index f20fdb94..aeea9679 100644
--- a/pkg/persistentvolumeclaim/testdata_test.go
+++ b/pkg/persistentvolumeclaim/testdata_test.go
@@ -19,14 +19,10 @@ package persistentvolumeclaim
import (
"time"
- v1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- cstortypes "github.com/openebs/api/v2/pkg/apis/types"
lvm "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1"
"github.com/openebs/openebsctl/pkg/util"
zfs "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
corev1 "k8s.io/api/core/v1"
- v12 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -36,743 +32,6 @@ var (
blockFS = corev1.PersistentVolumeBlock
)
-/****************
-* CSTOR
-****************/
-
-var nsCstor = corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- },
- Spec: corev1.NamespaceSpec{Finalizers: []corev1.FinalizerName{corev1.FinalizerKubernetes}},
-}
-
-var cv1 = v1.CStorVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeSpec{
- Capacity: fourGigiByte,
- TargetIP: "10.2.2.2",
- TargetPort: "3002",
- Iqn: "pvc1-some-fake-iqn",
- TargetPortal: "10.2.2.2:3002",
- ReplicationFactor: 3,
- ConsistencyFactor: 0,
- DesiredReplicationFactor: 0,
- ReplicaDetails: v1.CStorVolumeReplicaDetails{KnownReplicas: map[v1.ReplicaID]string{
- "some-id-1": "pvc-1-rep-1", "some-id-2": "pvc-1-rep-2", "some-id-3": "pvc-1-rep-3"},
- },
- },
- Status: v1.CStorVolumeStatus{
- Phase: util.Healthy,
- ReplicaStatuses: []v1.ReplicaStatus{{ID: "some-id-1", Mode: "Healthy"}, {ID: "some-id-2", Mode: "Healthy"}, {ID: "some-id-3", Mode: "Healthy"}},
- Capacity: fourGigiByte,
- ReplicaDetails: v1.CStorVolumeReplicaDetails{KnownReplicas: map[v1.ReplicaID]string{
- "some-id-1": "pvc-1-rep-1", "some-id-2": "pvc-1-rep-2", "some-id-3": "pvc-1-rep-3"},
- },
- },
- VersionDetails: v1.VersionDetails{
- AutoUpgrade: false,
- Desired: "2.11.0",
- Status: v1.VersionStatus{
- DependentsUpgraded: true,
- Current: "2.11.0",
- LastUpdateTime: metav1.Time{},
- },
- },
-}
-
-var cv2 = v1.CStorVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeSpec{
- Capacity: fourGigiByte,
- TargetIP: "10.2.2.2",
- TargetPort: "3002",
- Iqn: "pvc1-some-fake-iqn",
- TargetPortal: "10.2.2.2:3002",
- ReplicationFactor: 3,
- ConsistencyFactor: 0,
- DesiredReplicationFactor: 0,
- ReplicaDetails: v1.CStorVolumeReplicaDetails{KnownReplicas: map[v1.ReplicaID]string{
- "some-id-1": "pvc-2-rep-1"},
- },
- },
- Status: v1.CStorVolumeStatus{
- Phase: util.Healthy,
- ReplicaStatuses: []v1.ReplicaStatus{{ID: "some-id-1", Mode: "Healthy"}},
- Capacity: fourGigiByte,
- ReplicaDetails: v1.CStorVolumeReplicaDetails{KnownReplicas: map[v1.ReplicaID]string{
- "some-id-1": "pvc-2-rep-1"},
- },
- },
- VersionDetails: v1.VersionDetails{
- AutoUpgrade: false,
- Desired: "2.11.0",
- Status: v1.VersionStatus{
- DependentsUpgraded: true,
- Current: "2.11.0",
- LastUpdateTime: metav1.Time{},
- },
- },
-}
-
-var cvc1 = v1.CStorVolumeConfig{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeConfigSpec{Provision: v1.VolumeProvision{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- ReplicaCount: 3,
- }},
- Publish: v1.CStorVolumeConfigPublish{},
- Status: v1.CStorVolumeConfigStatus{PoolInfo: []string{"pool-1", "pool-2", "pool-3"}},
- VersionDetails: v1.VersionDetails{
- AutoUpgrade: false,
- Desired: "2.11.0",
- Status: v1.VersionStatus{Current: "2.11.0"},
- },
-}
-
-var cvc2 = v1.CStorVolumeConfig{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeConfigSpec{Provision: v1.VolumeProvision{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- ReplicaCount: 3,
- }},
- Publish: v1.CStorVolumeConfigPublish{},
- Status: v1.CStorVolumeConfigStatus{PoolInfo: []string{"pool-1"}},
- VersionDetails: v1.VersionDetails{
- AutoUpgrade: false,
- Desired: "2.11.0",
- Status: v1.VersionStatus{Current: "2.11.0"},
- },
-}
-
-var cva1 = v1.CStorVolumeAttachment{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-cva",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"Volname": "pvc-1", "nodeID": "node-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeAttachmentSpec{Volume: v1.VolumeInfo{OwnerNodeID: "node-1"}},
-}
-
-var cva2 = v1.CStorVolumeAttachment{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2-cva",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"Volname": "pvc-2", "nodeID": "node-2"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeAttachmentSpec{Volume: v1.VolumeInfo{OwnerNodeID: "node-2"}},
-}
-
-var cvr1 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var cvr2 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var cvr3 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-3",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var cvr4 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2-rep-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-2"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var cvrList = v1.CStorVolumeReplicaList{Items: []v1.CStorVolumeReplica{cvr1, cvr2}}
-
-var cstorSc = v12.StorageClass{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor-sc",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- },
- Provisioner: "cstor.csi.openebs.io",
- Parameters: map[string]string{"cstorPoolCluster": "cspc"},
-}
-
-var (
- cstorScName = "cstor-sc"
- cstorVolumeMode = corev1.PersistentVolumeFilesystem
- cstorPVC1 = corev1.PersistentVolumeClaim{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor-pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-2"},
- Finalizers: []string{},
- Namespace: "default",
- },
- Spec: corev1.PersistentVolumeClaimSpec{
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- Resources: corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: fourGigiByte}},
- VolumeName: "pvc-1",
- StorageClassName: &cstorScName,
- VolumeMode: &cstorVolumeMode,
- },
- Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound, Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte}},
- }
-)
-
-var (
- cstorPVC2 = corev1.PersistentVolumeClaim{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor-pvc-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-2"},
- Finalizers: []string{},
- Namespace: "default",
- },
- Spec: corev1.PersistentVolumeClaimSpec{
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- Resources: corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: fourGigiByte}},
- VolumeName: "pvc-2",
- StorageClassName: &cstorScName,
- VolumeMode: &cstorVolumeMode,
- },
- Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound, Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte}},
- }
-)
-
-var (
- cstorPV1 = corev1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: corev1.PersistentVolumeSpec{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- ClaimRef: &corev1.ObjectReference{
- Namespace: "default",
- Name: "cstor-pvc-1",
- },
- PersistentVolumeReclaimPolicy: "Retain",
- StorageClassName: cstorScName,
- VolumeMode: &cstorVolumeMode,
- PersistentVolumeSource: corev1.PersistentVolumeSource{CSI: &corev1.CSIPersistentVolumeSource{
- Driver: "cstor.csi.openebs.io",
- }},
- },
- Status: corev1.PersistentVolumeStatus{Phase: corev1.VolumeBound},
- }
-)
-
-var (
- cstorPV2 = corev1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-2"},
- Finalizers: []string{},
- },
- Spec: corev1.PersistentVolumeSpec{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- ClaimRef: &corev1.ObjectReference{
- Namespace: "default",
- Name: "cstor-pvc-2",
- },
- PersistentVolumeReclaimPolicy: "Retain",
- StorageClassName: cstorScName,
- VolumeMode: &cstorVolumeMode,
- PersistentVolumeSource: corev1.PersistentVolumeSource{CSI: &corev1.CSIPersistentVolumeSource{
- Driver: "cstor.csi.openebs.io",
- }},
- },
- Status: corev1.PersistentVolumeStatus{Phase: corev1.VolumeBound},
- }
-)
-
-var cbkp = v1.CStorBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: "bkp-name",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: v1.CStorBackupSpec{
- BackupName: "bkp-name",
- VolumeName: "pvc-1",
- SnapName: "snap-name",
- PrevSnapName: "prev-snap-name",
- BackupDest: "10.2.2.7",
- LocalSnap: true,
- },
- Status: v1.BKPCStorStatusDone,
-}
-
-var ccbkp = v1.CStorCompletedBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: "completed-bkp-name",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: v1.CStorCompletedBackupSpec{
- BackupName: "completed-bkp-name",
- VolumeName: "pvc-1",
- SecondLastSnapName: "secondlast-snapshot-name",
- LastSnapName: "last-snapshot-name",
- },
-}
-
-var crestore = v1.CStorRestore{
- ObjectMeta: metav1.ObjectMeta{
- Name: "restore-name",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: v1.CStorRestoreSpec{
- RestoreName: "restore-name",
- VolumeName: "pvc-1",
- RestoreSrc: "10.2.2.7",
- MaxRetryCount: 3,
- RetryCount: 2,
- StorageClass: "cstor-sc",
- Size: fourGigiByte,
- Local: true,
- },
-}
-
-var cstorTargetPod = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "restore-name",
- Namespace: "cstor",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"openebs.io/persistent-volume-claim": "cstor-pvc-1", "openebs.io/persistent-volume": "pvc-1", "openebs.io/target": "cstor-target"},
- Finalizers: []string{},
- },
- Spec: corev1.PodSpec{NodeName: "node-1"},
- Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{{Ready: true}, {Ready: true}, {Ready: true}}, PodIP: "10.2.2.2", Phase: "Running"},
-}
-
-var cspc = v1.CStorPoolCluster{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{
- Name: "cspc",
- Namespace: "cstor",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- },
- Spec: v1.CStorPoolClusterSpec{
- Pools: []v1.PoolSpec{{
- DataRaidGroups: []v1.RaidGroup{
- {CStorPoolInstanceBlockDevices: []v1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd-1"}}},
- {CStorPoolInstanceBlockDevices: []v1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd-2"}}},
- {CStorPoolInstanceBlockDevices: []v1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd-3"}}},
- },
- }},
- },
-}
-
-var cspi1 = v1.CStorPoolInstance{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cspc-1",
- Namespace: "cstor",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- "openebs.io/cstor-pool-cluster": "cspc",
- "openebs.io/cas-type": "cstor",
- },
- },
- Spec: v1.CStorPoolInstanceSpec{
- DataRaidGroups: []v1.RaidGroup{
- {CStorPoolInstanceBlockDevices: []v1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd-1"}}},
- },
- },
- Status: v1.CStorPoolInstanceStatus{Phase: "ONLINE"},
-}
-
-var cspi2 = v1.CStorPoolInstance{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cspc-2",
- Namespace: "cstor",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- "openebs.io/cstor-pool-cluster": "cspc",
- "openebs.io/cas-type": "cstor",
- },
- },
- Spec: v1.CStorPoolInstanceSpec{
- DataRaidGroups: []v1.RaidGroup{
- {CStorPoolInstanceBlockDevices: []v1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd-2"}}},
- },
- },
- Status: v1.CStorPoolInstanceStatus{Phase: "ONLINE"},
-}
-
-var cspiList = v1.CStorPoolInstanceList{Items: []v1.CStorPoolInstance{cspi1, cspi2}}
-
-/****************
-* BDC & BDCs
- ****************/
-
-var bd1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{Name: "bd-1", Namespace: "cstor"},
- Spec: v1alpha1.DeviceSpec{
- Path: "/dev/sdb",
- Capacity: v1alpha1.DeviceCapacity{Storage: uint64(132131321)},
- FileSystem: v1alpha1.FileSystemInfo{
- Type: "zfs_member",
- Mountpoint: "/var/some-fake-point",
- },
- NodeAttributes: v1alpha1.NodeAttribute{
- NodeName: "fake-node-1",
- },
- },
- Status: v1alpha1.DeviceStatus{
- ClaimState: "Claimed",
- State: "Active",
- },
-}
-var bd2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{Name: "bd-2", Namespace: "cstor"},
- Spec: v1alpha1.DeviceSpec{
- Path: "/dev/sdb",
- Capacity: v1alpha1.DeviceCapacity{Storage: uint64(132131321)},
- FileSystem: v1alpha1.FileSystemInfo{
- Type: "zfs_member",
- Mountpoint: "/var/some-fake-point",
- },
- NodeAttributes: v1alpha1.NodeAttribute{
- NodeName: "fake-node-1",
- },
- },
- Status: v1alpha1.DeviceStatus{
- ClaimState: "Claimed",
- State: "Active",
- },
-}
-
-var bdList = v1alpha1.BlockDeviceList{
- Items: []v1alpha1.BlockDevice{bd1, bd2},
-}
-
-var bdc1 = v1alpha1.BlockDeviceClaim{
- ObjectMeta: metav1.ObjectMeta{
- Name: "bdc-1",
- Namespace: "cstor",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- },
- Status: v1alpha1.DeviceClaimStatus{Phase: "Bound"},
-}
-
-var bdc2 = v1alpha1.BlockDeviceClaim{
- ObjectMeta: metav1.ObjectMeta{
- Name: "bdc-2",
- Namespace: "cstor",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- },
- Status: v1alpha1.DeviceClaimStatus{Phase: "Bound"},
-}
-
-var bdcList = v1alpha1.BlockDeviceClaimList{Items: []v1alpha1.BlockDeviceClaim{bdc1, bdc2}}
-
-var expectedBDs = map[string]bool{
- "bdc-1": true,
- "bdc-2": true,
- "bdc-3": false,
-}
-
-/****************
-* EVENTS
-****************/
-
-var pvcEvent1 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor-pvc-1.time1",
- Namespace: "default",
- UID: "some-random-event-uuid-1",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "PersistentVolumeClaim",
- Namespace: "default",
- Name: "cstor-pvc-1",
- UID: "some-random-pvc-uuid-1",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var pvcEvent2 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor-pvc-1.time2",
- Namespace: "default",
- UID: "some-random-event-uuid-2",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "PersistentVolumeClaim",
- Namespace: "default",
- Name: "cstor-pvc-1",
- UID: "some-random-pvc-uuid-1",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var cvcEvent1 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1.time1",
- Namespace: "cstor",
- UID: "some-random-event-uuid-3",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "CStorVolumeConfig",
- Namespace: "cstor",
- Name: "pvc-1",
- UID: "some-random-cvc-uuid-1",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var cvcEvent2 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1.time2",
- Namespace: "cstor",
- UID: "some-random-event-uuid-4",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "CStorVolumeConfig",
- Namespace: "cstor",
- Name: "pvc-1",
- UID: "some-random-cvc-uuid-2",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var bdcEvent1 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "bdc-1.time1",
- Namespace: "cstor",
- UID: "some-random-event-uuid-5",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "BlockDeviceClaim",
- Namespace: "cstor",
- Name: "bdc-1",
- UID: "some-random-bdc-uuid-1",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var bdcEvent2 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "bdc-2.time1",
- Namespace: "cstor",
- UID: "some-random-event-uuid-6",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "BlockDeviceClaim",
- Namespace: "cstor",
- Name: "bdc-2",
- UID: "some-random-bdc-uuid-1",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var cspiEvent1 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cspc-1.time1",
- Namespace: "cstor",
- UID: "some-random-event-uuid-7",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "CStorPoolInstance",
- Namespace: "cstor",
- Name: "cspc-1",
- UID: "some-random-cspi-uuid-1",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var cspiEvent2 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cspc-2.time1",
- Namespace: "cstor",
- UID: "some-random-event-uuid-8",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "CStorPoolInstance",
- Namespace: "cstor",
- Name: "cspc-2",
- UID: "some-random-cspi-uuid-2",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var cspcEvent = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cspc.time1",
- Namespace: "cstor",
- UID: "some-random-event-uuid-9",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "CStorPoolCluster",
- Namespace: "cstor",
- Name: "cspc",
- UID: "some-random-cspc-uuid-1",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var cvrEvent1 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-1.time1",
- Namespace: "cstor",
- UID: "some-random-event-uuid-10",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "CStorVolumeReplica",
- Namespace: "cstor",
- Name: "pvc-1-rep-1",
- UID: "some-random-cvr-uuid-1",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
-var cvrEvent2 = corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-2.time1",
- Namespace: "cstor",
- UID: "some-random-event-uuid-11",
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "CStorVolumeReplica",
- Namespace: "cstor",
- Name: "pvc-1-rep-2",
- UID: "some-random-cvr-uuid-2",
- },
- Reason: "some-fake-reason",
- Message: "some-fake-message",
- Count: 1,
- Type: "Warning",
- Action: "some-fake-action",
-}
-
/****************
* LVM LOCAL PV
****************/
diff --git a/pkg/storage/cstor.go b/pkg/storage/cstor.go
deleted file mode 100644
index 78062937..00000000
--- a/pkg/storage/cstor.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package storage
-
-import (
- "fmt"
- "time"
-
- "github.com/docker/go-units"
-
- "github.com/openebs/api/v2/pkg/apis/types"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/cli-runtime/pkg/printers"
-
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- "github.com/pkg/errors"
-)
-
-const cStorPoolInstanceInfoTemplate = `
-{{.Name}} Details :
-----------------
-NAME : {{.Name}}
-HOSTNAME : {{.HostName}}
-SIZE : {{.Size}}
-FREE CAPACITY : {{.FreeCapacity}}
-READ ONLY STATUS : {{.ReadOnlyStatus}}
-STATUS : {{.Status}}
-RAID TYPE : {{.RaidType}}
-`
-
-// GetCstorPools lists the pools
-func GetCstorPools(c *client.K8sClient, pools []string) ([]metav1.TableColumnDefinition, []metav1.TableRow, error) {
- cpools, err := c.GetCSPIs(pools, "")
- if err != nil {
- return nil, nil, errors.Wrap(err, "error listing pools")
- }
- var rows []metav1.TableRow
- for _, item := range cpools.Items {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- item.ObjectMeta.Name,
- item.ObjectMeta.Labels["kubernetes.io/hostname"],
- util.ConvertToIBytes(item.Status.Capacity.Free.String()),
- util.ConvertToIBytes(item.Status.Capacity.Total.String()),
- item.Status.ReadOnly,
- item.Status.ProvisionedReplicas,
- item.Status.HealthyReplicas,
- string(item.Status.Phase),
- util.Duration(time.Since(item.ObjectMeta.CreationTimestamp.Time))}})
- }
- if len(cpools.Items) == 0 {
- return nil, nil, fmt.Errorf("no cstor pools are found")
- }
- return util.CstorPoolListColumnDefinations, rows, nil
-}
-
-// DescribeCstorPool method runs info command and make call to DisplayPoolInfo to display the results
-func DescribeCstorPool(c *client.K8sClient, poolName string) error {
- pools, err := c.GetCSPIs([]string{poolName}, "")
- if err != nil {
- return errors.Wrap(err, "error getting pool info")
- }
- if len(pools.Items) == 0 {
- return fmt.Errorf("cstor-pool %s not found", poolName)
- }
- poolInfo := pools.Items[0]
- poolDetails := util.PoolInfo{
- Name: poolInfo.Name,
- HostName: poolInfo.Spec.HostName,
- Size: util.ConvertToIBytes(poolInfo.Status.Capacity.Total.String()),
- FreeCapacity: util.ConvertToIBytes(poolInfo.Status.Capacity.Free.String()),
- ReadOnlyStatus: poolInfo.Status.ReadOnly,
- Status: poolInfo.Status.Phase,
- RaidType: poolInfo.Spec.PoolConfig.DataRaidGroupType,
- }
- // Fetch all the raid groups in the CSPI
- RaidGroupsInPool := poolInfo.GetAllRaidGroups()
-
- // Fetch all the block devices in the raid groups associated to the CSPI
- var BlockDevicesInPool []string
- for _, item := range RaidGroupsInPool {
- BlockDevicesInPool = append(BlockDevicesInPool, item.GetBlockDevices()...)
- }
-
- // Printing the filled details of the Pool
- err = util.PrintByTemplate("pool", cStorPoolInstanceInfoTemplate, poolDetails)
- if err != nil {
- return err
- }
-
- // Fetch info for every block device
- var bdRows []metav1.TableRow
- for _, item := range BlockDevicesInPool {
- bd, err := c.GetBD(item)
- if err != nil {
- fmt.Printf("Could not find the blockdevice : %s\n", item)
- } else {
- bdRows = append(bdRows, metav1.TableRow{Cells: []interface{}{bd.Name, units.BytesSize(float64(bd.Spec.Capacity.Storage)), bd.Status.State}})
- }
- }
- if len(bdRows) != 0 {
- fmt.Printf("\nBlockdevice details :\n" + "---------------------\n")
- util.TablePrinter(util.BDListColumnDefinations, bdRows, printers.PrintOptions{Wide: true})
- } else {
- fmt.Printf("Could not find any blockdevice that belongs to the pool\n")
- }
-
- // Fetch info for provisional replica
- var cvrRows []metav1.TableRow
- CVRsInPool, err := c.GetCVRs(types.CStorPoolInstanceNameLabelKey + "=" + poolName)
- if err != nil {
- fmt.Printf("None of the replicas are running")
- } else {
- for _, cvr := range CVRsInPool.Items {
- pvcName := ""
- pv, err := c.GetPV(cvr.Labels["openebs.io/persistent-volume"])
- if err == nil {
- pvcName = pv.Spec.ClaimRef.Name
- }
- cvrRows = append(cvrRows, metav1.TableRow{Cells: []interface{}{
- cvr.Name,
- pvcName,
- util.ConvertToIBytes(cvr.Status.Capacity.Total),
- cvr.Status.Phase}})
- }
- }
- if len(cvrRows) != 0 {
- fmt.Printf("\nReplica Details :\n-----------------\n")
- util.TablePrinter(util.PoolReplicaColumnDefinations, cvrRows, printers.PrintOptions{Wide: true})
- }
- return nil
-}
diff --git a/pkg/storage/cstor_test.go b/pkg/storage/cstor_test.go
deleted file mode 100644
index 1fdb2b8e..00000000
--- a/pkg/storage/cstor_test.go
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package storage
-
-import (
- "fmt"
- "reflect"
- "testing"
-
- fakecstor "github.com/openebs/api/v2/pkg/client/clientset/versioned/fake"
- "github.com/openebs/api/v2/pkg/client/clientset/versioned/typed/cstor/v1/fake"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- corefake "k8s.io/client-go/kubernetes/fake"
- k8stest "k8s.io/client-go/testing"
-)
-
-func TestGetCstorPool(t *testing.T) {
- type args struct {
- c *client.K8sClient
- poolName []string
- }
- tests := []struct {
- name string
- args args
- cstorfunc func(sClient *client.K8sClient)
- want []metav1.TableRow
- wantErr bool
- }{
- {
- "no cstor pool found",
- args{c: &client.K8sClient{Ns: "openebs", OpenebsCS: fakecstor.NewSimpleClientset()},
- poolName: nil},
- cspiNotFound,
- nil,
- true,
- },
- {
- "two cstor pool found",
- args{c: &client.K8sClient{Ns: "openebs", OpenebsCS: fakecstor.NewSimpleClientset(&cspi1, &cspi2)},
- poolName: nil,
- },
- nil,
- []metav1.TableRow{
- {Cells: []interface{}{"pool-1", "node1", "174.0GiB", "188.1GiB", false, int32(2), int32(2), "ONLINE"}},
- {Cells: []interface{}{"pool-2", "node2", "174.0GiB", "188.1GiB", false, int32(2), int32(2), "ONLINE"}}},
-
- false,
- },
- {
- "no pool-3 cstor pool found",
- args{c: &client.K8sClient{Ns: "openebs", OpenebsCS: fakecstor.NewSimpleClientset(&cspi1, &cspi2)},
- poolName: []string{"pool-3"},
- },
- cspiNotFound,
- nil,
- true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.cstorfunc != nil {
- tt.cstorfunc(tt.args.c)
- }
- if head, row, err := GetCstorPools(tt.args.c, tt.args.poolName); (err != nil) != tt.wantErr {
- t.Errorf("GetCstorPool() error = %v, wantErr %v", err, tt.wantErr)
- } else if err == nil {
- if len(row) != len(tt.want) {
- t.Errorf("GetCstorPool() returned %d rows, wanted %d elements", len(row), len(tt.want))
- }
- for i, cspi := range row {
- if !reflect.DeepEqual(cspi.Cells[0:8], tt.want[i].Cells) {
- t.Errorf("GetCstorPool() returned %v want = %v", row, tt.want)
- }
- }
- if !reflect.DeepEqual(head, util.CstorPoolListColumnDefinations) {
- t.Errorf("GetCstorPools() returned wrong headers = %v want = %v", head,
- util.CstorPoolListColumnDefinations)
- }
- }
- // TODO: Check all but the last item of want
- })
- }
-}
-
-func TestDescribeCstorPool(t *testing.T) {
- type args struct {
- c *client.K8sClient
- poolName string
- }
- tests := []struct {
- name string
- args args
- cstorfunc func(sClient *client.K8sClient)
- wantErr bool
- }{
- {"no cstor pool exist",
- args{c: &client.K8sClient{Ns: "cstor", OpenebsCS: fakecstor.NewSimpleClientset()},
- poolName: ""},
- // a GET on resource which don't exist, returns an error automatically
- nil,
- true,
- },
- {"cspi-3 does not exist",
- args{c: &client.K8sClient{Ns: "cstor", OpenebsCS: fakecstor.NewSimpleClientset()},
- poolName: "cspi-3"},
- nil,
- true,
- },
- {"cspi-1 exists but Namespace mismatched",
- args{c: &client.K8sClient{Ns: "fake", OpenebsCS: fakecstor.NewSimpleClientset(&cspi1)},
- poolName: "cspi-1"},
- nil,
- true,
- },
- {
- "cspi-1 exists and namespace matches but no BD",
- args{c: &client.K8sClient{Ns: "openebs", OpenebsCS: fakecstor.NewSimpleClientset(&cspi1)},
- poolName: "pool-1"},
- nil,
- false,
- },
- {
- "cspi-1 exists and BD exists",
- args{c: &client.K8sClient{Ns: "openebs", OpenebsCS: fakecstor.NewSimpleClientset(&cspi1, &bd1)},
- poolName: "pool-1"},
- nil,
- false,
- },
- {
- "cspi-1 exists, BD & CVR exists",
- args{c: &client.K8sClient{Ns: "openebs", OpenebsCS: fakecstor.NewSimpleClientset(&cspi1, &bd1, &bd2,
- &cvr1, &cvr2), K8sCS: corefake.NewSimpleClientset(&pv1)},
- poolName: "pool-1"},
- nil,
- false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := DescribeCstorPool(tt.args.c, tt.args.poolName); (err != nil) != tt.wantErr {
- t.Errorf("DescribeCstorPool() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func cspiNotFound(c *client.K8sClient) {
- // NOTE: Set the VERB & Resource correctly & make it work for single resources
- c.OpenebsCS.CstorV1().(*fake.FakeCstorV1).Fake.PrependReactor("*", "*", func(action k8stest.Action) (handled bool, ret runtime.Object, err error) {
- return true, nil, fmt.Errorf("failed to list CSPI")
- })
-}
diff --git a/pkg/storage/lvmlocalpv.go b/pkg/storage/lvmlocalpv.go
index 7166c713..10437f1f 100644
--- a/pkg/storage/lvmlocalpv.go
+++ b/pkg/storage/lvmlocalpv.go
@@ -55,7 +55,7 @@ func GetVolumeGroups(c *client.K8sClient, vgs []string) ([]metav1.TableColumnDef
}
// 3. Actually print the table or return an error
if len(rows) == 0 {
- return nil, nil, util.HandleEmptyTableError("lvm Volumegroups", c.Ns, "")
+ return nil, nil, util.HandleEmptyTableError("lvm volumegroups", c.Ns, "")
}
return util.LVMvolgroupListColumnDefinitions, rows, nil
}
diff --git a/pkg/storage/lvmlocalpv_test.go b/pkg/storage/lvmlocalpv_test.go
index 89d6eafa..60397642 100644
--- a/pkg/storage/lvmlocalpv_test.go
+++ b/pkg/storage/lvmlocalpv_test.go
@@ -46,10 +46,9 @@ func TestGetVolumeGroup(t *testing.T) {
"no LVM volumegroups present",
args{
c: &client.K8sClient{
- Ns: "lvmlocalpv",
- K8sCS: nil,
- OpenebsCS: nil,
- LVMCS: fakelvmclient.NewSimpleClientset()},
+ Ns: "lvmlocalpv",
+ K8sCS: nil,
+ LVMCS: fakelvmclient.NewSimpleClientset()},
vg: nil,
lvmfunc: lvnNodeNotFound,
},
@@ -121,7 +120,7 @@ func TestDescribeLVMvg(t *testing.T) {
}{
{
"no LVM vgs exist",
- args{c: &client.K8sClient{Ns: "", LVMCS: fakelvmclient.NewSimpleClientset()}, lvmFunc: lvnNodeNotFound, vg: "cstor-pv1"},
+ args{c: &client.K8sClient{Ns: "", LVMCS: fakelvmclient.NewSimpleClientset()}, lvmFunc: lvnNodeNotFound, vg: "some-vg-name"},
true,
},
{
diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go
index c6db0157..4c569b65 100644
--- a/pkg/storage/storage.go
+++ b/pkg/storage/storage.go
@@ -69,13 +69,13 @@ func Get(pools []string, openebsNS string, casType string) error {
// CasList has a list of method implementations for different cas-types
func CasList() []func(*client.K8sClient, []string) ([]metav1.TableColumnDefinition, []metav1.TableRow, error) {
return []func(*client.K8sClient, []string) ([]metav1.TableColumnDefinition, []metav1.TableRow, error){
- GetCstorPools, GetVolumeGroups, GetZFSPools}
+ GetVolumeGroups, GetZFSPools}
}
// Describe manages various implementations of Storage Describing
func Describe(storages []string, openebsNs, casType string) error {
if len(storages) == 0 || storages == nil {
- return errors.New("please provide atleast one pv name to describe")
+ return errors.New("please provide atleast one storage node name to describe")
}
// 1. Create the clientset
k := client.NewK8sClient(openebsNs)
@@ -121,9 +121,8 @@ func Describe(storages []string, openebsNs, casType string) error {
func CasListMap() map[string]func(*client.K8sClient, []string) ([]metav1.TableColumnDefinition, []metav1.TableRow, error) {
// a good hack to implement immutable maps in Golang & also write tests for it
return map[string]func(*client.K8sClient, []string) ([]metav1.TableColumnDefinition, []metav1.TableRow, error){
- util.CstorCasType: GetCstorPools,
- util.LVMCasType: GetVolumeGroups,
- util.ZFSCasType: GetZFSPools,
+ util.LVMCasType: GetVolumeGroups,
+ util.ZFSCasType: GetZFSPools,
}
}
@@ -131,13 +130,12 @@ func CasListMap() map[string]func(*client.K8sClient, []string) ([]metav1.TableCo
func CasDescribeMap() map[string]func(*client.K8sClient, string) error {
// a good hack to implement immutable maps in Golang & also write tests for it
return map[string]func(*client.K8sClient, string) error{
- util.CstorCasType: DescribeCstorPool,
- util.ZFSCasType: DescribeZFSNode,
- util.LVMCasType: DescribeLVMvg,
+ util.ZFSCasType: DescribeZFSNode,
+ util.LVMCasType: DescribeLVMvg,
}
}
-// CasDescribeList returns a list of functions which describe a Storage i.e. a pool/volume-group
+// CasDescribeList returns a list of functions which describe a Storage i.e. a zfspool/volume-group
func CasDescribeList() []func(*client.K8sClient, string) error {
- return []func(*client.K8sClient, string) error{DescribeCstorPool, DescribeZFSNode, DescribeLVMvg}
+ return []func(*client.K8sClient, string) error{DescribeZFSNode, DescribeLVMvg}
}
diff --git a/pkg/storage/testdata_test.go b/pkg/storage/testdata_test.go
index 4d442c67..e05e80a2 100644
--- a/pkg/storage/testdata_test.go
+++ b/pkg/storage/testdata_test.go
@@ -17,171 +17,12 @@ limitations under the License.
package storage
import (
- "time"
-
- cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- v1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
- cstortypes "github.com/openebs/api/v2/pkg/apis/types"
lvm "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1"
zfs "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-var cspi1 = cstorv1.CStorPoolInstance{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolInstance", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{Name: "pool-1", Namespace: "openebs",
- Finalizers: []string{"cstorpoolcluster.openebs.io/finalizer", "openebs.io/pool-protection"},
- Labels: map[string]string{
- "kubernetes.io/hostname": "node1",
- "openebs.io/cas-type": "cstor",
- "openebs.io/cstor-pool-cluster": "cassandra-pool",
- "openebs.io/version": "2.11"},
- // OwnerReference links to the CSPC
- },
- Spec: cstorv1.CStorPoolInstanceSpec{
- HostName: "node1",
- NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: "stripe", WriteCacheGroupType: "", Compression: "off"},
- DataRaidGroups: []cstorv1.RaidGroup{{
- CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd-1", Capacity: 1234567, DevLink: "/dev/disk/by-id/abcd/def"}}}},
- WriteCacheRaidGroups: nil,
- },
- Status: cstorv1.CStorPoolInstanceStatus{
- Conditions: []cstorv1.CStorPoolInstanceCondition{{
- Type: cstorv1.CSPIPoolLost,
- Status: "True",
- LastUpdateTime: metav1.Time{Time: time.Now()},
- LastTransitionTime: metav1.Time{Time: time.Now()},
- Reason: "PoolLost",
- Message: "failed to importcstor-xyzabcd",
- }},
- Phase: cstorv1.CStorPoolStatusOnline,
- Capacity: cstorv1.CStorPoolInstanceCapacity{
- Used: resource.MustParse("18600Mi"),
- Free: resource.MustParse("174Gi"),
- Total: resource.MustParse("192600Mi"),
- ZFS: cstorv1.ZFSCapacityAttributes{},
- },
- ReadOnly: false, ProvisionedReplicas: 2, HealthyReplicas: 2,
- },
- VersionDetails: cstorv1.VersionDetails{Desired: "2.11",
- Status: cstorv1.VersionStatus{Current: "2.11", State: cstorv1.ReconcileComplete, LastUpdateTime: metav1.Time{Time: time.Now()}},
- },
-}
-
-var bd1 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{Kind: "BlockDevice", APIVersion: "openebs.io/v1alpha1"},
- ObjectMeta: metav1.ObjectMeta{Name: "bd-1", Namespace: "openebs",
- Annotations: map[string]string{
- "internal.openebs.io/partition-uuid": "49473bca-97c3-f340-beaf-dae9b2ce99bc",
- "internal.openebs.io/uuid-scheme": "legacy"}},
- Spec: v1alpha1.DeviceSpec{Capacity: v1alpha1.DeviceCapacity{
- Storage: 123456789,
- PhysicalSectorSize: 123456789,
- LogicalSectorSize: 123456789,
- }},
- Status: v1alpha1.DeviceStatus{},
-}
-
-var bd2 = v1alpha1.BlockDevice{
- TypeMeta: metav1.TypeMeta{
- Kind: "BlockDevice",
- APIVersion: "openebs.io/v1alpha1",
- },
- ObjectMeta: metav1.ObjectMeta{Name: "bd-2", Namespace: "openebs"},
- Spec: v1alpha1.DeviceSpec{Capacity: v1alpha1.DeviceCapacity{
- Storage: 123456789,
- PhysicalSectorSize: 123456789,
- LogicalSectorSize: 123456789,
- },
- FileSystem: v1alpha1.FileSystemInfo{Type: "zfs_member", Mountpoint: "/home/kubernetes/volume-abcd"}},
- Status: v1alpha1.DeviceStatus{
- ClaimState: "Claimed",
- State: "Active",
- },
-}
-
-var cvr1 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-1",
- Labels: map[string]string{cstortypes.CStorPoolInstanceNameLabelKey: "pool-1", "openebs.io/persistent-volume": "pv1"},
- Namespace: "openebs",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var cvr2 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-2",
- Labels: map[string]string{cstortypes.CStorPoolInstanceNameLabelKey: "pool-1", "openebs.io/persistent-volume": "pv1"},
- Namespace: "openebs",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "40Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-var pv1 = corev1.PersistentVolume{
- TypeMeta: metav1.TypeMeta{Kind: "PersistentVolume", APIVersion: "core/v1"},
- ObjectMeta: metav1.ObjectMeta{Name: "pv1"},
- Spec: corev1.PersistentVolumeSpec{ClaimRef: &corev1.ObjectReference{Name: "mongopv1"}},
- Status: corev1.PersistentVolumeStatus{},
-}
-
-var cspi2 = cstorv1.CStorPoolInstance{
- TypeMeta: metav1.TypeMeta{Kind: "CStorPoolInstance", APIVersion: "cstor.openebs.io/v1"},
- ObjectMeta: metav1.ObjectMeta{Name: "pool-2", Namespace: "openebs",
- Finalizers: []string{"cstorpoolcluster.openebs.io/finalizer", "openebs.io/pool-protection"},
- Labels: map[string]string{
- "kubernetes.io/hostname": "node2",
- "openebs.io/cas-type": "cstor",
- "openebs.io/cstor-pool-cluster": "cassandra-pool",
- "openebs.io/version": "2.11"},
- // OwnerReference links to the CSPC
- },
- Spec: cstorv1.CStorPoolInstanceSpec{
- HostName: "node2",
- NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"},
- PoolConfig: cstorv1.PoolConfig{DataRaidGroupType: "stripe", WriteCacheGroupType: "", Compression: "off"},
- DataRaidGroups: []cstorv1.RaidGroup{{
- CStorPoolInstanceBlockDevices: []cstorv1.CStorPoolInstanceBlockDevice{{BlockDeviceName: "bd2", Capacity: 1234567, DevLink: "/dev/disk/by-id/abcd/def"}}}},
- WriteCacheRaidGroups: nil,
- },
- Status: cstorv1.CStorPoolInstanceStatus{
- Conditions: []cstorv1.CStorPoolInstanceCondition{{
- Type: cstorv1.CSPIPoolLost,
- Status: "True",
- LastUpdateTime: metav1.Time{Time: time.Now()},
- LastTransitionTime: metav1.Time{Time: time.Now()},
- Reason: "PoolLost",
- Message: "failed to importcstor-xyzabcd",
- }},
- Phase: cstorv1.CStorPoolStatusOnline,
- Capacity: cstorv1.CStorPoolInstanceCapacity{
- Used: resource.MustParse("18600Mi"),
- Free: resource.MustParse("174Gi"),
- Total: resource.MustParse("192600Mi"),
- ZFS: cstorv1.ZFSCapacityAttributes{},
- },
- ReadOnly: false, ProvisionedReplicas: 2, HealthyReplicas: 2,
- },
- VersionDetails: cstorv1.VersionDetails{Desired: "2.11",
- Status: cstorv1.VersionStatus{Current: "2.11", State: cstorv1.ReconcileComplete, LastUpdateTime: metav1.Time{Time: time.Now()}},
- },
-}
-
var (
fourGigiByte = resource.MustParse("4Gi")
fiveGigiByte = resource.MustParse("5Gi")
diff --git a/pkg/storage/zfslocalpv.go b/pkg/storage/zfslocalpv.go
index 4166a31f..a4bf9046 100644
--- a/pkg/storage/zfslocalpv.go
+++ b/pkg/storage/zfslocalpv.go
@@ -40,6 +40,7 @@ func GetZFSPools(c *client.K8sClient, zfsnodes []string) ([]metav1.TableColumnDe
if err != nil {
return nil, nil, err
}
+
var rows []metav1.TableRow
for _, zfsNode := range zfsNodes.Items {
rows = append(rows, metav1.TableRow{Cells: []interface{}{zfsNode.Name, ""}})
diff --git a/pkg/storage/zfslocalpv_test.go b/pkg/storage/zfslocalpv_test.go
index 2421f6da..33397f9f 100644
--- a/pkg/storage/zfslocalpv_test.go
+++ b/pkg/storage/zfslocalpv_test.go
@@ -115,7 +115,7 @@ func TestDescribeZFSNode(t *testing.T) {
},
{
"two ZFS node exist, none asked for",
- args{c: &client.K8sClient{Ns: "zfs", ZFCS: fakezfsclient.NewSimpleClientset(&zfsNode1, &zfsNode3)}, sName: "cstor-pool-name"},
+ args{c: &client.K8sClient{Ns: "zfs", ZFCS: fakezfsclient.NewSimpleClientset(&zfsNode1, &zfsNode3)}, sName: "some-pool-name"},
true,
},
}
diff --git a/pkg/upgrade/api.go b/pkg/upgrade/api.go
deleted file mode 100644
index 98f8b7f8..00000000
--- a/pkg/upgrade/api.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package upgrade
-
-import (
- "fmt"
-
- corebuilder "github.com/openebs/api/v2/pkg/kubernetes/core"
- batchV1 "k8s.io/api/batch/v1"
- corev1 "k8s.io/api/core/v1"
-)
-
-type Job struct {
- *batchV1.Job
-}
-
-// NewJob returns an empty instance of BatchJob
-func NewJob() *Job {
- return &Job{
- &batchV1.Job{},
- }
-}
-
-// WithName sets the name of the field of Job
-func (b *Job) WithName(name string) *Job {
- b.Name = name
- return b
-}
-
-// WithGeneratedName Creates a job with auto-generated name
-func (b *Job) WithGeneratedName(name string) *Job {
- b.GenerateName = fmt.Sprintf("%s-", name)
- return b
-}
-
-// WithLabel sets label for the job
-func (b *Job) WithLabel(label map[string]string) *Job {
- b.Labels = label
- return b
-}
-
-// WithNamespace sets the namespace of the Job
-func (b *Job) WithNamespace(namespace string) *Job {
- b.Namespace = namespace
- return b
-}
-
-// BuildJobSpec builds an empty Job Spec
-func (b *Job) BuildJobSpec() *Job {
- b.Spec = batchV1.JobSpec{}
- return b
-}
-
-// WithBackOffLimit sets the backOffLimit for pods in the Job with given value
-func (b *Job) WithBackOffLimit(limit int32) *Job {
- b.Spec.BackoffLimit = &limit
- return b
-}
-
-// WithPodTemplateSpec sets the template Field for Job
-func (b *Job) WithPodTemplateSpec(pts *corebuilder.PodTemplateSpec) *Job {
- templateSpecObj := pts.Build()
- b.Spec.Template = *templateSpecObj
- return b
-}
-
-// Temporary code until PR into openebs/api is not merged----
-func (b *Job) WithRestartPolicy(policy corev1.RestartPolicy) *Job {
- b.Spec.Template.Spec.RestartPolicy = policy
- return b
-}
diff --git a/pkg/upgrade/cstor.go b/pkg/upgrade/cstor.go
deleted file mode 100644
index d2c1c468..00000000
--- a/pkg/upgrade/cstor.go
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package upgrade
-
-import (
- "errors"
- "fmt"
- "log"
-
- cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/api/v2/pkg/kubernetes/core"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- batchV1 "k8s.io/api/batch/v1"
- corev1 "k8s.io/api/core/v1"
-)
-
-func InstantiateCspcUpgrade(options UpgradeOpts) {
- k := client.NewK8sClient()
-
- // auto-determine cstor namespace
- var err error
- k.Ns, err = k.GetOpenEBSNamespace(util.CstorCasType)
- if err != nil {
- fmt.Println(`Error determining cstor namespace! using "openebs" as namespace`)
- k.Ns = "openebs"
- }
-
- cspcList, err := k.ListCSPC()
- if err != nil {
- log.Fatal("err listing CSPC ", err)
- }
-
- poolNames := getCSPCPoolNames(cspcList)
- cfg := UpgradeJobCfg{
- fromVersion: "",
- toVersion: "",
- namespace: k.Ns,
- resources: poolNames,
- serviceAccountName: "",
- backOffLimit: 4,
- logLevel: 4,
- additionalArgs: addArgs(options),
- }
-
- cfg.fromVersion, cfg.toVersion, err = getCstorVersionDetails(cspcList)
- if err != nil {
- fmt.Println("error: ", err)
- }
- if options.ToVersion != "" { // overriding the desired version from the cli flag
- cfg.toVersion = options.ToVersion
- }
-
- cfg.serviceAccountName = GetCSPCOperatorServiceAccName(k)
-
- // Check if a job is running with underlying PV
- err = inspectRunningUpgradeJobs(k, &cfg)
- // If error or upgrade job is already running return
- if err != nil {
- log.Fatal("An upgrade job is already running with the underlying volume!, More: ", err)
- }
-
- // Create upgrade job
- k.CreateBatchJob(buildCspcbatchJob(&cfg), k.Ns)
-}
-
-// buildCspcbatchJob returns CSPC Job to be build
-func buildCspcbatchJob(cfg *UpgradeJobCfg) *batchV1.Job {
- return NewJob().
- WithGeneratedName("cstor-cspc-upgrade").
- WithLabel(map[string]string{"name": "cstor-cspc-upgrade", "cas-type": "cstor"}). // sets labels for job discovery
- WithNamespace(cfg.namespace).
- WithBackOffLimit(cfg.backOffLimit).
- WithPodTemplateSpec(
- func() *core.PodTemplateSpec {
- return core.NewPodTemplateSpec().
- WithServiceAccountName(cfg.serviceAccountName).
- WithContainers(
- func() *core.Container {
- return core.NewContainer().
- WithName("upgrade-cstor-cspc-go").
- WithArgumentsNew(getCstorCspcContainerArgs(cfg)).
- WithEnvsNew(
- []corev1.EnvVar{
- {
- Name: "OPENEBS_NAMESPACE",
- ValueFrom: &corev1.EnvVarSource{
- FieldRef: &corev1.ObjectFieldSelector{
- FieldPath: "metadata.namespace",
- },
- },
- },
- },
- ).
- WithImage(fmt.Sprintf("openebs/upgrade:%s", cfg.toVersion)).
- WithImagePullPolicy(corev1.PullIfNotPresent) // Add TTY to openebs/api
- }(),
- )
- }(),
- ).
- WithRestartPolicy(corev1.RestartPolicyOnFailure). // Add restart policy in openebs/api
- Job
-}
-
-func getCstorCspcContainerArgs(cfg *UpgradeJobCfg) []string {
- // Set container arguments
- args := append([]string{
- "cstor-cspc",
- fmt.Sprintf("--from-version=%s", cfg.fromVersion),
- fmt.Sprintf("--to-version=%s", cfg.toVersion),
- "--v=4", // can be taken from flags
- }, cfg.resources...)
- args = append(args, cfg.additionalArgs...)
- return args
-}
-
-func getCSPCPoolNames(cspcList *cstorv1.CStorPoolClusterList) []string {
- var poolNames []string
- for _, cspc := range cspcList.Items {
- poolNames = append(poolNames, cspc.Name)
- }
-
- return poolNames
-}
-
-// getCstorVersionDetails returns cstor versioning details for upgrade job cfg
-// It returns fromVersion, toVersion, or error
-func getCstorVersionDetails(cspcList *cstorv1.CStorPoolClusterList) (fromVersion string, toVersion string, err error) {
- fmt.Println("Fetching CSPC control plane and Data Plane Version")
- for _, cspc := range cspcList.Items {
- fromVersion = cspc.VersionDetails.Status.Current
- toVersion = cspc.VersionDetails.Desired
-
- if fromVersion != "" && toVersion != "" {
- fmt.Println("Current Version:", fromVersion)
- fmt.Println("Desired Version:", toVersion)
- return
- }
- }
-
- return "", "", errors.New("problems fetching versioning details")
-}
-
-func GetCSPCOperatorServiceAccName(k *client.K8sClient) string {
- pods, err := k.GetPods("openebs.io/component-name=cspc-operator", "", k.Ns)
- if err != nil || len(pods.Items) == 0 {
- log.Fatal("error occurred while searching operator, or no operator is found: ", err)
- }
-
- return pods.Items[0].Spec.ServiceAccountName
-}
diff --git a/pkg/upgrade/jiva.go b/pkg/upgrade/jiva.go
deleted file mode 100644
index 1ac91232..00000000
--- a/pkg/upgrade/jiva.go
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package upgrade
-
-import (
- "fmt"
- "log"
-
- core "github.com/openebs/api/v2/pkg/kubernetes/core"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- batchV1 "k8s.io/api/batch/v1"
- corev1 "k8s.io/api/core/v1"
-)
-
-type jobInfo struct {
- name string
- namespace string
-}
-
-// Jiva Data-plane Upgrade Job instantiator
-func InstantiateJivaUpgrade(upgradeOpts UpgradeOpts) {
- k := client.NewK8sClient()
-
- // auto-determine jiva namespace
- ns, err := k.GetOpenEBSNamespace(util.JivaCasType)
- if err != nil {
- fmt.Println(`Error determining namespace! using "openebs" as namespace`)
- ns = "openebs"
- }
-
- // get running volumes from cluster
- volNames, fromVersion, err := getJivaVolumesVersion(k)
- if err != nil {
- fmt.Println(err)
- return
- }
-
- // assign to-version
- if upgradeOpts.ToVersion == "" {
- pods, e := k.GetPods("name=jiva-operator", "", "")
- if e != nil {
- fmt.Println("Failed to get operator-version, err: ", e)
- return
- }
-
- if len(pods.Items) == 0 {
- fmt.Println("Jiva-operator is not running!")
- return
- }
-
- upgradeOpts.ToVersion = pods.Items[0].Labels["openebs.io/version"]
- upgradeOpts.ServiceAccountName = getServiceAccountName(pods)
- }
-
- // create configuration
- cfg := UpgradeJobCfg{
- fromVersion: fromVersion,
- toVersion: upgradeOpts.ToVersion,
- namespace: ns,
- resources: volNames,
- serviceAccountName: upgradeOpts.ServiceAccountName,
- backOffLimit: 4,
- logLevel: 4,
- additionalArgs: addArgs(upgradeOpts),
- }
-
- // Check if a job is running with underlying PV
- err = inspectRunningUpgradeJobs(k, &cfg)
- // If error or upgrade job is already running return
- if err != nil {
- log.Fatal("An upgrade job is already running with the underlying volume!, More: ", err)
- }
-
- k.CreateBatchJob(BuildJivaBatchJob(&cfg), cfg.namespace)
-}
-
-// getJivaVolumesVersion returns the Jiva volumes list and current version
-func getJivaVolumesVersion(k *client.K8sClient) ([]string, string, error) {
- // 1. Fetch all jivavolumes CRs in all namespaces
- _, jvMap, err := k.GetJVs(nil, util.Map, "", util.MapOptions{Key: util.Name})
- if err != nil {
- return nil, "", fmt.Errorf("err getting jiva volumes: %s", err.Error())
- }
-
- var jivaList *corev1.PersistentVolumeList
- //2. Get Jiva Persistent volumes
- jivaList, err = k.GetPvByCasType([]string{"jiva"}, "")
- if err != nil {
- return nil, "", fmt.Errorf("err getting jiva volumes: %s", err.Error())
- }
-
- var volumeNames []string
- var version string
-
- //3. Write-out names, versions and desired-versions
- for _, pv := range jivaList.Items {
- volumeNames = append(volumeNames, pv.Name)
- if v, ok := jvMap[pv.Name]; ok && len(version) == 0 {
- version = v.VersionDetails.Status.Current
- }
- }
-
- //4. Check for zero jiva-volumes
- if len(version) == 0 || len(volumeNames) == 0 {
- return volumeNames, version, fmt.Errorf("no jiva volumes found")
- }
-
- return volumeNames, version, nil
-}
-
-// BuildJivaBatchJob returns Job to be build
-func BuildJivaBatchJob(cfg *UpgradeJobCfg) *batchV1.Job {
- return NewJob().
- WithGeneratedName("jiva-upgrade").
- WithLabel(map[string]string{"name": "jiva-upgrade", "cas-type": "jiva"}). // sets labels for job discovery
- WithNamespace(cfg.namespace).
- WithBackOffLimit(cfg.backOffLimit).
- WithPodTemplateSpec(
- func() *core.PodTemplateSpec {
- return core.NewPodTemplateSpec().
- WithServiceAccountName(cfg.serviceAccountName).
- WithContainers(
- func() *core.Container {
- return core.NewContainer().
- WithName("upgrade-jiva-go").
- WithArgumentsNew(getJivaContainerArguments(cfg)).
- WithEnvsNew(
- []corev1.EnvVar{
- {
- Name: "OPENEBS_NAMESPACE",
- ValueFrom: &corev1.EnvVarSource{
- FieldRef: &corev1.ObjectFieldSelector{
- FieldPath: "metadata.namespace",
- },
- },
- },
- },
- ).
- WithImage(fmt.Sprintf("openebs/upgrade:%s", cfg.toVersion)).
- WithImagePullPolicy(corev1.PullIfNotPresent) // Add TTY to openebs/api
- }(),
- )
- }(),
- ).
- WithRestartPolicy(corev1.RestartPolicyOnFailure). // Add restart policy in openebs/api
- Job
-}
-
-func getJivaContainerArguments(cfg *UpgradeJobCfg) []string {
- // Set container arguments
- args := append([]string{
- "jiva-volume",
- fmt.Sprintf("--from-version=%s", cfg.fromVersion),
- fmt.Sprintf("--to-version=%s", cfg.toVersion),
- "--v=4", // can be taken from flags
- }, cfg.resources...)
- args = append(args, cfg.additionalArgs...)
- return args
-}
diff --git a/pkg/upgrade/status/jiva.go b/pkg/upgrade/status/jiva.go
deleted file mode 100644
index 31298ee3..00000000
--- a/pkg/upgrade/status/jiva.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package status
-
-import (
- "fmt"
-
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
-)
-
-// Get job with the name -> apply selector to pod
-func GetJobStatus(namespace string) {
- k := client.NewK8sClient()
- k.Ns = namespace
- // get jiva-upgrade batch jobs
- joblist, err := k.GetBatchJobs(namespace, "cas-type=jiva,name=jiva-upgrade")
- if err != nil {
- fmt.Println("Error getting jiva-upgrade jobs:", err)
- return
- }
-
- // No jobs found
- if len(joblist.Items) == 0 {
- fmt.Printf("No upgrade-jobs Found in %s namespace", namespace)
- return
- }
-
- for _, job := range joblist.Items {
- fmt.Println("***************************************")
- fmt.Println("Job Name: ", job.Name)
- getPodLogs(k, job.Name, namespace)
- }
- fmt.Println("***************************************")
-}
-
-// Get all the logs from the pods associated with a job
-func getPodLogs(k *client.K8sClient, name string, namespace string) {
- // get pods created by the job
- podList, err := k.GetPods(fmt.Sprintf("job-name=%s", name), "", namespace)
- if err != nil {
- printColoredText(fmt.Sprintf("error getting pods of job %s, err: %s", name, err), util.Red)
- return
- }
-
- // range over pods to get all the logs
- for _, pod := range podList.Items {
- fmt.Println("From Pod:", pod.Name)
- logs := k.GetPodLogs(pod.Name, namespace)
- if logs == "" {
- fmt.Printf("-> No recent logs from the pod")
- fmt.Println()
- continue
- }
- printColoredText(logs, util.Blue)
- }
-
- if len(podList.Items) == 0 {
- printColoredText("No pods are running for this job", util.Red)
- }
-}
-
-func printColoredText(message string, color util.Color) {
- fmt.Println(util.ColorText(message, color))
-}
diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go
deleted file mode 100644
index 24e7c9ea..00000000
--- a/pkg/upgrade/upgrade.go
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package upgrade
-
-import (
- "fmt"
- "os"
- "time"
-
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- batchV1 "k8s.io/api/batch/v1"
- corev1 "k8s.io/api/core/v1"
-)
-
-// UpgradeOpts are the upgrade options that are provided
-// with the CLI flags
-type UpgradeOpts struct {
- CasType string
- ToVersion string
- ImagePrefix string
- ImageTag string
- ServiceAccountName string
-}
-
-// UpgradeJobCfg holds upgrade job confiogurations while creating a new Job
-type UpgradeJobCfg struct {
- fromVersion string
- toVersion string
- namespace string
- resources []string
- backOffLimit int32
- serviceAccountName string
- logLevel int32
- additionalArgs []string
-}
-
-// inspectRunningUpgradeJobs inspects all the jobs running in the cluster
-// and returns if even one of the the jobs updating the resource is already scheduled/running
-func inspectRunningUpgradeJobs(k *client.K8sClient, cfg *UpgradeJobCfg) error {
- jobs, err := k.GetBatchJobs("", "")
- if err != nil {
- return err
- }
-
- // runningJob holds the information about the jobs that are in use by the PV
- // that has an upgrade-job progress(any status) already going
- // This anonynomous function is used to ease-in the code logic to prevent
- // using multiple booleans to get out of the loops once needed to exit
- // return statement in anonymous functions helps us with preventing additional checks
- var runningJob *batchV1.Job
- func() {
- for _, job := range jobs.Items { // JobItems
- for _, pvName := range cfg.resources { // running pvs in control plane
- for _, container := range job.Spec.Template.Spec.Containers { // iterate on containers provided by the cfg
- for _, args := range container.Args { // check if the running jobs (PVs) and the upcoming job(PVs) are common
- if args == pvName {
- runningJob = &job
- return
- }
- }
- }
- }
- }
- }()
-
- return runningJobHandler(k, runningJob)
-}
-
-// runningJobHandler checks the status of the job and takes action on it
-// to modify or delete it based on the status of the Job
-func runningJobHandler(k *client.K8sClient, runningJob *batchV1.Job) error {
-
- if runningJob != nil {
- jobCondition := runningJob.Status.Conditions
- info := jobInfo{name: runningJob.Name, namespace: runningJob.Namespace}
- if runningJob.Status.Failed > 0 ||
- len(jobCondition) > 0 && jobCondition[0].Type == "Failed" && jobCondition[0].Status == "True" {
- fmt.Println("Previous job failed.")
- fmt.Println("Reason: ", getReason(runningJob))
- fmt.Println("Creating a new Job with name:", info.name)
- // Job found thus delete the job and return false so that further process can be started
- if err := startDeletionTask(k, &info); err != nil {
- fmt.Println("error deleting job:", err)
- return err
- }
- }
-
- if runningJob.Status.Active > 0 {
- fmt.Println("A job is already active with the name", runningJob.Name, " that is upgrading the PV.")
- // TODO: Check the POD underlying the PV if their is any error inside
- os.Exit(0)
- }
-
- if runningJob.Status.Succeeded > 0 {
- fmt.Println("Previous upgrade-job was successful for upgrading P.V.")
- return shouldRestartJob(k, info)
- }
- }
-
- return nil
-}
-
-// getReason returns the reason for the current status of Job
-func getReason(job *batchV1.Job) string {
- reason := job.Status.Conditions[0].Reason
- if len(reason) == 0 {
- return "Reason Not Found, check by inspecting jobs"
- }
- return reason
-}
-
-// startDeletionTask instantiates a deletion process
-func startDeletionTask(k *client.K8sClient, info *jobInfo) error {
- err := k.DeleteBatchJob(info.name, info.namespace)
- if err != nil {
- return err
- }
- confirmDeletion(k, info)
- return nil
-}
-
-// confirmDeletion runs until the job is successfully done or reached threshold duration
-func confirmDeletion(k *client.K8sClient, info *jobInfo) {
- // create interval to call function periodically
- interval := time.NewTicker(time.Second * 2)
-
- // Create channel
- channel := make(chan bool)
-
- // Set threshold time
- go func() {
- time.Sleep(time.Second * 10)
- channel <- true
- }()
-
- for {
- select {
- case <-interval.C:
- _, err := k.GetBatchJob(info.name, info.namespace)
- // Job is deleted successfully
- if err != nil {
- return
- }
- case <-channel:
- fmt.Println("Waiting time reached! Try Again!")
- return
- }
- }
-}
-
-// Returns additional arguments like image-prefix and image-tags
-func addArgs(upgradeOpts UpgradeOpts) []string {
- var result []string
- if upgradeOpts.ImagePrefix != "" {
- result = append(result, fmt.Sprintf("--to-version-image-prefix=%s", upgradeOpts.ImagePrefix))
- }
-
- if upgradeOpts.ImageTag != "" {
- result = append(result, fmt.Sprintf("--to-version-image-tag=%s", upgradeOpts.ImageTag))
- }
-
- return result
-}
-
-// getServiceAccountName returns service account Name for the openEBS resource
-func getServiceAccountName(podList *corev1.PodList) string {
- var serviceAccountName string
- for _, pod := range podList.Items {
- svname := pod.Spec.ServiceAccountName
- if svname != "" {
- serviceAccountName = svname
- }
- }
- return serviceAccountName
-}
-
-// shouldRestartJob prompts if the job should be restarted after deleting
-// the traces of previous one
-func shouldRestartJob(k *client.K8sClient, info jobInfo) error {
- // Provide the option to restart the Job
- shouldStart := util.PromptToStartAgain("Do you want to restart the Job?(no)", false)
- if shouldStart {
- // Delete previous successful task
- if err := startDeletionTask(k, &info); err != nil {
- return err
- }
- } else {
- os.Exit(0)
- }
-
- return nil
-}
diff --git a/pkg/util/checks.go b/pkg/util/checks.go
index 079bf1c5..42b5fd72 100644
--- a/pkg/util/checks.go
+++ b/pkg/util/checks.go
@@ -17,19 +17,9 @@ limitations under the License.
package util
import (
- v1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
-
corev1 "k8s.io/api/core/v1"
)
-// CheckVersion returns a message based on the status of the version
-func CheckVersion(versionDetail v1.VersionDetails) string {
- if string(versionDetail.Status.State) == "Reconciled" || string(versionDetail.Status.State) == "" {
- return versionDetail.Status.Current
- }
- return string(versionDetail.Status.State) + ", desired version " + versionDetail.Desired
-}
-
// CheckForVol is used to check if the we can get the volume, if no volume attachment
// to SC for the corresponding volume is found display error
func CheckForVol(name string, vols map[string]*Volume) *Volume {
@@ -46,7 +36,7 @@ func CheckForVol(name string, vols map[string]*Volume) *Volume {
return errVol
}
-//AccessModeToString Flattens the arrat of AccessModes and returns a string fit to display in the output
+// AccessModeToString Flattens the arrat of AccessModes and returns a string fit to display in the output
func AccessModeToString(accessModeArray []corev1.PersistentVolumeAccessMode) string {
accessModes := ""
for _, mode := range accessModeArray {
diff --git a/pkg/util/checks_test.go b/pkg/util/checks_test.go
index 8783a1dc..8ecf277b 100644
--- a/pkg/util/checks_test.go
+++ b/pkg/util/checks_test.go
@@ -17,42 +17,11 @@ limitations under the License.
package util
import (
- "reflect"
"testing"
corev1 "k8s.io/api/core/v1"
)
-func TestCheckForVol(t *testing.T) {
- type args struct {
- name string
- vols map[string]*Volume
- }
- tests := []struct {
- name string
- args args
- want *Volume
- }{
- {
- "volume_attached_to_storage_class",
- args{name: "cstor_volume", vols: map[string]*Volume{"cstor_volume": {CSIVolumeAttachmentName: "volume_one"}}},
- &Volume{CSIVolumeAttachmentName: "volume_one"},
- },
- {
- "volume_not_attached_to_storage_class",
- args{name: "cstor_volume", vols: map[string]*Volume{"cstor_volume_two": {CSIVolumeAttachmentName: "volume_one"}}},
- &Volume{StorageClass: NotAvailable, Node: NotAvailable, AttachementStatus: NotAvailable, AccessMode: NotAvailable},
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := CheckForVol(tt.args.name, tt.args.vols); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("CheckForVol() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
func TestAccessModeToString(t *testing.T) {
type args struct {
accessModeArray []corev1.PersistentVolumeAccessMode
diff --git a/pkg/util/constant.go b/pkg/util/constant.go
index bbc526da..df2c23ba 100644
--- a/pkg/util/constant.go
+++ b/pkg/util/constant.go
@@ -25,46 +25,22 @@ const (
Unknown = "unknown"
// OpenEBSCasTypeKeySc present in parameter of SC
OpenEBSCasTypeKeySc = "cas-type"
- // CstorCasType cas type name
- CstorCasType = "cstor"
// ZFSCasType cas type name
ZFSCasType = "localpv-zfs"
- // JivaCasType is the cas type name for Jiva
- JivaCasType = "jiva"
// LVMCasType cas type name
LVMCasType = "localpv-lvm"
// LocalPvHostpathCasType cas type name
LocalPvHostpathCasType = "localpv-hostpath"
- // LocalDeviceCasType cas type name
- LocalDeviceCasType = "localpv-device"
// LocalHostpathCasLabel cas-type label in dynamic-localpv-provisioner
LocalHostpathCasLabel = "local-hostpath"
- // Healthy cstor volume status
- Healthy = "Healthy"
// StorageKey key present in pvc status.capacity
StorageKey = "storage"
// NotAvailable shows something is missing, could be a component,
// unknown version, or some other unknowns
NotAvailable = "N/A"
- // CVAVolnameKey present in label of CVA
- CVAVolnameKey = "Volname"
- // UnicodeCross stores the character representation of U+2718
- UnicodeCross = "✘"
- // UnicodeCheck stores the character representation of U+2714
- UnicodeCheck = "✔"
- // NotFound stores the Not Found Status
- NotFound = "Not Found"
- // CVANotAttached stores CVA Not Attached status
- CVANotAttached = "Not Attached to Application"
- // Attached stores CVA Attached Status
- Attached = "Attached"
)
const (
- // CStorCSIDriver is the name of CStor CSI driver
- CStorCSIDriver = "cstor.csi.openebs.io"
- // JivaCSIDriver is the name of the Jiva CSI driver
- JivaCSIDriver = "jiva.csi.openebs.io"
// ZFSCSIDriver is the name of the ZFS localpv CSI driver
ZFSCSIDriver = "zfs.csi.openebs.io"
// LocalPVLVMCSIDriver is the name of the LVM LocalPV CSI driver
@@ -74,10 +50,6 @@ const (
// Constant CSI component-name label values
const (
- // CStorCSIControllerLabelValue is the label value of CSI controller STS & pod
- CStorCSIControllerLabelValue = "openebs-cstor-csi-controller"
- // JivaCSIControllerLabelValue is the label value of CSI controller STS & pod
- JivaCSIControllerLabelValue = "openebs-jiva-csi-controller"
// LVMLocalPVcsiControllerLabelValue is the label value of CSI controller STS & pod
LVMLocalPVcsiControllerLabelValue = "openebs-lvm-controller"
// ZFSLocalPVcsiControllerLabelValue is the label value of CSI controller STS & pod
@@ -85,12 +57,6 @@ const (
)
const (
- // CstorComponentNames for the cstor control plane components
- CstorComponentNames = "cspc-operator,cvc-operator,cstor-admission-webhook,openebs-cstor-csi-node,openebs-cstor-csi-controller"
- // NDMComponentNames for the ndm components
- NDMComponentNames = "openebs-ndm-operator,ndm"
- // JivaComponentNames for the jiva control plane components
- JivaComponentNames = "openebs-jiva-csi-node,openebs-jiva-csi-controller,jiva-operator"
// LVMComponentNames for the lvm control plane components
LVMComponentNames = "openebs-lvm-controller,openebs-lvm-node"
// ZFSComponentNames for the zfs control plane components
@@ -101,36 +67,24 @@ const (
var (
// CasTypeAndComponentNameMap stores the component name of the corresponding cas type
- // NOTE: Not including ZFSLocalPV as it'd break existing code
CasTypeAndComponentNameMap = map[string]string{
- CstorCasType: CStorCSIControllerLabelValue,
- JivaCasType: JivaCSIControllerLabelValue,
LVMCasType: LVMLocalPVcsiControllerLabelValue,
ZFSCasType: ZFSLocalPVcsiControllerLabelValue,
LocalPvHostpathCasType: HostpathComponentNames,
}
// ComponentNameToCasTypeMap is a reverse map of CasTypeAndComponentNameMap
- // NOTE: Not including ZFSLocalPV as it'd break existing code
ComponentNameToCasTypeMap = map[string]string{
- CStorCSIControllerLabelValue: CstorCasType,
- JivaCSIControllerLabelValue: JivaCasType,
LVMLocalPVcsiControllerLabelValue: LVMCasType,
ZFSLocalPVcsiControllerLabelValue: ZFSCasType,
HostpathComponentNames: LocalPvHostpathCasType,
}
// ProvsionerAndCasTypeMap stores the cas type name of the corresponding provisioner
ProvsionerAndCasTypeMap = map[string]string{
- CStorCSIDriver: CstorCasType,
- JivaCSIDriver: JivaCasType,
- // NOTE: In near future this might mean all local-pv volumes
LocalPVLVMCSIDriver: LVMCasType,
ZFSCSIDriver: ZFSCasType,
}
// CasTypeToCSIProvisionerMap stores the provisioner of corresponding cas-types
CasTypeToCSIProvisionerMap = map[string]string{
- CstorCasType: CStorCSIDriver,
- JivaCasType: JivaCSIDriver,
- // NOTE: In near future this might mean all local-pv volumes
LVMCasType: LocalPVLVMCSIDriver,
ZFSCasType: ZFSCSIDriver,
}
@@ -138,43 +92,10 @@ var (
// CasTypeToComponentNamesMap stores the names of the control-plane components of each cas-types.
// To show statuses of new CasTypes, please update this map.
CasTypeToComponentNamesMap = map[string]string{
- CstorCasType: CstorComponentNames + "," + NDMComponentNames,
- JivaCasType: JivaComponentNames + "," + HostpathComponentNames,
LocalPvHostpathCasType: HostpathComponentNames,
- LocalDeviceCasType: HostpathComponentNames + "," + NDMComponentNames,
ZFSCasType: ZFSComponentNames,
LVMCasType: LVMComponentNames,
}
-
- // CstorReplicaColumnDefinations stores the Table headers for CVR Details
- CstorReplicaColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "ZFS Used(compressed)", Type: "string"},
- {Name: "LogicalReferenced", Type: "string"},
- {Name: "Status", Type: "string"},
- {Name: "Age", Type: "string"},
- }
- // PodDetailsColumnDefinations stores the Table headers for Pod Details
- PodDetailsColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Namespace", Type: "string"},
- {Name: "Name", Type: "string"},
- {Name: "Ready", Type: "string"},
- {Name: "Status", Type: "string"},
- {Name: "Age", Type: "string"},
- {Name: "IP", Type: "string"},
- {Name: "Node", Type: "string"},
- }
- // JivaPodDetailsColumnDefinations stores the Table headers for Jiva Pod Details
- JivaPodDetailsColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Namespace", Type: "string"},
- {Name: "Name", Type: "string"},
- {Name: "Mode", Type: "string"},
- {Name: "Node", Type: "string"},
- {Name: "Status", Type: "string"},
- {Name: "IP", Type: "string"},
- {Name: "Ready", Type: "string"},
- {Name: "Age", Type: "string"},
- }
// VolumeListColumnDefinations stores the Table headers for Volume Details
VolumeListColumnDefinations = []metav1.TableColumnDefinition{
{Name: "Namespace", Type: "string"},
@@ -187,66 +108,6 @@ var (
{Name: "Access Mode", Type: "string"},
{Name: "Attached Node", Type: "string"},
}
- // CstorPoolListColumnDefinations stores the Table headers for Cstor Pool Details
- CstorPoolListColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "HostName", Type: "string"},
- {Name: "Free", Type: "string"},
- {Name: "Capacity", Type: "string"},
- {Name: "Read Only", Type: "bool"},
- {Name: "Provisioned Replicas", Type: "int"},
- {Name: "Healthy Replicas", Type: "int"},
- {Name: "Status", Type: "string"},
- {Name: "Age", Type: "string"},
- }
- // BDListColumnDefinations stores the Table headers for Block Device Details
- BDListColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "Capacity", Type: "string"},
- {Name: "State", Type: "string"},
- }
- // PoolReplicaColumnDefinations stores the Table headers for Pool Replica Details
- PoolReplicaColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "PVC Name", Type: "string"},
- {Name: "Size", Type: "string"},
- {Name: "State", Type: "string"},
- }
- // CstorBackupColumnDefinations stores the Table headers for Cstor Backup Details
- CstorBackupColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "Backup Name", Type: "string"},
- {Name: "Volume Name", Type: "string"},
- {Name: "Backup Destination", Type: "string"},
- {Name: "Snap Name", Type: "string"},
- {Name: "Status", Type: "string"},
- }
- // CstorCompletedBackupColumnDefinations stores the Table headers for Cstor Completed Backup Details
- CstorCompletedBackupColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "Backup Name", Type: "string"},
- {Name: "Volume Name", Type: "string"},
- {Name: "Last Snap Name", Type: "string"},
- }
- // CstorRestoreColumnDefinations stores the Table headers for Cstor Restore Details
- CstorRestoreColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "Restore Name", Type: "string"},
- {Name: "Volume Name", Type: "string"},
- {Name: "Restore Source", Type: "string"},
- {Name: "Storage Class", Type: "string"},
- {Name: "Status", Type: "string"},
- }
- // BDTreeListColumnDefinations stores the Table headers for Block Device Details, when displayed as tree
- BDTreeListColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "Path", Type: "string"},
- {Name: "Size", Type: "string"},
- {Name: "ClaimState", Type: "string"},
- {Name: "Status", Type: "string"},
- {Name: "FsType", Type: "string"},
- {Name: "MountPoint", Type: "string"},
- }
// LVMvolgroupListColumnDefinitions stores the table headers for listing lvm vg-group when displayed as tree
LVMvolgroupListColumnDefinitions = []metav1.TableColumnDefinition{
{Name: "Name", Type: "string"},
@@ -259,38 +120,6 @@ var (
{Name: "FreeSize", Type: "string"},
}
- // JivaReplicaPVCColumnDefinations stores the Table headers for Jiva Replica PVC details
- JivaReplicaPVCColumnDefinations = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "Status", Type: "string"},
- {Name: "Volume", Type: "string"},
- {Name: "Capacity", Type: "string"},
- {Name: "Storageclass", Type: "string"},
- {Name: "Age", Type: "string"},
- }
-
- // CstorVolumeCRStatusColumnDefinitions stores the Table headers for Cstor CRs status details
- CstorVolumeCRStatusColumnDefinitions = []metav1.TableColumnDefinition{
- {Name: "Kind", Type: "string"},
- {Name: "Name", Type: "string"},
- {Name: "Status", Type: "string"},
- }
-
- // VolumeTotalAndUsageDetailColumnDefinitions stores the Table headers for volume usage details
- VolumeTotalAndUsageDetailColumnDefinitions = []metav1.TableColumnDefinition{
- {Name: "Total Capacity", Type: "string"},
- {Name: "Used Capacity", Type: "string"},
- {Name: "Available Capacity", Type: "string"},
- }
- // EventsColumnDefinitions stores the Table headers for events details
- EventsColumnDefinitions = []metav1.TableColumnDefinition{
- {Name: "Name", Type: "string"},
- {Name: "Action", Type: "string"},
- {Name: "Reason", Type: "string"},
- {Name: "Message", Type: "string"},
- {Name: "Type", Type: "string"},
- }
-
VersionColumnDefinition = []metav1.TableColumnDefinition{
{Name: "Component", Type: "string"},
{Name: "Version", Type: "string"},
diff --git a/pkg/util/error_test.go b/pkg/util/error_test.go
index c9463d44..8222adc8 100644
--- a/pkg/util/error_test.go
+++ b/pkg/util/error_test.go
@@ -83,9 +83,9 @@ func TestHandleEmptyTableError(t *testing.T) {
args{
resource: "ResourceType",
ns: "InValid",
- casType: "jiva",
+ casType: "zfs",
},
- fmt.Errorf("no jiva ResourceType found in InValid namespace"),
+ fmt.Errorf("no zfs ResourceType found in InValid namespace"),
},
{
"",
diff --git a/pkg/util/k8s_utils.go b/pkg/util/k8s_utils.go
index ec1238f6..090e0964 100644
--- a/pkg/util/k8s_utils.go
+++ b/pkg/util/k8s_utils.go
@@ -19,21 +19,10 @@ package util
import (
"strconv"
- cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/storage/v1"
)
-// GetUsedCapacityFromCVR as the healthy replicas would have the correct used capacity details
-func GetUsedCapacityFromCVR(cvrList *cstorv1.CStorVolumeReplicaList) string {
- for _, item := range cvrList.Items {
- if item.Status.Phase == Healthy {
- return item.Status.Capacity.Used
- }
- }
- return ""
-}
-
// GetCasType from the v1pv and v1sc, this is a fallback checker method, it checks
// both the resource only if the castype is not found.
func GetCasType(v1PV *corev1.PersistentVolume, v1SC *v1.StorageClass) string {
@@ -106,5 +95,5 @@ func GetReadyContainers(containers []corev1.ContainerStatus) string {
// IsValidCasType to return true if the casType is supported
func IsValidCasType(casType string) bool {
- return casType == CstorCasType || casType == JivaCasType || casType == LVMCasType || casType == ZFSCasType
+ return casType == LVMCasType || casType == ZFSCasType
}
diff --git a/pkg/util/k8s_utils_test.go b/pkg/util/k8s_utils_test.go
index 45d03d74..ed8a0b13 100644
--- a/pkg/util/k8s_utils_test.go
+++ b/pkg/util/k8s_utils_test.go
@@ -19,181 +19,9 @@ package util
import (
"testing"
- cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
corev1 "k8s.io/api/core/v1"
- v1 "k8s.io/api/storage/v1"
)
-var (
- inAnnotationPV, inLabelsPV = cstorPV1, cstorPV1
-)
-
-func TestGetCasType(t *testing.T) {
- inAnnotationPV.Annotations = map[string]string{"openebs.io/cas-type": "cstor"}
- inLabelsPV.Labels = map[string]string{"openebs.io/cas-type": "cstor"}
- type args struct {
- v1PV *corev1.PersistentVolume
- v1SC *v1.StorageClass
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- "PV and SC both present",
- args{
- v1PV: &cstorPV1,
- v1SC: &cstorSC,
- },
- "cstor",
- },
- {
- "PV present and SC absent",
- args{
- v1PV: &cstorPV1,
- v1SC: nil,
- },
- "cstor",
- },
- {
- "PV present and SC absent",
- args{
- v1PV: &inLabelsPV,
- v1SC: nil,
- },
- "cstor",
- },
- {
- "PV present and SC absent",
- args{
- v1PV: &inAnnotationPV,
- v1SC: nil,
- },
- "cstor",
- },
- {
- "PV absent and SC present",
- args{
- v1PV: nil,
- v1SC: &cstorSC,
- },
- "cstor",
- },
- {
- "PV absent and SC present",
- args{
- v1PV: nil,
- v1SC: &jivaSC,
- },
- "jiva",
- },
- {
- "Both PV and SC absent",
- args{
- v1PV: nil,
- v1SC: nil,
- },
- "unknown",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetCasType(tt.args.v1PV, tt.args.v1SC); got != tt.want {
- t.Errorf("GetCasType() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetCasTypeFromPV(t *testing.T) {
- inAnnotationPV.Annotations = map[string]string{"openebs.io/cas-type": "cstor"}
- inLabelsPV.Labels = map[string]string{"openebs.io/cas-type": "cstor"}
- type args struct {
- v1PV *corev1.PersistentVolume
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- "From volume attributes",
- args{
- v1PV: &cstorPV1,
- },
- "cstor",
- },
- {
- "From labels",
- args{
- v1PV: &inLabelsPV,
- },
- "cstor",
- },
- {
- "From annotations",
- args{
- v1PV: &inAnnotationPV,
- },
- "cstor",
- },
- {
- "nil pv",
- args{
- v1PV: nil,
- },
- "unknown",
- },
- {
- "zfs pv, from CSI driver",
- args{v1PV: &zfspv},
- "localpv-zfs",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetCasTypeFromPV(tt.args.v1PV); got != tt.want {
- t.Errorf("GetCasTypeFromPV() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetCasTypeFromSC(t *testing.T) {
- type args struct {
- v1SC *v1.StorageClass
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- "From provisioner",
- args{v1SC: &cstorSC},
- "cstor",
- },
- {
- "From parameters",
- args{v1SC: &jivaSC},
- "jiva",
- },
- {
- "SC nil",
- args{v1SC: nil},
- "unknown",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetCasTypeFromSC(tt.args.v1SC); got != tt.want {
- t.Errorf("GetCasTypeFromSC() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
func TestGetReadyContainers(t *testing.T) {
type args struct {
containers []corev1.ContainerStatus
@@ -223,52 +51,6 @@ func TestGetReadyContainers(t *testing.T) {
}
}
-func TestGetUsedCapacityFromCVR(t *testing.T) {
- type args struct {
- cvrList *cstorv1.CStorVolumeReplicaList
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {
- "Valid values",
- args{cvrList: &cstorv1.CStorVolumeReplicaList{Items: []cstorv1.CStorVolumeReplica{{Status: cstorv1.CStorVolumeReplicaStatus{
- Phase: "Init",
- Capacity: cstorv1.CStorVolumeReplicaCapacityDetails{Total: "5.0 GiB", Used: "2.1 GiB"},
- }}, {Status: cstorv1.CStorVolumeReplicaStatus{
- Phase: "Healthy",
- Capacity: cstorv1.CStorVolumeReplicaCapacityDetails{Total: "5.0 GiB", Used: "2.5 GiB"},
- }}}}},
- "2.5 GiB",
- },
- {
- "Valid values",
- args{cvrList: &cstorv1.CStorVolumeReplicaList{Items: []cstorv1.CStorVolumeReplica{{Status: cstorv1.CStorVolumeReplicaStatus{
- Phase: "Init",
- Capacity: cstorv1.CStorVolumeReplicaCapacityDetails{Total: "5.0 GiB", Used: "2.5 GiB"},
- }}, {Status: cstorv1.CStorVolumeReplicaStatus{
- Phase: "Init",
- Capacity: cstorv1.CStorVolumeReplicaCapacityDetails{Total: "5.0 GiB", Used: "2.5 GiB"},
- }}}}},
- "",
- },
- {
- "Valid values",
- args{cvrList: &cstorv1.CStorVolumeReplicaList{Items: []cstorv1.CStorVolumeReplica{}}},
- "",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetUsedCapacityFromCVR(tt.args.cvrList); got != tt.want {
- t.Errorf("GetUsedCapacityFromCVR() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
func TestIsValidCasType(t *testing.T) {
type args struct {
casType string
@@ -280,7 +62,7 @@ func TestIsValidCasType(t *testing.T) {
}{
{
"Valid Cas Name",
- args{casType: CstorCasType},
+ args{casType: LVMCasType},
true,
},
{
diff --git a/pkg/util/testdata_test.go b/pkg/util/testdata_test.go
deleted file mode 100644
index eb758504..00000000
--- a/pkg/util/testdata_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
- "time"
-
- cstortypes "github.com/openebs/api/v2/pkg/apis/types"
- corev1 "k8s.io/api/core/v1"
- v1 "k8s.io/api/storage/v1"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var (
- fourGigiByte = resource.MustParse("4Gi")
- cstorScName = "cstor-sc"
- cstorVolumeMode = corev1.PersistentVolumeFilesystem
-
- cstorPV1 = corev1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: corev1.PersistentVolumeSpec{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- ClaimRef: &corev1.ObjectReference{
- Namespace: "default",
- Name: "cstor-pvc-1",
- },
- PersistentVolumeReclaimPolicy: "Retain",
- StorageClassName: cstorScName,
- VolumeMode: &cstorVolumeMode,
- PersistentVolumeSource: corev1.PersistentVolumeSource{CSI: &corev1.CSIPersistentVolumeSource{
- Driver: "cstor.csi.openebs.io", VolumeAttributes: map[string]string{"openebs.io/cas-type": "cstor"},
- }},
- },
- Status: corev1.PersistentVolumeStatus{Phase: corev1.VolumeBound},
- }
- zfspv = corev1.PersistentVolume{
- Spec: corev1.PersistentVolumeSpec{
- PersistentVolumeSource: corev1.PersistentVolumeSource{CSI: &corev1.CSIPersistentVolumeSource{Driver: ZFSCSIDriver}}}}
-
- cstorSC = v1.StorageClass{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor-sc",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- },
- Provisioner: "cstor.csi.openebs.io",
- }
-
- jivaSC = v1.StorageClass{
- ObjectMeta: metav1.ObjectMeta{
- Name: "jiva-sc",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- },
- Provisioner: "jiva.csi.openebs.io",
- Parameters: map[string]string{"cas-type": "jiva"},
- }
-)
diff --git a/pkg/util/types.go b/pkg/util/types.go
index 4210453b..e4464f1b 100644
--- a/pkg/util/types.go
+++ b/pkg/util/types.go
@@ -17,12 +17,10 @@ limitations under the License.
package util
import (
- v1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/api/v2/pkg/apis/openebs.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
)
-//Volume struct will have all the details we want to give in the output for
+// Volume struct will have all the details we want to give in the output for
// openebsctl commands
type Volume struct {
// AccessModes contains all ways the volume can be mounted
@@ -31,59 +29,34 @@ type Volume struct {
AttachementStatus string
// Represents the actual capacity of the underlying volume.
Capacity string
- // CStorPoolCluster that this volume belongs to
- CSPC string
- // The unique volume name returned by the CSI volume plugin to
- // refer to the volume on all subsequent calls.
- CSIVolumeAttachmentName string
- Name string
+ Name string
//Namespace defines the space within each name must be unique.
// An empty namespace is equivalent to the "default" namespace
Namespace string
Node string
// Name of the PVClaim of the underlying Persistent Volume
PVC string
- // Status of the CStor Volume
- Status v1.CStorVolumePhase
// Name of StorageClass to which this persistent volume belongs.
StorageClass string
- // will be cStorVolume for all cStor volumes
- VolType string
// version of the spec used to create the volumes
Version string
}
-//VolumeInfo struct will have all the details we want to give in the output for
+// VolumeInfo struct will have all the details we want to give in the output for
// openebsctl command volume describe
type VolumeInfo struct {
AccessMode string
// Capacity of the underlying PV
Capacity string
- // CStorPoolCluster that the volume belongs to
- CSPC string
- // cStor Instance Driver
- CSIDriver string
- CSIVolumeAttachmentName string
// Name of the volume & Namespace on which it exists
- Name string
- Namespace string
- // Name of the underlying PVC
- PVC string
- // ReplicationFactor represents number of volume replica created during
- // volume provisioning connect to the target
- ReplicaCount int
+ Name string
+ PVC string
// Phase indicates if a volume is available, bound to a claim, or released
// by a claim.
VolumePhase corev1.PersistentVolumePhase
// Name of StorageClass to which this persistent volume belongs.
StorageClass string
- // Version of the OpenEBS resource definition being used
- Version string
- Size string
- // Status of the CStor volume
- Status string
- // JVP is the name of the JivaVolumePolicy
- JVP string
+ Size string
}
type LocalHostPathVolInfo struct {
@@ -93,65 +66,6 @@ type LocalHostPathVolInfo struct {
CasType string
}
-// PortalInfo keep info about the ISCSI Target Portal.
-type PortalInfo struct {
- // Target iSCSI Qualified Name.combination of nodeBase
- IQN string
- VolumeName string
- // iSCSI Target Portal. The Portal is combination of IP:port
- // (typically TCP ports 3260)
- Portal string
- // TargetIP IP of the iSCSI target service
- TargetIP string
- //Node Name on which the application pod is running
- TargetNodeName string
-}
-
-// CStorReplicaInfo holds information about the cStor replicas
-type CStorReplicaInfo struct {
- // Replica name present on ObjectMetadata
- Name string
- // Node on which the replica is present
- NodeName string
- ID v1.ReplicaID
- //Replica Status reflects the phase, i.e hold result of last action.
- // ec. Healthy, Offline ,Degraded etc.
- Status string
-}
-
-// CstorPVCInfo struct will have all the details we want to give in the output for describe pvc
-// details section for cstor pvc
-type CstorPVCInfo struct {
- Name string
- Namespace string
- CasType string
- BoundVolume string
- AttachedToNode string
- Pool string
- StorageClassName string
- Size string
- Used string
- CVStatus v1.CStorVolumePhase
- PVStatus corev1.PersistentVolumePhase
- MountPods string
-}
-
-// JivaPVCInfo struct will have all the details we want to give in the output for describe pvc
-// details section for jiva pvc
-type JivaPVCInfo struct {
- Name string
- Namespace string
- CasType string
- BoundVolume string
- AttachedToNode string
- JVP string
- StorageClassName string
- Size string
- JVStatus string
- PVStatus corev1.PersistentVolumePhase
- MountPods string
-}
-
// LVMPVCInfo struct will have all the details we want to give in the output for describe pvc
// details section for lvm pvc
type LVMPVCInfo struct {
@@ -179,7 +93,7 @@ type ZFSPVCInfo struct {
}
// PVCInfo struct will have all the details we want to give in the output for describe pvc
-// details section for non-cstor pvc
+// details section for generic pvc
type PVCInfo struct {
Name string
Namespace string
@@ -191,35 +105,6 @@ type PVCInfo struct {
MountPods string
}
-// PoolInfo struct will have all the details we want to give in the output for describe pool
-// details section for cstor pool instance
-type PoolInfo struct {
- Name string
- HostName string
- Size string
- FreeCapacity string
- ReadOnlyStatus bool
- Status v1.CStorPoolInstancePhase
- RaidType string
-}
-
-// BlockDevicesInfoInPool struct will have all the details we want to give in the output for describe pool
-// details section for block devices in the cstor pool instance
-type BlockDevicesInfoInPool struct {
- Name string
- Capacity uint64
- State v1alpha1.BlockDeviceState
-}
-
-// CVRInfo struct will have all the details we want to give in the output for describe pool
-// details section for provisional replicas in the cstor pool instance
-type CVRInfo struct {
- Name string
- PvcName string
- Size string
- Status v1.CStorVolumeReplicaPhase
-}
-
// MapOptions struct to get the resources as Map with the provided options
// Key defines what to use as a key, ex:- name, label, currently these two are supported, add more according to need.
// LabelKey defines which Label to use as key.
@@ -245,21 +130,6 @@ const (
Label Key = "label"
)
-// CstorVolumeResources would contain all the resources needed for debugging a Cstor Volume
-type CstorVolumeResources struct {
- PV *corev1.PersistentVolume
- PVC *corev1.PersistentVolumeClaim
- CV *v1.CStorVolume
- CVC *v1.CStorVolumeConfig
- CVA *v1.CStorVolumeAttachment
- CVRs *v1.CStorVolumeReplicaList
- PresentBDs *v1alpha1.BlockDeviceList
- ExpectedBDs map[string]bool
- BDCs *v1alpha1.BlockDeviceClaimList
- CSPIs *v1.CStorPoolInstanceList
- CSPC *v1.CStorPoolCluster
-}
-
// ZFSVolDesc is the output helper for ZfsVolDesc
type ZFSVolDesc struct {
Name string
diff --git a/pkg/volume/cstor.go b/pkg/volume/cstor.go
deleted file mode 100644
index 8e1665ec..00000000
--- a/pkg/volume/cstor.go
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volume
-
-import (
- "fmt"
- "os"
- "time"
-
- cstortypes "github.com/openebs/api/v2/pkg/apis/types"
- "k8s.io/cli-runtime/pkg/printers"
-
- v1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- cstorVolInfoTemplate = `
-{{.Name}} Details :
------------------
-NAME : {{.Name}}
-ACCESS MODE : {{.AccessMode}}
-CSI DRIVER : {{.CSIDriver}}
-STORAGE CLASS : {{.StorageClass}}
-VOLUME PHASE : {{.VolumePhase }}
-VERSION : {{.Version}}
-CSPC : {{.CSPC}}
-SIZE : {{.Size}}
-STATUS : {{.Status}}
-REPLICA COUNT : {{.ReplicaCount}}
-`
- cstorPortalTemplate = `
-Portal Details :
-------------------
-IQN : {{.IQN}}
-VOLUME NAME : {{.VolumeName}}
-TARGET NODE NAME : {{.TargetNodeName}}
-PORTAL : {{.Portal}}
-TARGET IP : {{.TargetIP}}
-`
-)
-
-// GetCStor returns a list of CStor volumes
-func GetCStor(c *client.K8sClient, pvList *corev1.PersistentVolumeList, openebsNS string) ([]metav1.TableRow, error) {
- var (
- cvMap map[string]v1.CStorVolume
- cvaMap map[string]v1.CStorVolumeAttachment
- )
- // no need to proceed if CVs/CVAs don't exist
- var err error
- _, cvMap, err = c.GetCVs(nil, util.Map, "", util.MapOptions{
- Key: util.Name})
- if err != nil {
- return nil, fmt.Errorf("failed to list CStorVolumes")
- }
- _, cvaMap, err = c.GetCVAs(util.Map, "", util.MapOptions{
- Key: util.Label,
- LabelKey: "Volname"})
- if err != nil {
- return nil, fmt.Errorf("failed to list CStorVolumeAttachments")
- }
- var rows []metav1.TableRow
- // 3. Show the required ones
- for _, pv := range pvList.Items {
- var attachedNode, storageVersion, customStatus, ns string
- // TODO: Estimate the cas-type and decide to print it out
- // Should all AccessModes be shown in a csv format, or the highest be displayed ROO < RWO < RWX?
- // 2. For eligible PVs fetch the custom-resource to add more info
- if pv.Spec.CSI != nil && pv.Spec.CSI.Driver == util.CStorCSIDriver {
- // For all CSI CStor PV, there exist a CV
- cv, ok := cvMap[pv.Name]
- if !ok {
- // condition not possible
- _, _ = fmt.Fprintf(os.Stderr, "couldn't find cv "+pv.Name)
- }
- ns = cv.Namespace
- if openebsNS != "" && openebsNS != ns {
- continue
- }
- customStatus = string(cv.Status.Phase)
- storageVersion = cv.VersionDetails.Status.Current
- cva, cvaOk := cvaMap[pv.Name]
- if cvaOk {
- attachedNode = cva.Labels["nodeID"]
- }
- // TODO: What should be done for multiple AccessModes
- accessMode := pv.Spec.AccessModes[0]
- rows = append(rows, metav1.TableRow{
- Cells: []interface{}{
- ns, pv.Name, customStatus, storageVersion, util.ConvertToIBytes(pv.Spec.Capacity.Storage().String()), pv.Spec.StorageClassName, pv.Status.Phase,
- accessMode, attachedNode}})
- }
- }
- return rows, nil
-}
-
-// DescribeCstorVolume describes a cstor storage engine PersistentVolume
-func DescribeCstorVolume(c *client.K8sClient, vol *corev1.PersistentVolume) error {
- // Fetch all details of a volume is called to get the volume controller's
- // info such as controller's IP, status, iqn, replica IPs etc.
- // 1. cStor volume info
- volumeInfo, err := c.GetCV(vol.Name)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "failed to get cStorVolume for %s\n", vol.Name)
- return err
- }
- // 2. cStor Volume Config
- cvcInfo, err := c.GetCVC(vol.Name)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "failed to get cStor Volume config for %s\n", vol.Name)
- return err
- }
- // 3. Get Node for Target Pod from the openebs-ns
- node, err := c.GetCVA(util.CVAVolnameKey + "=" + vol.Name)
- var nodeName string
- if err != nil {
- nodeName = util.NotAvailable
- _, _ = fmt.Fprintf(os.Stderr, "failed to get CStorVolumeAttachments for %s\n", vol.Name)
- } else {
- nodeName = node.Spec.Volume.OwnerNodeID
- }
-
- // 5. cStor Volume Replicas
- cvrInfo, _ := c.GetCVRs(cstortypes.PersistentVolumeLabelKey + "=" + vol.Name)
- if len(cvrInfo.Items) == 0 {
- _, _ = fmt.Fprintf(os.Stderr, "failed to get cStor Volume Replicas for %s\n", vol.Name)
- }
-
- cSPCLabel := cstortypes.CStorPoolClusterLabelKey
- volume := util.VolumeInfo{
- AccessMode: util.AccessModeToString(vol.Spec.AccessModes),
- Capacity: volumeInfo.Status.Capacity.String(),
- CSPC: cvcInfo.Labels[cSPCLabel],
- CSIDriver: vol.Spec.CSI.Driver,
- CSIVolumeAttachmentName: vol.Spec.CSI.VolumeHandle,
- Name: volumeInfo.Name,
- Namespace: volumeInfo.Namespace,
- PVC: vol.Spec.ClaimRef.Name,
- ReplicaCount: volumeInfo.Spec.ReplicationFactor,
- VolumePhase: vol.Status.Phase,
- StorageClass: vol.Spec.StorageClassName,
- Version: util.CheckVersion(volumeInfo.VersionDetails),
- Size: util.ConvertToIBytes(volumeInfo.Status.Capacity.String()),
- Status: string(volumeInfo.Status.Phase),
- }
-
- // Print the output for the portal status info
- _ = util.PrintByTemplate("volume", cstorVolInfoTemplate, volume)
-
- portalInfo := util.PortalInfo{
- IQN: volumeInfo.Spec.Iqn,
- VolumeName: volumeInfo.Name,
- Portal: volumeInfo.Spec.TargetPortal,
- TargetIP: volumeInfo.Spec.TargetIP,
- TargetNodeName: nodeName,
- }
-
- // Print the output for the portal status info
- _ = util.PrintByTemplate("PortalInfo", cstorPortalTemplate, portalInfo)
-
- replicaCount := volumeInfo.Spec.ReplicationFactor
- // This case will occur only if user has manually specified zero replica.
- // or if none of the replicas are healthy & running
- if replicaCount == 0 || len(volumeInfo.Status.ReplicaStatuses) == 0 {
- fmt.Printf("\nNone of the replicas are running\n")
- //please check the volume pod's status by running [kubectl describe pvc -l=openebs/replica --all-namespaces]\Oor try again later.")
- return nil
- }
-
- // Print replica details
- if cvrInfo != nil && len(cvrInfo.Items) > 0 {
- fmt.Printf("\nReplica Details :\n-----------------\n")
- var rows []metav1.TableRow
- for _, cvr := range cvrInfo.Items {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- cvr.Name,
- util.ConvertToIBytes(cvr.Status.Capacity.Total),
- util.ConvertToIBytes(cvr.Status.Capacity.Used),
- cvr.Status.Phase,
- util.Duration(time.Since(cvr.ObjectMeta.CreationTimestamp.Time))}})
- }
- util.TablePrinter(util.CstorReplicaColumnDefinations, rows, printers.PrintOptions{Wide: true})
- }
-
- cStorBackupList, _ := c.GetCVBackups(cstortypes.PersistentVolumeLabelKey + "=" + vol.Name)
- if cStorBackupList != nil {
- fmt.Printf("\nCstor Backup Details :\n" + "---------------------\n")
- var rows []metav1.TableRow
- for _, item := range cStorBackupList.Items {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- item.ObjectMeta.Name,
- item.Spec.BackupName,
- item.Spec.VolumeName,
- item.Spec.BackupDest,
- item.Spec.SnapName,
- item.Status,
- }})
- }
- util.TablePrinter(util.CstorBackupColumnDefinations, rows, printers.PrintOptions{Wide: true})
- }
-
- cstorCompletedBackupList, _ := c.GetCVCompletedBackups(cstortypes.PersistentVolumeLabelKey + "=" + vol.Name)
- if cstorCompletedBackupList != nil {
- fmt.Printf("\nCstor Completed Backup Details :" + "\n-------------------------------\n")
- var rows []metav1.TableRow
- for _, item := range cstorCompletedBackupList.Items {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- item.Name,
- item.Spec.BackupName,
- item.Spec.VolumeName,
- item.Spec.LastSnapName,
- }})
- }
- util.TablePrinter(util.CstorCompletedBackupColumnDefinations, rows, printers.PrintOptions{Wide: true})
- }
-
- cStorRestoreList, _ := c.GetCVRestores(cstortypes.PersistentVolumeLabelKey + "=" + vol.Name)
- if cStorRestoreList != nil {
- fmt.Printf("\nCstor Restores Details :" + "\n-----------------------\n")
- var rows []metav1.TableRow
- for _, item := range cStorRestoreList.Items {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- item.ObjectMeta.Name,
- item.Spec.RestoreName,
- item.Spec.VolumeName,
- item.Spec.RestoreSrc,
- item.Spec.StorageClass,
- item.Spec.Size.String(),
- item.Status,
- }})
- }
- util.TablePrinter(util.CstorRestoreColumnDefinations, rows, printers.PrintOptions{Wide: true})
- }
- fmt.Println()
- return nil
-}
diff --git a/pkg/volume/cstor_test.go b/pkg/volume/cstor_test.go
deleted file mode 100644
index ff7fe226..00000000
--- a/pkg/volume/cstor_test.go
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volume
-
-import (
- "reflect"
- "testing"
-
- openebsFakeClientset "github.com/openebs/api/v2/pkg/client/clientset/versioned/fake"
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-func TestDescribeCstorVolume(t *testing.T) {
- cvRep := cv1
- cvRep.Spec.ReplicationFactor = 0
- cvRep.Status.ReplicaStatuses = nil
- type args struct {
- c *client.K8sClient
- vol *corev1.PersistentVolume
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- name: "All Valid Values",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- vol: &cstorPV1,
- },
- wantErr: false,
- },
- {
- name: "All Valid Values with CV absent",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv2, &cva1, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- vol: &cstorPV1,
- },
- wantErr: true,
- },
- {
- name: "All Valid Values with CVC absent",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- vol: &cstorPV1,
- },
- wantErr: true,
- },
- {
- name: "All Valid Values with CVA absent",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- vol: &cstorPV1,
- },
- wantErr: false,
- },
- {
- name: "All Valid Values with CVRs absent",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva2, &cvc1, &cvc2, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- vol: &cstorPV1,
- },
- wantErr: false,
- },
- {
- name: "All Valid Values with CVR count as 0",
- args: args{
- c: &client.K8sClient{
- Ns: "cstor",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cvRep, &cv2, &cva2, &cvc1, &cvc2, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- vol: &cstorPV1,
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := DescribeCstorVolume(tt.args.c, tt.args.vol); (err != nil) != tt.wantErr {
- t.Errorf("DescribeCstorVolume() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestGetCStor(t *testing.T) {
- type args struct {
- c *client.K8sClient
- pvList *corev1.PersistentVolumeList
- openebsNS string
- }
- tests := []struct {
- name string
- args args
- want []metav1.TableRow
- wantErr bool
- }{
- {
- name: "Test with all valid resources present.",
- args: args{
- c: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{cstorPV1, cstorPV2}},
- openebsNS: "cstor",
- },
- want: []metav1.TableRow{{Cells: []interface{}{
- "cstor", "pvc-1", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-1"},
- }, {Cells: []interface{}{
- "cstor", "pvc-2", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-2"},
- }},
- wantErr: false,
- },
- {
- name: "Test with one of the required cv not present",
- args: args{
- c: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cva1, &cva2, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{cstorPV1, cstorPV2}},
- openebsNS: "cstor",
- },
- want: []metav1.TableRow{{Cells: []interface{}{
- "cstor", "pvc-1", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-1"},
- }},
- wantErr: false,
- },
- {
- name: "Test with one of the required cva not present, i.e node cannot be determined",
- args: args{
- c: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cvc1, &cvc2, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{cstorPV1, cstorPV2}},
- openebsNS: "cstor",
- },
- want: []metav1.TableRow{{Cells: []interface{}{
- "cstor", "pvc-1", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-1"},
- }, {Cells: []interface{}{
- "cstor", "pvc-2", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, ""},
- }},
- wantErr: false,
- },
- {
- name: "Test with one of the required cvc not present, i.e nothing should break in code",
- args: args{
- c: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc1, &cvr1, &cvr2, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{cstorPV1, cstorPV2}},
- openebsNS: "cstor",
- },
- want: []metav1.TableRow{{Cells: []interface{}{
- "cstor", "pvc-1", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-1"},
- }, {Cells: []interface{}{
- "cstor", "pvc-2", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-2"},
- }},
- wantErr: false,
- },
- {
- name: "Test with two of the required cvrs not present, i.e nothing should break in code",
- args: args{
- c: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc1, &cvr3, &cvr4, &cbkp, &ccbkp, &crestore),
- },
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{cstorPV1, cstorPV2}},
- openebsNS: "cstor",
- },
- want: []metav1.TableRow{{Cells: []interface{}{
- "cstor", "pvc-1", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-1"},
- }, {Cells: []interface{}{
- "cstor", "pvc-2", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-2"},
- }},
- wantErr: false,
- },
- {
- name: "Test with backup and restore crs not present, i.e nothing should break in code",
- args: args{
- c: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1, &cv2, &cva1, &cva2, &cvc1, &cvr3, &cvr4),
- },
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{cstorPV1, cstorPV2}},
- openebsNS: "cstor",
- },
- want: []metav1.TableRow{{Cells: []interface{}{
- "cstor", "pvc-1", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-1"},
- }, {Cells: []interface{}{
- "cstor", "pvc-2", util.Healthy, "2.11.0", "4.0GiB", "cstor-sc", corev1.VolumeBound, corev1.ReadWriteOnce, "node-2"},
- }},
- wantErr: false,
- },
- {
- name: "Test with none of the underlying cstor crs",
- args: args{
- c: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(),
- },
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{cstorPV1}},
- openebsNS: "cstor",
- },
- want: nil,
- wantErr: true,
- },
- {
- name: "Test with none of the underlying cvas are present",
- args: args{
- c: &client.K8sClient{
- Ns: "",
- K8sCS: fake.NewSimpleClientset(&cstorPV1, &cstorPV2, &cstorPVC1, &cstorPVC2, &nsCstor),
- OpenebsCS: openebsFakeClientset.NewSimpleClientset(&cv1),
- },
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{cstorPV1}},
- openebsNS: "cstor",
- },
- want: nil,
- wantErr: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := GetCStor(tt.args.c, tt.args.pvList, tt.args.openebsNS)
- if (err != nil) != tt.wantErr {
- t.Errorf("GetCStor() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("GetCStor() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/pkg/volume/jiva.go b/pkg/volume/jiva.go
deleted file mode 100644
index abf905d5..00000000
--- a/pkg/volume/jiva.go
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volume
-
-import (
- "fmt"
- "os"
- "strings"
- "time"
-
- "k8s.io/cli-runtime/pkg/printers"
-
- "github.com/openebs/openebsctl/pkg/client"
- "github.com/openebs/openebsctl/pkg/util"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- // JivaVolInfoTemplate to store the jiva volume and pvc describe related details
- JivaVolInfoTemplate = `
-{{.Name}} Details :
------------------
-NAME : {{.Name}}
-ACCESS MODE : {{.AccessMode}}
-CSI DRIVER : {{.CSIDriver}}
-STORAGE CLASS : {{.StorageClass}}
-VOLUME PHASE : {{.VolumePhase }}
-VERSION : {{.Version}}
-JVP : {{.JVP}}
-SIZE : {{.Size}}
-STATUS : {{.Status}}
-REPLICA COUNT : {{.ReplicaCount}}
-
-`
- // JivaPortalTemplate to store the portal details for jiva volume and pvc
- JivaPortalTemplate = `
-Portal Details :
-------------------
-IQN : {{.spec.iscsiSpec.iqn}}
-VOLUME NAME : {{.metadata.name}}
-TARGET NODE NAME : {{.metadata.labels.nodeID}}
-PORTAL : {{.spec.iscsiSpec.targetIP}}:{{.spec.iscsiSpec.targetPort}}
-
-`
-)
-
-// GetJiva returns a list of JivaVolumes
-func GetJiva(c *client.K8sClient, pvList *corev1.PersistentVolumeList, openebsNS string) ([]metav1.TableRow, error) {
- // 1. Fetch all relevant volume CRs without worrying about openebsNS
- _, jvMap, err := c.GetJVs(nil, util.Map, "", util.MapOptions{Key: util.Name})
- if err != nil {
- return nil, fmt.Errorf("failed to list JivaVolumes")
- }
- var rows []metav1.TableRow
- // 3. Show the required ones
- for _, pv := range pvList.Items {
- name := pv.Name
- capacity := pv.Spec.Capacity.Storage()
- sc := pv.Spec.StorageClassName
- attached := pv.Status.Phase
- var attachedNode, storageVersion, customStatus, ns string
- // Should all AccessModes be shown in a csv format, or the highest be displayed ROO < RWO < RWX?
- if pv.Spec.CSI != nil && pv.Spec.CSI.Driver == util.JivaCSIDriver {
- jv, ok := jvMap[pv.Name]
- if !ok {
- _, _ = fmt.Fprintln(os.Stderr, "couldn't find jv "+pv.Name)
- }
- ns = jv.Namespace
- if openebsNS != "" && openebsNS != ns {
- continue
- }
- customStatus = jv.Status.Status // RW, RO, etc
- attachedNode = jv.Labels["nodeID"]
- storageVersion = jv.VersionDetails.Status.Current
- } else {
- // Skip non-Jiva options
- continue
- }
- accessMode := pv.Spec.AccessModes[0]
- rows = append(rows, metav1.TableRow{
- Cells: []interface{}{
- ns, name, customStatus, storageVersion, capacity, sc, attached,
- accessMode, attachedNode},
- })
- }
- return rows, nil
-}
-
-// DescribeJivaVolume describes a jiva storage engine PersistentVolume
-func DescribeJivaVolume(c *client.K8sClient, vol *corev1.PersistentVolume) error {
- // 1. Get the JivaVolume Corresponding to the pv name
- jv, err := c.GetJV(vol.Name)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "failed to get JivaVolume for %s\n", vol.Name)
- return err
- }
- // 2. Fill in JivaVolume related details
- jivaVolInfo := util.VolumeInfo{
- AccessMode: util.AccessModeToString(vol.Spec.AccessModes),
- Capacity: util.ConvertToIBytes(vol.Spec.Capacity.Storage().String()),
- CSIDriver: vol.Spec.CSI.Driver,
- Name: jv.Name,
- Namespace: jv.Namespace,
- PVC: vol.Spec.ClaimRef.Name,
- ReplicaCount: jv.Spec.Policy.Target.ReplicationFactor,
- VolumePhase: vol.Status.Phase,
- StorageClass: vol.Spec.StorageClassName,
- Version: jv.VersionDetails.Status.Current,
- Size: util.ConvertToIBytes(vol.Spec.Capacity.Storage().String()),
- Status: jv.Status.Status,
- JVP: jv.Annotations["openebs.io/volume-policy"],
- }
- // 3. Print the Volume information
- _ = util.PrintByTemplate("jivaVolumeInfo", JivaVolInfoTemplate, jivaVolInfo)
- // 4. Print the Portal Information
- util.TemplatePrinter(JivaPortalTemplate, jv)
-
- replicaPodIPAndModeMap := make(map[string]string)
- // Create Replica IP to Mode Map
- if jv.Status.ReplicaStatuses != nil && len(jv.Status.ReplicaStatuses) != 0 {
- for _, replicaStatus := range jv.Status.ReplicaStatuses {
- replicaPodIPAndModeMap[strings.Split(replicaStatus.Address, ":")[1][2:]] = replicaStatus.Mode
- }
- }
-
- // 5. Fetch the Jiva controller and replica pod details
- podList, err := c.GetJVTargetPod(vol.Name)
- if err == nil {
- fmt.Println("Controller and Replica Pod Details :")
- fmt.Println("-----------------------------------")
- var rows []metav1.TableRow
- for _, pod := range podList.Items {
- if strings.Contains(pod.Name, "-ctrl-") {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- pod.Namespace, pod.Name, jv.Status.Status,
- pod.Spec.NodeName, pod.Status.Phase, pod.Status.PodIP,
- util.GetReadyContainers(pod.Status.ContainerStatuses),
- util.Duration(time.Since(pod.ObjectMeta.CreationTimestamp.Time))}})
- } else {
- if val, ok := replicaPodIPAndModeMap[pod.Status.PodIP]; ok {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- pod.Namespace, pod.Name, val,
- pod.Spec.NodeName, pod.Status.Phase, pod.Status.PodIP,
- util.GetReadyContainers(pod.Status.ContainerStatuses),
- util.Duration(time.Since(pod.ObjectMeta.CreationTimestamp.Time))}})
- }
- }
- }
- util.TablePrinter(util.JivaPodDetailsColumnDefinations, rows, printers.PrintOptions{Wide: true})
- } else {
- fmt.Println("Controller and Replica Pod Details :")
- fmt.Println("-----------------------------------")
- fmt.Println("No Controller and Replica pod exists for the JivaVolume")
- }
- // 6. Fetch the replica PVCs and create rows for cli-runtime
- var rows []metav1.TableRow
- pvcList, err := c.GetPVCs(c.Ns, nil, "openebs.io/component=jiva-replica,openebs.io/persistent-volume="+jv.Name)
- if err != nil || len(pvcList.Items) == 0 {
- fmt.Printf("No replicas found for the JivaVolume %s", vol.Name)
- return nil
- }
- for _, pvc := range pvcList.Items {
- rows = append(rows, metav1.TableRow{Cells: []interface{}{
- pvc.Name,
- pvc.Status.Phase,
- pvc.Spec.VolumeName,
- util.ConvertToIBytes(pvc.Spec.Resources.Requests.Storage().String()),
- *pvc.Spec.StorageClassName,
- util.Duration(time.Since(pvc.ObjectMeta.CreationTimestamp.Time)),
- pvc.Spec.VolumeMode}})
- }
- // 6. Print the replica details if present
- fmt.Println()
- fmt.Println("Replica Data Volume Details :")
- fmt.Println("-----------------------------")
- util.TablePrinter(util.JivaReplicaPVCColumnDefinations, rows, printers.PrintOptions{Wide: true})
- return nil
-}
diff --git a/pkg/volume/jiva_test.go b/pkg/volume/jiva_test.go
deleted file mode 100644
index 48631eff..00000000
--- a/pkg/volume/jiva_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright 2020-2022 The OpenEBS Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volume
-
-import (
- "reflect"
- "testing"
-
- "github.com/openebs/openebsctl/pkg/client"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestDescribeJivaVolume(t *testing.T) {
- type args struct {
- c *client.K8sClient
- vol *corev1.PersistentVolume
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := DescribeJivaVolume(tt.args.c, tt.args.vol); (err != nil) != tt.wantErr {
- t.Errorf("DescribeJivaVolume() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestGetJiva(t *testing.T) {
- type args struct {
- c *client.K8sClient
- pvList *corev1.PersistentVolumeList
- openebsNS string
- }
- tests := []struct {
- name string
- args args
- want []metav1.TableRow
- wantErr bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := GetJiva(tt.args.c, tt.args.pvList, tt.args.openebsNS)
- if (err != nil) != tt.wantErr {
- t.Errorf("GetJiva() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("GetJiva() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/pkg/volume/local_hostpath_test.go b/pkg/volume/local_hostpath_test.go
index 075eedec..5e7cee2d 100644
--- a/pkg/volume/local_hostpath_test.go
+++ b/pkg/volume/local_hostpath_test.go
@@ -42,11 +42,10 @@ func TestGetLocalHostpath(t *testing.T) {
name: "no local hostpath volumes present",
args: args{
c: &client.K8sClient{
- Ns: "random-namespace",
- K8sCS: k8sfake.NewSimpleClientset(),
- OpenebsCS: nil,
+ Ns: "random-namespace",
+ K8sCS: k8sfake.NewSimpleClientset(),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, pv2}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{zfsPV1, lvmPV1}},
openebsNS: "openebs",
},
want: []metav1.TableRow{},
@@ -59,7 +58,7 @@ func TestGetLocalHostpath(t *testing.T) {
Ns: "lvmlocalpv",
K8sCS: k8sfake.NewSimpleClientset(&localpvHostpathDpl1),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, localHostpathPv1}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{zfsPV1, localHostpathPv1}},
openebsNS: "localhostpath",
},
wantErr: false,
@@ -76,7 +75,7 @@ func TestGetLocalHostpath(t *testing.T) {
Ns: "lvmlocalpv",
K8sCS: k8sfake.NewSimpleClientset(&localpvHostpathDpl1, &localpvHostpathDpl2),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, localHostpathPv1}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{zfsPV1, localHostpathPv1}},
openebsNS: "localhostpath",
},
wantErr: false,
diff --git a/pkg/volume/lvmlocalpv_test.go b/pkg/volume/lvmlocalpv_test.go
index 69c96155..daaa7e7c 100644
--- a/pkg/volume/lvmlocalpv_test.go
+++ b/pkg/volume/lvmlocalpv_test.go
@@ -47,12 +47,11 @@ func TestGetLVMLocalPV(t *testing.T) {
name: "no lvm volumes present",
args: args{
c: &client.K8sClient{
- Ns: "random-namespace",
- LVMCS: fake.NewSimpleClientset(),
- K8sCS: k8sfake.NewSimpleClientset(),
- OpenebsCS: nil,
+ Ns: "random-namespace",
+ LVMCS: fake.NewSimpleClientset(),
+ K8sCS: k8sfake.NewSimpleClientset(),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, pv2, pv3}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{zfsPV1, localHostpathPv1}},
lvmReactors: lvmVolNotExists,
openebsNS: "openebs",
},
@@ -67,7 +66,7 @@ func TestGetLVMLocalPV(t *testing.T) {
K8sCS: k8sfake.NewSimpleClientset(&localpvCSICtrlSTS),
LVMCS: fake.NewSimpleClientset(&lvmVol1),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, lvmPV1}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{zfsPV1, lvmPV1}},
openebsNS: "lvmlocalpv",
},
wantErr: false,
@@ -95,11 +94,11 @@ func TestGetLVMLocalPV(t *testing.T) {
name: "only one lvm volume present, namespace conflicts",
args: args{
c: &client.K8sClient{
- Ns: "jiva",
+ Ns: "zfs",
K8sCS: k8sfake.NewSimpleClientset(&localpvCSICtrlSTS),
LVMCS: fake.NewSimpleClientset(&lvmVol1),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, lvmPV1}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{zfsPV1, lvmPV1}},
openebsNS: "lvmlocalpvXYZ",
},
wantErr: false,
@@ -174,7 +173,7 @@ func TestDescribeLVMLocalPVs(t *testing.T) {
{
"one lvm volume present and some other volume asked for",
args{c: &client.K8sClient{Ns: "lvm", K8sCS: k8sfake.NewSimpleClientset(&lvmPV1), LVMCS: fake.NewSimpleClientset(&lvmVol1)},
- vol: &cstorPV2,
+ vol: &zfsPV1,
lvmfunc: lvmVolNotExists},
false,
},
diff --git a/pkg/volume/testdata_test.go b/pkg/volume/testdata_test.go
index 509c2271..fee26f76 100644
--- a/pkg/volume/testdata_test.go
+++ b/pkg/volume/testdata_test.go
@@ -19,8 +19,6 @@ package volume
import (
"time"
- v1 "github.com/openebs/api/v2/pkg/apis/cstor/v1"
- cstortypes "github.com/openebs/api/v2/pkg/apis/types"
lvm "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1"
"github.com/openebs/openebsctl/pkg/util"
zfs "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
@@ -36,378 +34,6 @@ var (
blockFS = corev1.PersistentVolumeBlock
)
-/****************
-* CSTOR
-****************/
-
-var nsCstor = corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- },
- Spec: corev1.NamespaceSpec{Finalizers: []corev1.FinalizerName{corev1.FinalizerKubernetes}},
-}
-
-var cv1 = v1.CStorVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeSpec{
- Capacity: fourGigiByte,
- TargetIP: "10.2.2.2",
- TargetPort: "3002",
- Iqn: "pvc1-some-fake-iqn",
- TargetPortal: "10.2.2.2:3002",
- ReplicationFactor: 3,
- ConsistencyFactor: 0,
- DesiredReplicationFactor: 0,
- ReplicaDetails: v1.CStorVolumeReplicaDetails{KnownReplicas: map[v1.ReplicaID]string{
- "some-id-1": "pvc-1-rep-1", "some-id-2": "pvc-1-rep-2", "some-id-3": "pvc-1-rep-3"},
- },
- },
- Status: v1.CStorVolumeStatus{
- Phase: util.Healthy,
- ReplicaStatuses: []v1.ReplicaStatus{{ID: "some-id-1", Mode: "Healthy"}, {ID: "some-id-2", Mode: "Healthy"}, {ID: "some-id-3", Mode: "Healthy"}},
- Capacity: fourGigiByte,
- ReplicaDetails: v1.CStorVolumeReplicaDetails{KnownReplicas: map[v1.ReplicaID]string{
- "some-id-1": "pvc-1-rep-1", "some-id-2": "pvc-1-rep-2", "some-id-3": "pvc-1-rep-3"},
- },
- },
- VersionDetails: v1.VersionDetails{
- AutoUpgrade: false,
- Desired: "2.11.0",
- Status: v1.VersionStatus{
- DependentsUpgraded: true,
- Current: "2.11.0",
- LastUpdateTime: metav1.Time{},
- },
- },
-}
-
-var cv2 = v1.CStorVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeSpec{
- Capacity: fourGigiByte,
- TargetIP: "10.2.2.2",
- TargetPort: "3002",
- Iqn: "pvc1-some-fake-iqn",
- TargetPortal: "10.2.2.2:3002",
- ReplicationFactor: 3,
- ConsistencyFactor: 0,
- DesiredReplicationFactor: 0,
- ReplicaDetails: v1.CStorVolumeReplicaDetails{KnownReplicas: map[v1.ReplicaID]string{
- "some-id-1": "pvc-2-rep-1"},
- },
- },
- Status: v1.CStorVolumeStatus{
- Phase: util.Healthy,
- ReplicaStatuses: []v1.ReplicaStatus{{ID: "some-id-1", Mode: "Healthy"}},
- Capacity: fourGigiByte,
- ReplicaDetails: v1.CStorVolumeReplicaDetails{KnownReplicas: map[v1.ReplicaID]string{
- "some-id-1": "pvc-2-rep-1"},
- },
- },
- VersionDetails: v1.VersionDetails{
- AutoUpgrade: false,
- Desired: "2.11.0",
- Status: v1.VersionStatus{
- DependentsUpgraded: true,
- Current: "2.11.0",
- LastUpdateTime: metav1.Time{},
- },
- },
-}
-
-var cvc1 = v1.CStorVolumeConfig{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeConfigSpec{Provision: v1.VolumeProvision{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- ReplicaCount: 3,
- }},
- Publish: v1.CStorVolumeConfigPublish{},
- Status: v1.CStorVolumeConfigStatus{PoolInfo: []string{"pool-1", "pool-2", "pool-3"}},
- VersionDetails: v1.VersionDetails{
- AutoUpgrade: false,
- Desired: "2.11.0",
- Status: v1.VersionStatus{Current: "2.11.0"},
- },
-}
-
-var cvc2 = v1.CStorVolumeConfig{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeConfigSpec{Provision: v1.VolumeProvision{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- ReplicaCount: 3,
- }},
- Publish: v1.CStorVolumeConfigPublish{},
- Status: v1.CStorVolumeConfigStatus{PoolInfo: []string{"pool-1"}},
- VersionDetails: v1.VersionDetails{
- AutoUpgrade: false,
- Desired: "2.11.0",
- Status: v1.VersionStatus{Current: "2.11.0"},
- },
-}
-
-var cva1 = v1.CStorVolumeAttachment{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-cva",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"Volname": "pvc-1", "nodeID": "node-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeAttachmentSpec{Volume: v1.VolumeInfo{OwnerNodeID: "node-1"}},
-}
-
-var cva2 = v1.CStorVolumeAttachment{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2-cva",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{"Volname": "pvc-2", "nodeID": "node-2"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Spec: v1.CStorVolumeAttachmentSpec{Volume: v1.VolumeInfo{OwnerNodeID: "node-2"}},
-}
-
-var cvr1 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var cvr2 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var cvr3 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1-rep-3",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var cvr4 = v1.CStorVolumeReplica{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2-rep-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-2"},
- Finalizers: []string{},
- Namespace: "cstor",
- },
- Status: v1.CStorVolumeReplicaStatus{
- Capacity: v1.CStorVolumeReplicaCapacityDetails{
- Total: "4Gi",
- Used: "70Mi",
- },
- Phase: v1.CVRStatusOnline,
- },
-}
-
-var (
- cstorScName = "cstor-sc"
- cstorVolumeMode = corev1.PersistentVolumeFilesystem
- cstorPVC1 = corev1.PersistentVolumeClaim{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor-pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-2"},
- Finalizers: []string{},
- Namespace: "default",
- },
- Spec: corev1.PersistentVolumeClaimSpec{
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- Resources: corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: fourGigiByte}},
- VolumeName: "pvc-1",
- StorageClassName: &cstorScName,
- VolumeMode: &cstorVolumeMode,
- },
- Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound, Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte}},
- }
-)
-
-var (
- cstorPVC2 = corev1.PersistentVolumeClaim{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cstor-pvc-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-2"},
- Finalizers: []string{},
- Namespace: "default",
- },
- Spec: corev1.PersistentVolumeClaimSpec{
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- Resources: corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: fourGigiByte}},
- VolumeName: "pvc-2",
- StorageClassName: &cstorScName,
- VolumeMode: &cstorVolumeMode,
- },
- Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound, Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte}},
- }
-)
-
-var (
- cstorPV1 = corev1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: corev1.PersistentVolumeSpec{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- ClaimRef: &corev1.ObjectReference{
- Namespace: "default",
- Name: "cstor-pvc-1",
- },
- PersistentVolumeReclaimPolicy: "Retain",
- StorageClassName: cstorScName,
- VolumeMode: &cstorVolumeMode,
- PersistentVolumeSource: corev1.PersistentVolumeSource{CSI: &corev1.CSIPersistentVolumeSource{
- Driver: "cstor.csi.openebs.io",
- }},
- },
- Status: corev1.PersistentVolumeStatus{Phase: corev1.VolumeBound},
- }
-)
-
-var (
- cstorPV2 = corev1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-2",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-2"},
- Finalizers: []string{},
- },
- Spec: corev1.PersistentVolumeSpec{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
- ClaimRef: &corev1.ObjectReference{
- Namespace: "default",
- Name: "cstor-pvc-2",
- },
- PersistentVolumeReclaimPolicy: "Retain",
- StorageClassName: cstorScName,
- VolumeMode: &cstorVolumeMode,
- PersistentVolumeSource: corev1.PersistentVolumeSource{CSI: &corev1.CSIPersistentVolumeSource{
- Driver: "cstor.csi.openebs.io",
- }},
- },
- Status: corev1.PersistentVolumeStatus{Phase: corev1.VolumeBound},
- }
-)
-
-var cbkp = v1.CStorBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: "bkp-name",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: v1.CStorBackupSpec{
- BackupName: "bkp-name",
- VolumeName: "pvc-1",
- SnapName: "snap-name",
- PrevSnapName: "prev-snap-name",
- BackupDest: "10.2.2.7",
- LocalSnap: true,
- },
- Status: v1.BKPCStorStatusDone,
-}
-
-var ccbkp = v1.CStorCompletedBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: "completed-bkp-name",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: v1.CStorCompletedBackupSpec{
- BackupName: "completed-bkp-name",
- VolumeName: "pvc-1",
- SecondLastSnapName: "secondlast-snapshot-name",
- LastSnapName: "last-snapshot-name",
- },
-}
-
-var crestore = v1.CStorRestore{
- ObjectMeta: metav1.ObjectMeta{
- Name: "restore-name",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{cstortypes.PersistentVolumeLabelKey: "pvc-1"},
- Finalizers: []string{},
- },
- Spec: v1.CStorRestoreSpec{
- RestoreName: "restore-name",
- VolumeName: "pvc-1",
- RestoreSrc: "10.2.2.7",
- MaxRetryCount: 3,
- RetryCount: 2,
- StorageClass: "cstor-sc",
- Size: fourGigiByte,
- Local: true,
- },
-}
-
/****************
* LVM LOCAL PV
****************/
@@ -571,175 +197,6 @@ var localpvzfsCSICtrlSTS = appsv1.StatefulSet{
},
}
-/****************
-* JIVA
-****************/
-
-// var nsJiva = corev1.Namespace{
-// ObjectMeta: metav1.ObjectMeta{
-// Name: "jiva",
-// CreationTimestamp: metav1.Time{Time: time.Now()},
-// Labels: map[string]string{},
-// Finalizers: []string{},
-// },
-// Spec: corev1.NamespaceSpec{Finalizers: []corev1.FinalizerName{corev1.FinalizerKubernetes}},
-// }
-
-// pvc-1 JivaVolume from jiva namespace attached on worker-node-1 & 1-replica & 2.10.0
-// var jv1 = v1alpha1.JivaVolume{
-// TypeMeta: metav1.TypeMeta{},
-// ObjectMeta: metav1.ObjectMeta{
-// Name: "pvc-1",
-// Namespace: "jiva",
-// Labels: map[string]string{"nodeID": "worker-node-1"},
-// },
-// Spec: v1alpha1.JivaVolumeSpec{},
-// Status: v1alpha1.JivaVolumeStatus{
-// Status: "RW",
-// ReplicaCount: 1,
-// ReplicaStatuses: nil, // TODO
-// Phase: "Attached",
-// },
-// VersionDetails: v1alpha1.VersionDetails{
-// AutoUpgrade: false,
-// Desired: "2.10.0",
-// Status: v1alpha1.VersionStatus{
-// DependentsUpgraded: false,
-// Current: "2.10.0",
-// },
-// },
-// }
-//// pvc-2 JivaVolume from jiva namespace attached on worker-node-2, two replicas & 2.10.0
-// var jv2 = v1alpha1.JivaVolume{
-// TypeMeta: metav1.TypeMeta{},
-// ObjectMeta: metav1.ObjectMeta{
-// Name: "pvc-2",
-// Namespace: "jiva",
-// Labels: map[string]string{"nodeID": "worker-node-2"},
-// },
-// Spec: v1alpha1.JivaVolumeSpec{
-// PV: "pvc-2",
-// Capacity: "4Gi",
-// AccessType: "",
-// ISCSISpec: v1alpha1.ISCSISpec{
-// TargetIP: "1.2.3.4",
-// TargetPort: 8080,
-// Iqn: "nice-iqn",
-// },
-// MountInfo: v1alpha1.MountInfo{
-// StagingPath: "/home/staging/",
-// TargetPath: "/home/target",
-// FSType: "ext4",
-// DevicePath: "",
-// },
-// Policy: v1alpha1.JivaVolumePolicySpec{},
-// DesiredReplicationFactor: 0,
-// },
-// Status: v1alpha1.JivaVolumeStatus{
-// Status: "RO",
-// ReplicaCount: 2,
-// ReplicaStatuses: []v1alpha1.ReplicaStatus{
-// {Address: "tcp://192.168.2.7:9502", Mode: "RW"},
-// {Address: "tcp://192.168.2.8:9502", Mode: "RO"},
-// },
-// Phase: "Ready",
-// },
-// VersionDetails: v1alpha1.VersionDetails{
-// AutoUpgrade: false,
-// Desired: "2.10.0",
-// Status: v1alpha1.VersionStatus{
-// DependentsUpgraded: false,
-// Current: "2.10.0",
-// },
-// },
-// }
-var jivaPV1 = corev1.PersistentVolume{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- Namespace: "jiva",
- Labels: map[string]string{},
- Annotations: map[string]string{},
- },
- Spec: corev1.PersistentVolumeSpec{
- // 4GiB
- Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
- PersistentVolumeSource: corev1.PersistentVolumeSource{CSI: &corev1.CSIPersistentVolumeSource{Driver: util.JivaCSIDriver}},
- AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
- ClaimRef: &corev1.ObjectReference{
- Kind: "PersistentVolumeClaim",
- Namespace: "jiva-app",
- Name: "mongo-jiva",
- APIVersion: "v1",
- ResourceVersion: "123"},
- PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete,
- StorageClassName: "pvc-1-sc",
- VolumeMode: &blockFS,
- NodeAffinity: &corev1.VolumeNodeAffinity{
- Required: &corev1.NodeSelector{NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {MatchExpressions: []corev1.NodeSelectorRequirement{
- {Key: "kubernetes.io/hostname", Operator: corev1.NodeSelectorOpIn, Values: []string{"node1"}},
- }},
- }},
- },
- },
- Status: corev1.PersistentVolumeStatus{
- Phase: corev1.VolumeBound,
- Message: "",
- Reason: "",
- },
-}
-
-//var jivaPV2 = corev1.PersistentVolume{
-// TypeMeta: metav1.TypeMeta{},
-// ObjectMeta: metav1.ObjectMeta{
-// Name: "pvc-2",
-// Namespace: "jiva",
-// Labels: map[string]string{},
-// Annotations: map[string]string{},
-// },
-// Spec: corev1.PersistentVolumeSpec{
-// // 4GiB
-// Capacity: corev1.ResourceList{corev1.ResourceStorage: fourGigiByte},
-// PersistentVolumeSource: corev1.PersistentVolumeSource{CSI: &corev1.CSIPersistentVolumeSource{Driver: util.JivaCSIDriver}},
-// AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
-// ClaimRef: nil,
-// PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete,
-// StorageClassName: "pvc-2-sc",
-// VolumeMode: &blockFS,
-// NodeAffinity: &corev1.VolumeNodeAffinity{
-// Required: &corev1.NodeSelector{NodeSelectorTerms: []corev1.NodeSelectorTerm{
-// {MatchExpressions: []corev1.NodeSelectorRequirement{
-// {Key: "kubernetes.io/hostname", Operator: corev1.NodeSelectorOpIn, Values: []string{"node2"}},
-// }},
-// }},
-// },
-// },
-// Status: corev1.PersistentVolumeStatus{
-// Phase: corev1.VolumePending,
-// Message: "Storage class not found",
-// Reason: "K8s API was down",
-// },
-//}
-var pv2 = corev1.PersistentVolume{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- },
- Spec: corev1.PersistentVolumeSpec{
- Capacity: corev1.ResourceList{corev1.ResourceStorage: resource.Quantity{}},
- },
- Status: corev1.PersistentVolumeStatus{},
-}
-var pv3 = corev1.PersistentVolume{
- TypeMeta: metav1.TypeMeta{},
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc-1",
- },
- Spec: corev1.PersistentVolumeSpec{},
- Status: corev1.PersistentVolumeStatus{},
-}
-
/****************
* Local Hostpath
****************/
diff --git a/pkg/volume/volume.go b/pkg/volume/volume.go
index 02c0cb75..4d2a6372 100644
--- a/pkg/volume/volume.go
+++ b/pkg/volume/volume.go
@@ -122,15 +122,13 @@ func Describe(vols []string, openebsNs string) error {
// CasList returns a list of functions by cas-types for volume listing
func CasList() []func(*client.K8sClient, *corev1.PersistentVolumeList, string) ([]metav1.TableRow, error) {
// a good hack to implement immutable lists in Golang & also write tests for it
- return []func(*client.K8sClient, *corev1.PersistentVolumeList, string) ([]metav1.TableRow, error){GetJiva, GetCStor, GetZFSLocalPVs, GetLVMLocalPV, GetLocalHostpath}
+ return []func(*client.K8sClient, *corev1.PersistentVolumeList, string) ([]metav1.TableRow, error){GetZFSLocalPVs, GetLVMLocalPV, GetLocalHostpath}
}
// CasListMap returns a map cas-types to functions for volume listing
func CasListMap() map[string]func(*client.K8sClient, *corev1.PersistentVolumeList, string) ([]metav1.TableRow, error) {
// a good hack to implement immutable maps in Golang & also write tests for it
return map[string]func(*client.K8sClient, *corev1.PersistentVolumeList, string) ([]metav1.TableRow, error){
- util.JivaCasType: GetJiva,
- util.CstorCasType: GetCStor,
util.ZFSCasType: GetZFSLocalPVs,
util.LVMCasType: GetLVMLocalPV,
util.LocalPvHostpathCasType: GetLocalHostpath,
@@ -141,8 +139,6 @@ func CasListMap() map[string]func(*client.K8sClient, *corev1.PersistentVolumeLis
func CasDescribeMap() map[string]func(*client.K8sClient, *corev1.PersistentVolume) error {
// a good hack to implement immutable maps in Golang & also write tests for it
return map[string]func(*client.K8sClient, *corev1.PersistentVolume) error{
- util.JivaCasType: DescribeJivaVolume,
- util.CstorCasType: DescribeCstorVolume,
util.ZFSCasType: DescribeZFSLocalPVs,
util.LVMCasType: DescribeLVMLocalPVs,
util.LocalPvHostpathCasType: DescribeLocalHostpathVolume,
diff --git a/pkg/volume/volume_test.go b/pkg/volume/volume_test.go
index f81c9973..d07da460 100644
--- a/pkg/volume/volume_test.go
+++ b/pkg/volume/volume_test.go
@@ -25,7 +25,7 @@ import (
"testing"
)
-const supportedCasTypeCount = 5
+const supportedCasTypeCount = 3
// TestCasList is a dummy test which ensures that each cas-type volumes can be
// listed individually as well as collectively
diff --git a/pkg/volume/zfs_localpv_test.go b/pkg/volume/zfs_localpv_test.go
index fdfe6a25..6e32ed8f 100644
--- a/pkg/volume/zfs_localpv_test.go
+++ b/pkg/volume/zfs_localpv_test.go
@@ -49,12 +49,11 @@ func TestGetZFSLocalPVs(t *testing.T) {
name: "no zfs volumes present",
args: args{
c: &client.K8sClient{
- Ns: "random-namespace",
- ZFCS: fake.NewSimpleClientset(),
- K8sCS: k8sfake.NewSimpleClientset(),
- OpenebsCS: nil,
+ Ns: "random-namespace",
+ ZFCS: fake.NewSimpleClientset(),
+ K8sCS: k8sfake.NewSimpleClientset(),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, pv2, pv3}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{lvmPV1, localHostpathPv1}},
zfsReactors: zfsVolNotExists,
openebsNS: "zfslocalpv",
},
@@ -69,7 +68,7 @@ func TestGetZFSLocalPVs(t *testing.T) {
K8sCS: k8sfake.NewSimpleClientset(&localpvzfsCSICtrlSTS),
ZFCS: fake.NewSimpleClientset(&zfsVol1),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, zfsPV1}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{lvmPV1, zfsPV1}},
openebsNS: "zfslocalpv",
},
wantErr: false,
@@ -97,11 +96,11 @@ func TestGetZFSLocalPVs(t *testing.T) {
name: "only one zfs volume present, namespace conflicts",
args: args{
c: &client.K8sClient{
- Ns: "jiva",
+ Ns: "lvm",
K8sCS: k8sfake.NewSimpleClientset(&localpvzfsCSICtrlSTS),
ZFCS: fake.NewSimpleClientset(&zfsVol1),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, zfsPV1}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{lvmPV1, zfsPV1}},
openebsNS: "zfslocalpvXYZ",
},
wantErr: false,
@@ -111,11 +110,11 @@ func TestGetZFSLocalPVs(t *testing.T) {
name: "controller sts not present",
args: args{
c: &client.K8sClient{
- Ns: "jiva",
+ Ns: "lvm",
K8sCS: k8sfake.NewSimpleClientset(),
ZFCS: fake.NewSimpleClientset(&zfsVol1),
},
- pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{jivaPV1, zfsPV1}},
+ pvList: &corev1.PersistentVolumeList{Items: []corev1.PersistentVolume{lvmPV1, zfsPV1}},
openebsNS: "zfslocalpv",
},
wantErr: false,
@@ -181,7 +180,7 @@ func TestDescribeZFSLocalPVs(t *testing.T) {
},
{"one zfs volume present and some other volume asked for",
args{c: &client.K8sClient{Ns: "zfs", K8sCS: k8sfake.NewSimpleClientset(&zfsPV1), ZFCS: fake.NewSimpleClientset(&zfsVol1)},
- vol: &cstorPV2,
+ vol: &lvmPV1,
zfsfunc: zfsVolNotExists},
false,
},