Skip to content

Commit

Permalink
Next-generation testcase utils
Browse files Browse the repository at this point in the history
Our testing tools have served us well, and there are plenty of good things with them that we absolutely need to keep.
We have grown a very large test suite though, and the sheer size of it is putting pressure on our tooling which
is starting to show cracks.
Besides bugs per se (double execution) - which we hopefully fixed - our design is reaching its limits.
We do have rather impactful issues with regard to test isolation and test concurrency, leading to situations where
it is hard or outright impossible to figure out which test is causing a cascading failure, or why adding a new test
file that passes on its own breaks everything.

Furthermore, as we are not prescriptive on certain things, we do see a lot of negative patterns emerging from test authors:
- defer being used instead of t.Cleanup
- not calling cleanup routines before running tests
- Cwd for binary calls is by default the current working directory of the test process - this is causing a variety of issues,
as it could very well be read-only (lima), and should by default be a temp directory
- manipulating the environment directly - which has side-effects for other tests
- tests becoming big blurbs of mixed together setup, cleanup, and actual test routines - making them hard to read and figuring
out what is actually being tested
- in-test homegrown abstractions being inherently repetitive, with the same boilerplate over and over again
- structuring tests and subtests being left as an exercise to the developer - leading to a wide variety of approaches and complex boilerplate
- hard to debug: a lot of the assert methods do not provide any feedback whatsoever on what was the command that actually failed, or what was the output, or environment of it
- icmd.Expected showing its limits, and making assumptions that there is only one error you can test, or that you can only
test the output if exitCode == 0
- running commands with other binaries than the current target (eg: not calling base.Cmd) being left as an exercise to the developer,
leading to all of the issues above (Chdir, Env manipulation, no debugging output, etc)
- no-parallel being the default - unless specified otherwise - which should be the other way around
- very rarely testing stderr in case of error - partly because we do not use typed errors, but also because it is cumbersome /
not obvious / limited

This new tooling offers a set of abstractions that should address all of these and encourage more expressive, better structured,
better isolated, more debuggable tests.

Obviously:
- this is a first draft - albeit informed by months spent debugging our test suites - we should expect this to evolve
- rewriting tests with this framework will take time
- this is not a kill-it-all-and-replace - the existing test tooling is still there, and replacement can be incremental
- the main purpose is to make writing new tests easier, safer, cleaner - not necessarily to replace everything we have already

Signed-off-by: apostasie <[email protected]>
  • Loading branch information
apostasie committed Sep 7, 2024
1 parent 7542c87 commit 2aa72c0
Show file tree
Hide file tree
Showing 2 changed files with 380 additions and 0 deletions.
177 changes: 177 additions & 0 deletions pkg/testutil/testcase/command.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,177 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testcase

import (
"fmt"
"os"
"strings"
"testing"

"gotest.tools/v3/assert"
"gotest.tools/v3/icmd"

"github.com/containerd/nerdctl/v2/pkg/testutil"
)

// Command represents any command that will be run, to be compared against an
// Expected. The With* methods return the receiver, allowing fluent chaining.
type Command interface {
	// WithBinary sets the binary to execute.
	WithBinary(binary string) Command
	// WithArgs appends arguments to pass to the binary.
	WithArgs(args ...string) Command
	// WithEnv merges the provided variables into the command environment.
	WithEnv(env map[string]string) Command
	// Run executes the command and verifies the result against expect.
	Run(t *testing.T, expect *Expected)

	// Short term, allow passing in a base while the legacy tooling coexists
	// with this framework.
	WithBase(base *testutil.Base) Command
}

// GenericCommand is a concrete Command implementation, that can be run against expectations.
// A Command can be used as TestCase Command obviously, but also as part of a Setup or Cleanup routine.
type GenericCommand struct {
	Home       string            // if set, passed to the subprocess as HOME
	Path       string            // if set, passed to the subprocess as PATH
	WorkingDir string            // working directory of the subprocess; falls back to tempDir when empty
	Env        map[string]string // extra environment, appended after the inherited environment at Run time

	helper     string   // optional wrapper binary; the real binary becomes its first argument
	helperArgs []string // arguments for the helper binary
	binary     string   // the binary to execute
	extraArgs  []string // arguments for the binary
	tempDir    string   // fallback working directory — NOTE(review): nothing in these two files assigns it; confirm it gets wired in
}

// WithBinary sets the binary to execute and returns the command for chaining.
func (cc *GenericCommand) WithBinary(binary string) Command {
	cc.binary = binary
	return cc
}

// WithHelper wraps the command with a helper binary and its arguments; at Run
// time the actual binary becomes the first argument passed to the helper.
// Note: WithHelper is not part of the Command interface.
func (cc *GenericCommand) WithHelper(binary string, args ...string) Command {
	cc.helper = binary
	cc.helperArgs = args
	return cc
}

// WithArgs appends args to the arguments passed to the binary and returns the
// command for chaining. Repeated calls accumulate.
func (cc *GenericCommand) WithArgs(args ...string) Command {
	cc.extraArgs = append(cc.extraArgs, args...)
	return cc
}

// WithEnv merges env into the command's extra environment, overwriting any
// keys previously set, and returns the command for chaining.
func (cc *GenericCommand) WithEnv(env map[string]string) Command {
	if cc.Env == nil {
		cc.Env = make(map[string]string, len(env))
	}
	for key, value := range env {
		cc.Env[key] = value
	}
	return cc
}

// Run executes the command and verifies the result against the provided
// expectations: exit code first, then each Expected.Errors entry against
// stderr, and finally Expected.Output against stdout. A nil expect skips all
// verification (useful for best-effort cleanup commands). The subprocess
// inherits the test process environment, optionally augmented by Home, Path,
// and Env, and runs inside tempDir unless WorkingDir is set.
func (cc *GenericCommand) Run(t *testing.T, expect *Expected) {
	t.Helper()

	// When a helper is configured, the real binary becomes its first argument
	binary := cc.binary
	args := cc.extraArgs
	if cc.helper != "" {
		args = append([]string{binary}, args...)
		args = append(cc.helperArgs, args...)
		binary = cc.helper
	}

	// Create the command and set the env
	icmdCmd := icmd.Command(binary, args...)
	icmdCmd.Env = os.Environ()

	// Pass along explicit HOME and PATH
	if cc.Home != "" {
		icmdCmd.Env = append(icmdCmd.Env, fmt.Sprintf("%s=%s", "HOME", cc.Home))
	}

	if cc.Path != "" {
		icmdCmd.Env = append(icmdCmd.Env, fmt.Sprintf("%s=%s", "PATH", cc.Path))
	}

	// Ensure the subprocess gets executed in a temporary directory unless instructed otherwise
	icmdCmd.Dir = cc.WorkingDir
	if icmdCmd.Dir == "" {
		icmdCmd.Dir = cc.tempDir
	}

	// Add any extra env
	for k, v := range cc.Env {
		icmdCmd.Env = append(icmdCmd.Env, fmt.Sprintf("%s=%s", k, v))
	}

	result := icmd.RunCmd(icmdCmd)

	// Check our expectations
	if expect != nil {
		// ExitCode goes first — the full result is attached to the failure
		// message so a broken test is debuggable from its output alone
		assert.Assert(t, expect.ExitCode == result.ExitCode, fmt.Sprintf("Expected exit code: %d", expect.ExitCode)+result.String())
		// Range through the expected errors and confirm they are seen on stderr
		for _, expectErr := range expect.Errors {
			assert.Assert(t, strings.Contains(result.Stderr(), expectErr.Error()),
				fmt.Sprintf("Expected error: %q to be found in stderr", expectErr.Error())+result.String())
		}
		// Finally check the output, passing the result and environment as debug info
		if expect.Output != nil {
			expect.Output(t, result.Stdout(), result.String()+"Env:\n"+strings.Join(icmdCmd.Env, "\n"))
		}
	}
}

// WithBase is a no-op on GenericCommand: a plain generic command does not use
// a testutil.Base. It exists only to satisfy the Command interface.
func (cc *GenericCommand) WithBase(base *testutil.Base) Command {
	return cc
}

// NewCommand returns a function suitable for TestCase.Command, producing a
// BaseCommand preloaded with args. The base itself is attached later (by
// TestCase.Run) through WithBase.
func NewCommand(args ...string) func(data *Data) Command {
	return func(data *Data) Command {
		return &BaseCommand{
			GenericCommand: GenericCommand{extraArgs: args},
		}
	}
}

// BaseCommand is a Command backed by a testutil.Base: at Run time, the base
// binary and its arguments are prepended to the command.
type BaseCommand struct {
	GenericCommand
	base *testutil.Base // the target (set through WithBase) whose binary and args are used at Run time
}

// Run executes the command against the attached base target, prepending the
// base binary and arguments, then verifies the result against expect.
// The base arguments are copied (never appended to in place), so the shared
// base.Args backing array cannot be clobbered by this call, and the receiver's
// own args are restored afterwards so repeated Run calls do not accumulate
// base arguments.
func (cc *BaseCommand) Run(t *testing.T, expect *Expected) {
	t.Helper()
	cc.binary = cc.base.Binary
	// Copy base.Args into a fresh slice, then append the user-provided args
	userArgs := cc.extraArgs
	cc.extraArgs = append(append([]string(nil), cc.base.Args...), userArgs...)
	// FIXME do we also need the env of base? Probably not.
	cc.GenericCommand.Run(t, expect)
	// Restore, so a subsequent Run does not double-prepend the base args
	cc.extraArgs = userArgs
}

// WithBinary is deliberately a no-op on BaseCommand: the binary is always
// taken from the attached base at Run time.
func (cc *BaseCommand) WithBinary(binary string) Command {
	// no-op
	return cc
}

// WithBase attaches the testutil.Base whose binary and arguments will be
// prepended at Run time, and returns the command for chaining.
func (cc *BaseCommand) WithBase(base *testutil.Base) Command {
	cc.base = base
	return cc
}
203 changes: 203 additions & 0 deletions pkg/testutil/testcase/testcase.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,203 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testcase

import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"

"gotest.tools/v3/assert"

"github.com/containerd/nerdctl/v2/pkg/testutil"
)

// Data holds meta information about a test.
type Data struct {
	Labels map[string]string // free-form additional data, at the test's discretion

	testID  string // unique test id that the test rig can use to name resources, for example
	tempDir string // self-cleaning temporary directory
}

// Identifier returns the unique test identifier, usable to name test resources.
func (d *Data) Identifier() string {
	return d.testID
}

// TempDir returns the test's self-cleaning temporary directory.
func (d *Data) TempDir() string {
	return d.tempDir
}

// Expected expresses the expected output of a command.
type Expected struct {
	// ExitCode to expect
	ExitCode int
	// Any error that once serialized should be seen in stderr
	Errors []error
	// Output function to match against stdout; info carries debugging context
	// (command result and environment) to include in failure messages
	Output func(t *testing.T, stdout string, info string)
}

// Contains is to be used for Expected.Output, to verify that the comparison
// string appears somewhere in the command's stdout.
func Contains(compare string) func(t *testing.T, stdout string, info string) {
	return func(t *testing.T, stdout string, info string) {
		message := fmt.Sprintf("Expected output to contain: %q", compare) + info
		assert.Assert(t, strings.Contains(stdout, compare), message)
	}
}

// DoesNotContain is to be used for Expected.Output, to verify that the
// comparison string does NOT appear in the command's stdout.
func DoesNotContain(compare string) func(t *testing.T, stdout string, info string) {
	return func(t *testing.T, stdout string, info string) {
		message := fmt.Sprintf("Expected output to not contain: %q", compare) + info
		assert.Assert(t, !strings.Contains(stdout, compare), message)
	}
}

// Equals is to be used for Expected.Output, to verify that stdout is exactly
// the comparison string.
func Equals(compare string) func(t *testing.T, stdout string, info string) {
	return func(t *testing.T, stdout string, info string) {
		assert.Equal(t, compare, stdout, info)
	}
}

// TestCase describes an entire test-case, including data, setup and cleanup routines, command and expectations.
type TestCase struct {
	// Description is mandatory, and doubles as the subtest name.
	Description string

	NoParallel         bool // Allows disabling parallel execution (parallel is otherwise the default)
	DockerIncompatible bool // Mark this test as not compatible with Docker — NOTE(review): not read anywhere in this file; confirm where it is enforced
	PrivateMode        bool // Marking this as private will use a dedicated namespace, host-dirs, and DOCKER_CONFIG dir
	// Note that this is not bulletproof, and some cross namespace leakage or test collision might still happen

	Setup    func(data *Data)           // optional routine run before the command
	Cleanup  func(data *Data)           // optional routine, run both before setup and registered to run after the test
	Command  func(data *Data) Command   // optional command to run against Expected
	Expected func(data *Data) *Expected // expectations; must be non-nil when Command is set

	NerdctlToml string            // optional custom nerdctl.toml content, written to the test tempdir
	Env         map[string]string // extra environment passed to the command
	Data        *Data             // per-test meta information; created on demand by Run

	SubTests []*TestCase // nested test cases, run inside this test's subtest
}

// Run executes the test case as a named subtest: it populates Data and Env,
// builds the isolation environment (a dedicated namespace, data-root,
// hosts-dir and DOCKER_CONFIG under the tempdir when PrivateMode is set),
// registers and pre-runs cleanup, runs setup, then the command against its
// expectations, and finally any subtests.
func (test *TestCase) Run(t *testing.T) {
	assert.Assert(t, test.Description != "", "A test description cannot be empty")

	t.Run(test.Description, func(tt *testing.T) {
		// Ensure we have data
		if test.Data == nil {
			test.Data = &Data{}
		}

		if test.Env == nil {
			test.Env = map[string]string{}
		}

		// Set identifier and tempdir
		test.Data.testID = testutil.Identifier(t)
		test.Data.tempDir = tt.TempDir()

		// Create a base
		var base *testutil.Base
		// If private, set hosts-dir, data-root, and DOCKER_CONFIG to tempDir, create a dedicated namespace, and make
		// sure we clean it up - for nerdctl
		// Docker does not have these facilities to isolate as much, so, disable parallel
		if test.PrivateMode {
			pvNamespace := "nerdctl-test-" + test.Data.testID
			base = testutil.NewBaseWithNamespace(t, pvNamespace)
			test.Env["DOCKER_CONFIG"] = test.Data.tempDir
			if base.Target == testutil.Nerdctl {
				test.Env["NERDCTL_TOML"] = filepath.Join(test.Data.tempDir, "nerdctl.toml")
				// Note: the "=" is required so the value is attached to the flag
				base.Args = append(base.Args, []string{"--data-root=" + test.Data.tempDir, "--hosts-dir=" + test.Data.TempDir()}...)
				cleanup := func() {
					(&GenericCommand{}).
						WithBinary("nerdctl").
						WithArgs("namespace", "remove", pvNamespace).
						WithEnv(test.Env).
						Run(t, nil)
				}
				// Clean up a possible leftover namespace first, then again after the test
				cleanup()
				tt.Cleanup(cleanup)
			} else {
				test.NoParallel = true
			}
		} else {
			base = testutil.NewBase(t)
		}

		// Write a custom nerdctl.toml into the tempdir if requested
		if test.NerdctlToml != "" {
			dest := filepath.Join(test.Data.tempDir, "nerdctl.toml")
			test.Env["NERDCTL_TOML"] = dest
			err := os.WriteFile(dest, []byte(test.NerdctlToml), 0400)
			assert.NilError(t, err, "failed to write custom nerdctl toml file for test")
		}

		// Set parallel unless asked not to
		if !test.NoParallel {
			tt.Parallel()
		}

		// Cleanup is run first (to wipe any leftover from a previous aborted
		// run), then registered to run again after the test
		if test.Cleanup != nil {
			test.Cleanup(test.Data)
			tt.Cleanup(func() {
				test.Cleanup(test.Data)
			})
		}

		// Setup
		if test.Setup != nil {
			test.Setup(test.Data)
		}

		// Run the command if any, with expectations
		// NOTE(review): the command's tempDir is never wired from Data.tempDir
		// here, so a GenericCommand without WorkingDir falls back to the
		// process working directory — confirm this is intended
		if test.Command != nil {
			cmd := test.Command(test.Data)
			// Set the base here for base-able commands
			cmd.WithBase(base)
			// And the env
			cmd.WithEnv(test.Env)
			assert.Assert(t, test.Expected != nil, "Expectation for a test cannot be nil")
			expect := test.Expected(test.Data)
			// We are not in the business of testing docker error output
			if base.Target != testutil.Nerdctl {
				expect.Errors = nil
			}
			cmd.Run(t, expect)
		}

		// Finally, descend into subtests
		for _, subTest := range test.SubTests {
			subTest.Run(tt)
		}
	})
}

// Expects is a convenience helper to build a TestCase.Expected function from
// an exit code, a set of expected errors, and an output matcher.
func Expects(exitCode int, errors []error, output func(t *testing.T, stdout string, info string)) func(data *Data) *Expected {
	return func(data *Data) *Expected {
		expectation := &Expected{}
		expectation.ExitCode = exitCode
		expectation.Errors = errors
		expectation.Output = output
		return expectation
	}
}

0 comments on commit 2aa72c0

Please sign in to comment.