From ed99674c10cb1270822905dbd1fb690fd083041b Mon Sep 17 00:00:00 2001 From: Michael Sauter Date: Fri, 30 Jun 2023 15:56:13 +0200 Subject: [PATCH] WIP --- build/package/scripts/build-go.sh | 4 +- build/package/scripts/build-gradle.sh | 2 +- build/package/scripts/build-npm.sh | 2 +- build/package/scripts/build-python.sh | 2 +- build/package/scripts/cache-build.sh | 2 +- build/package/scripts/configure-truststore.sh | 2 +- build/package/scripts/copy-artifacts.sh | 2 +- build/package/scripts/copy-build-if-cached.sh | 2 +- .../package/scripts/download-aqua-scanner.sh | 2 +- ...supply-sonar-project-properties-default.sh | 2 +- cmd/taskdoc/main.go | 69 ++++++ cmd/taskmanifest/main.go | 69 ++++++ deploy/install.sh | 2 +- docs/authoring-tasks.adoc | 4 +- internal/docs/tasks.go | 60 ++++- internal/projectpath/root.go | 4 + pkg/odstasktest/assertions.go | 40 ++++ pkg/odstasktest/doc.go | 77 +++++++ pkg/odstasktest/install.go | 45 ++++ pkg/odstasktest/services.go | 52 +++++ pkg/odstasktest/workspace.go | 45 ++++ pkg/pipelinectxt/context.go | 6 + pkg/sonar/quality_gate_test.go | 6 + pkg/taskdoc/taskdoc.go | 78 +++++++ pkg/taskmanifest/taskmanifest.go | 17 ++ pkg/tektontaskrun/cluster.go | 195 ++++++++++++++++ pkg/tektontaskrun/doc.go | 74 +++++++ pkg/tektontaskrun/events.go | 50 +++++ pkg/tektontaskrun/interrupt.go | 18 ++ pkg/tektontaskrun/logs.go | 111 ++++++++++ pkg/tektontaskrun/namespace.go | 95 ++++++++ pkg/tektontaskrun/random.go | 17 ++ pkg/tektontaskrun/run.go | 208 ++++++++++++++++++ pkg/tektontaskrun/task.go | 38 ++++ pkg/tektontaskrun/taskrun.go | 208 ++++++++++++++++++ pkg/tektontaskrun/workspace.go | 14 ++ scripts/build-and-push-images.sh | 2 +- scripts/build-artifact-download.sh | 2 +- scripts/install-inside-kind.sh | 69 +++--- scripts/install-tekton-pipelines.sh | 28 +-- scripts/kind-with-registry.sh | 91 ++++---- scripts/restart-bitbucket.sh | 4 +- scripts/run-bitbucket.sh | 29 ++- scripts/run-nexus.sh | 33 ++- scripts/run-sonarqube.sh | 28 ++- 
scripts/run-tls-proxy.sh | 2 +- scripts/waitfor-bitbucket.sh | 4 +- scripts/waitfor-nexus.sh | 4 +- scripts/waitfor-sonarqube.sh | 4 +- scripts/web-terminal-install.sh | 2 +- 50 files changed, 1776 insertions(+), 150 deletions(-) create mode 100644 cmd/taskdoc/main.go create mode 100644 cmd/taskmanifest/main.go create mode 100644 pkg/odstasktest/assertions.go create mode 100644 pkg/odstasktest/doc.go create mode 100644 pkg/odstasktest/install.go create mode 100644 pkg/odstasktest/services.go create mode 100644 pkg/odstasktest/workspace.go create mode 100644 pkg/taskdoc/taskdoc.go create mode 100644 pkg/taskmanifest/taskmanifest.go create mode 100644 pkg/tektontaskrun/cluster.go create mode 100644 pkg/tektontaskrun/doc.go create mode 100644 pkg/tektontaskrun/events.go create mode 100644 pkg/tektontaskrun/interrupt.go create mode 100644 pkg/tektontaskrun/logs.go create mode 100644 pkg/tektontaskrun/namespace.go create mode 100644 pkg/tektontaskrun/random.go create mode 100644 pkg/tektontaskrun/run.go create mode 100644 pkg/tektontaskrun/task.go create mode 100644 pkg/tektontaskrun/taskrun.go create mode 100644 pkg/tektontaskrun/workspace.go diff --git a/build/package/scripts/build-go.sh b/build/package/scripts/build-go.sh index f4c6f4c9..4deddf89 100755 --- a/build/package/scripts/build-go.sh +++ b/build/package/scripts/build-go.sh @@ -16,7 +16,7 @@ ARTIFACT_PREFIX="" PRE_TEST_SCRIPT="" DEBUG="${DEBUG:-false}" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --working-dir) WORKING_DIR="$2"; shift;; @@ -131,4 +131,4 @@ if [ $exitcode != 0 ]; then exit $exitcode fi echo "Building ..." 
-go build -gcflags "all=-trimpath=$(pwd)" -o "${OUTPUT_DIR}/app" \ No newline at end of file +go build -gcflags "all=-trimpath=$(pwd)" -o "${OUTPUT_DIR}/app" diff --git a/build/package/scripts/build-gradle.sh b/build/package/scripts/build-gradle.sh index 627c0b31..65bdb83d 100755 --- a/build/package/scripts/build-gradle.sh +++ b/build/package/scripts/build-gradle.sh @@ -14,7 +14,7 @@ gradle_build_dir="build" gradle_additional_tasks= gradle_options= -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --working-dir) working_dir="$2"; shift;; diff --git a/build/package/scripts/build-npm.sh b/build/package/scripts/build-npm.sh index 19f20b8f..74215e2b 100755 --- a/build/package/scripts/build-npm.sh +++ b/build/package/scripts/build-npm.sh @@ -30,7 +30,7 @@ ARTIFACT_PREFIX="" DEBUG="${DEBUG:-false}" COPY_NODE_MODULES="false" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --working-dir) WORKING_DIR="$2"; shift;; diff --git a/build/package/scripts/build-python.sh b/build/package/scripts/build-python.sh index a06b0efb..4df18461 100755 --- a/build/package/scripts/build-python.sh +++ b/build/package/scripts/build-python.sh @@ -20,7 +20,7 @@ ARTIFACT_PREFIX="" PRE_TEST_SCRIPT="" DEBUG="${DEBUG:-false}" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --working-dir) WORKING_DIR="$2"; shift;; diff --git a/build/package/scripts/cache-build.sh b/build/package/scripts/cache-build.sh index 2a178e20..e631b3b5 100755 --- a/build/package/scripts/cache-build.sh +++ b/build/package/scripts/cache-build.sh @@ -15,7 +15,7 @@ CACHE_BUILD_KEY= CACHE_LOCATION_USED_PATH= DEBUG="${DEBUG:-false}" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --working-dir) WORKING_DIR="$2"; shift;; diff --git a/build/package/scripts/configure-truststore.sh b/build/package/scripts/configure-truststore.sh index 282645c1..7015caf3 100755 --- a/build/package/scripts/configure-truststore.sh +++ b/build/package/scripts/configure-truststore.sh @@ -7,7 
+7,7 @@ src_truststore="${JAVA_HOME}/lib/security/cacerts" src_pass="changeit" dest_pass="changeit" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --src-store) src_truststore="$2"; shift;; diff --git a/build/package/scripts/copy-artifacts.sh b/build/package/scripts/copy-artifacts.sh index 71768173..24e28998 100755 --- a/build/package/scripts/copy-artifacts.sh +++ b/build/package/scripts/copy-artifacts.sh @@ -8,7 +8,7 @@ CP="${GNU_CP:-cp}" DEBUG="${DEBUG:-false}" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --debug) DEBUG="$2"; shift;; --debug=*) DEBUG="${1#*=}";; diff --git a/build/package/scripts/copy-build-if-cached.sh b/build/package/scripts/copy-build-if-cached.sh index 50316f57..7263fc65 100755 --- a/build/package/scripts/copy-build-if-cached.sh +++ b/build/package/scripts/copy-build-if-cached.sh @@ -17,7 +17,7 @@ CACHE_LOCATION_USED_PATH= WORKING_DIR="." DEBUG="${DEBUG:-false}" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --cache-build) CACHE_BUILD="$2"; shift;; diff --git a/build/package/scripts/download-aqua-scanner.sh b/build/package/scripts/download-aqua-scanner.sh index 81761655..29e81300 100755 --- a/build/package/scripts/download-aqua-scanner.sh +++ b/build/package/scripts/download-aqua-scanner.sh @@ -5,7 +5,7 @@ md5_bin="${MD5_BIN:-"md5sum"}" aqua_scanner_url="" bin_dir=".ods-cache/bin" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --bin-dir) bin_dir="$2"; shift;; diff --git a/build/package/scripts/supply-sonar-project-properties-default.sh b/build/package/scripts/supply-sonar-project-properties-default.sh index f6c56fbb..8f35ef4d 100755 --- a/build/package/scripts/supply-sonar-project-properties-default.sh +++ b/build/package/scripts/supply-sonar-project-properties-default.sh @@ -3,7 +3,7 @@ set -eu working_dir="." 
-while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --working-dir) working_dir="$2"; shift;; --working-dir=*) working_dir="${1#*=}";; diff --git a/cmd/taskdoc/main.go b/cmd/taskdoc/main.go new file mode 100644 index 00000000..1550385b --- /dev/null +++ b/cmd/taskdoc/main.go @@ -0,0 +1,69 @@ +// Package taskdoc implements documentation rendering for tasks. +// It is intended to be run via `go run`, passing a task YAML manifest +// and a description in Asciidoctor format. The combined result will be +// written to the specified destination. +// +// Example invocation: +// +// go run github.com/opendevstack/ods-pipeline/cmd/taskdoc \ +// -task tasks/my-task.yaml \ +// -description build/docs/my-task.adoc \ +// -destination docs/my-task.adoc +// +// By default, taskdoc will use the template located at +// docs/tasks/template.adoc.tmpl to produce the resulting file. Another +// template can be specified via -template: +// +// go run github.com/opendevstack/ods-pipeline/cmd/taskdoc \ +// -task tasks/my-task.yaml \ +// -description build/docs/my-task.adoc \ +// -template /path/to/my-custom-template.adoc.tmpl \ +// -destination docs/my-task.adoc +package main + +import ( + "flag" + "log" + "os" + "text/template" + + "github.com/opendevstack/ods-pipeline/internal/projectpath" + "github.com/opendevstack/ods-pipeline/pkg/taskdoc" +) + +func main() { + taskFile := flag.String("task", "", "Task manifest") + descriptionFile := flag.String("description", "", "Description snippet") + templateFile := flag.String("template", projectpath.RootedPath("docs/tasks/template.adoc.tmpl"), "Template file") + destinationFile := flag.String("destination", "", "Destination file") + flag.Parse() + if err := render(*taskFile, *descriptionFile, *templateFile, *destinationFile); err != nil { + log.Fatal(err) + } +} + +func render(taskFile, descriptionFile, templateFile, destinationFile string) error { + t, err := os.ReadFile(taskFile) + if err != nil { + return err + } + d, err := 
os.ReadFile(descriptionFile) + if err != nil { + return err + } + tmpl, err := template.ParseFiles(templateFile) + if err != nil { + return err + } + + task, err := taskdoc.ParseTask(t, d) + if err != nil { + return err + } + + w, err := os.Create(destinationFile) + if err != nil { + return err + } + return taskdoc.RenderTaskDocumentation(w, tmpl, task) +} diff --git a/cmd/taskmanifest/main.go b/cmd/taskmanifest/main.go new file mode 100644 index 00000000..e36ebb91 --- /dev/null +++ b/cmd/taskmanifest/main.go @@ -0,0 +1,69 @@ +// Package taskmanifest implements manifest rendering for tasks. +// It is intended to be run via `go run`, passing a task YAML template +// and data to be rendered. The combined result will be +// written to the specified destination. The -data flag can be passed +// multiple times and may specify any key-value combination, which can then +// be consumed in the template through Go's text/template package. E.g. +// passing -data Foo=bar will replace {{.Foo}} in the template with bar. 
+// +// Example invocation: +// +// go run github.com/opendevstack/ods-pipeline/cmd/taskmanifest \ +// -data ImageRepository=ghcr.io/my-org/my-repo \ +// -data Version=latest \ +// -template build/tasks/my-task.yaml \ +// -destination tasks/my-task.yaml +package main + +import ( + "flag" + "fmt" + "log" + "os" + "strings" + "text/template" + + "github.com/opendevstack/ods-pipeline/pkg/taskmanifest" + "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" +) + +func main() { + templateFile := flag.String("template", "", "Template file") + destinationFile := flag.String("destination", "", "Destination file") + cc := tektontaskrun.NewClusterConfig() + mf := &MapFlag{v: cc.DefaultTaskTemplateData()} + flag.Var(mf, "data", "Key-value pairs") + flag.Parse() + if err := render(*templateFile, *destinationFile, mf.v); err != nil { + log.Fatal(err) + } +} + +func render(templateFile, destinationFile string, data map[string]string) error { + tmpl, err := template.ParseFiles(templateFile) + if err != nil { + return err + } + + w, err := os.Create(destinationFile) + if err != nil { + return err + } + return taskmanifest.RenderTask(w, tmpl, data) +} + +type MapFlag struct { + v map[string]string +} + +func (mf *MapFlag) String() string { + return fmt.Sprintf("%v", mf.v) +} +func (mf *MapFlag) Set(v string) error { + key, value, ok := strings.Cut(v, "=") + if !ok { + return fmt.Errorf("must have = sign") + } + mf.v[key] = value + return nil +} diff --git a/deploy/install.sh b/deploy/install.sh index 88225e12..aa10e473 100755 --- a/deploy/install.sh +++ b/deploy/install.sh @@ -63,7 +63,7 @@ function usage { \n\t\t--sonar-auth 'auth-token' \n\n" "$0" } -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do # shellcheck disable=SC2034 case $1 in diff --git a/docs/authoring-tasks.adoc b/docs/authoring-tasks.adoc index 0ad602a0..f825831c 100644 --- a/docs/authoring-tasks.adoc +++ b/docs/authoring-tasks.adoc @@ -102,9 +102,7 @@ In theory you can use pretty much any image that works in 
OpenShift (e.g. the im === How do I create my own container image to use in a task? -In OpenShift, the easiest way is by creating an `ImageStream` and a `BuildConfig`. See the link:https://docs.openshift.com/container-platform/latest/cicd/builds/understanding-image-builds.html[OpenShift documentation on builds] for more information. You may also use the YAML definitions in `deploy/ods-pipeline/charts/images` as an example. - -Occasionally, you might want to extend the images used in an official tasks, e.g. to deploy additional CA certificates, configure proxy settings, etc. The `images` subchart of `ods-pipeline` provides build configurations that allow you to create images that are based on the official `ods-pipeline` images from ghcr.io. The build configurations include inline Dockerfiles that you can adjust to suit your specific needs. +In OpenShift, the easiest way is by creating an `ImageStream` and a `BuildConfig`. See the link:https://docs.openshift.com/container-platform/latest/cicd/builds/understanding-image-builds.html[OpenShift documentation on builds] for more information. === How can I test my tasks? 
diff --git a/internal/docs/tasks.go b/internal/docs/tasks.go index 9189b480..0ed6c227 100644 --- a/internal/docs/tasks.go +++ b/internal/docs/tasks.go @@ -2,11 +2,12 @@ package docs import ( "bytes" + "errors" "fmt" + "io" "log" "os" "path/filepath" - "strings" "text/template" "github.com/opendevstack/ods-pipeline/internal/command" @@ -37,20 +38,11 @@ func renderTemplate(targetDir, targetFilename string, data Task) error { if err != nil { return err } - templateFilename := filepath.Join(targetDir, "template.adoc.tmpl") - templateFileParts := strings.Split(templateFilename, "/") - templateDisplayname := templateFileParts[len(templateFileParts)-1] - _, err = targetFile.WriteString( - "// Document generated by internal/documentation/tasks.go from " + templateDisplayname + "; DO NOT EDIT.\n\n", - ) + tmpl, err := template.ParseFiles(filepath.Join(targetDir, "template.adoc.tmpl")) if err != nil { return err } - tmpl, err := template.ParseFiles(templateFilename) - if err != nil { - return err - } - return tmpl.Execute(targetFile, data) + return RenderTaskDocumentation(targetFile, tmpl, &data) } func parseTasks(helmTemplateOutput []byte) ([]*tekton.Task, error) { @@ -131,3 +123,47 @@ func RenderTasks(tasksSourceDir, descriptionsSourceDir, targetDir string) error } return nil } + +func ParseTask(f []byte, desc []byte) (*Task, error) { + var t tekton.Task + err := yaml.Unmarshal(f, &t) + if err != nil { + return nil, err + } + if t.Name == "" { + return nil, errors.New("encountered empty name, something is wrong with the task") + } + task := &Task{ + Name: t.Name, + Description: string(desc), + Params: []Param{}, + Results: []Result{}, + } + for _, p := range t.Spec.Params { + defaultValue := "" + if p.Default != nil { + defaultValue = p.Default.StringVal + } + task.Params = append(task.Params, Param{ + Name: p.Name, + Default: defaultValue, + Description: p.Description, + }) + } + for _, r := range t.Spec.Results { + task.Results = append(task.Results, Result{ + Name: 
r.Name, + Description: r.Description, + }) + } + return task, nil +} + +func RenderTaskDocumentation(w io.Writer, tmpl *template.Template, task *Task) error { + if _, err := w.Write( + []byte("// File is generated; DO NOT EDIT.\n\n"), + ); err != nil { + return err + } + return tmpl.Execute(w, task) +} diff --git a/internal/projectpath/root.go b/internal/projectpath/root.go index 96357e4e..adc3cbd9 100644 --- a/internal/projectpath/root.go +++ b/internal/projectpath/root.go @@ -11,3 +11,7 @@ var ( // Root folder of this project Root = filepath.Join(filepath.Dir(b), "../..") ) + +func RootedPath(path string) string { + return filepath.Join(Root, path) +} diff --git a/pkg/odstasktest/assertions.go b/pkg/odstasktest/assertions.go new file mode 100644 index 00000000..d373a096 --- /dev/null +++ b/pkg/odstasktest/assertions.go @@ -0,0 +1,40 @@ +package odstasktest + +import ( + "os" + "path/filepath" + "strings" + "testing" +) + +// AssertFilesExist checks that all files named by wantFiles exist in wsDir. +// Any files that do not exist will report a test error. +func AssertFilesExist(t *testing.T, wsDir string, wantFiles ...string) { + for _, wf := range wantFiles { + filename := filepath.Join(wsDir, wf) + if _, err := os.Stat(filename); os.IsNotExist(err) { + t.Errorf("Want %s, but got nothing", filename) + } + } +} + +// AssertFileContent checks that the file named by filename in the directory +// wsDir has the exact content specified by want. 
+func AssertFileContent(t *testing.T, wsDir, filename, want string) { + got, err := getTrimmedFileContent(filepath.Join(wsDir, filename)) + if err != nil { + t.Errorf("get content of %s: %s", filename, err) + return + } + if got != want { + t.Errorf("got '%s', want '%s' in file %s", got, want, filename) + } +} + +func getTrimmedFileContent(filename string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + return strings.TrimSpace(string(content)), nil +} diff --git a/pkg/odstasktest/doc.go b/pkg/odstasktest/doc.go new file mode 100644 index 00000000..1432e3b0 --- /dev/null +++ b/pkg/odstasktest/doc.go @@ -0,0 +1,77 @@ +/* +Package odstasktest implements ODS Pipeline specific functionality to run +Tekton tasks in a KinD cluster on top of package tektontaskrun. + +odstasktest is intended to be used as a library for testing ODS Pipeline +tasks using Go. + +Example usage: + + package test + + import ( + "log" + "os" + "path/filepath" + "testing" + + ott "github.com/opendevstack/ods-pipeline/pkg/odstasktest" + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" + ) + + var ( + namespaceConfig *ttr.NamespaceConfig + rootPath = "../.." 
+ ) + + func TestMain(m *testing.M) { + cc, err := ttr.StartKinDCluster( + ttr.LoadImage(ttr.ImageBuildConfig{ + Dockerfile: "build/images/Dockerfile.my-task", + ContextDir: rootPath, + }), + ) + if err != nil { + log.Fatal("Could not start KinD cluster: ", err) + } + nc, cleanup, err := ttr.SetupTempNamespace( + cc, + ott.StartNexus(), + ott.InstallODSPipeline(), + ttr.InstallTaskFromPath( + filepath.Join(rootPath, "build/tasks/my-task.yaml"), + nil, + ), + ) + if err != nil { + log.Fatal("Could not setup temporary namespace: ", err) + } + defer cleanup() + namespaceConfig = nc + os.Exit(m.Run()) + } + + func TestMyTask(t *testing.T) { + if err := ttr.RunTask( + ttr.InNamespace(namespaceConfig.Name), + ttr.UsingTask("my-task"), + ttr.WithStringParams(map[string]string{ + "go-os": runtime.GOOS, + "go-arch": runtime.GOARCH, + }), + ott.WithGitSourceWorkspace(t, "../testdata/workspaces/go-sample-app"), + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun) { + ott.AssertFilesExist( + t, config.WorkspaceConfigs["source"].Dir, + "docker/Dockerfile", + "docker/app", + ) + }), + ); err != nil { + t.Fatal(err) + } + } + + // further tests here ... +*/ +package odstasktest diff --git a/pkg/odstasktest/install.go b/pkg/odstasktest/install.go new file mode 100644 index 00000000..9f03cd0d --- /dev/null +++ b/pkg/odstasktest/install.go @@ -0,0 +1,45 @@ +package odstasktest + +import ( + "flag" + "fmt" + "os" + "path/filepath" + + "github.com/opendevstack/ods-pipeline/internal/command" + "github.com/opendevstack/ods-pipeline/internal/projectpath" + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" +) + +var privateCertFlag = flag.Bool("ods-private-cert", false, "Whether to use a private cert") + +// InstallODSPipeline installs the ODS Pipeline Helm chart in the namespace +// given in NamespaceConfig. 
+func InstallODSPipeline() ttr.NamespaceOpt { + flag.Parse() + return func(cc *ttr.ClusterConfig, nc *ttr.NamespaceConfig) error { + return installCDNamespaceResources(nc.Name, "pipeline", *privateCertFlag) + } +} + +func installCDNamespaceResources(ns, serviceaccount string, privateCert bool) error { + scriptArgs := []string{filepath.Join(projectpath.Root, "scripts/install-inside-kind.sh"), "-n", ns, "-s", serviceaccount, "--no-diff"} + // if testing.Verbose() { + // scriptArgs = append(scriptArgs, "-v") + // } + if privateCert { + // Insert as first flag because install-inside-kind.sh won't recognize it otherwise. + scriptArgs = append( + []string{fmt.Sprintf("--private-cert=%s", filepath.Join(projectpath.Root, "test/testdata/private-cert/tls.crt"))}, + scriptArgs..., + ) + } + + return command.Run( + "sh", + scriptArgs, + []string{}, + os.Stdout, + os.Stderr, + ) +} diff --git a/pkg/odstasktest/services.go b/pkg/odstasktest/services.go new file mode 100644 index 00000000..ef45fb8b --- /dev/null +++ b/pkg/odstasktest/services.go @@ -0,0 +1,52 @@ +package odstasktest + +import ( + "flag" + "os" + + "github.com/opendevstack/ods-pipeline/internal/command" + "github.com/opendevstack/ods-pipeline/internal/projectpath" + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" +) + +var restartNexusFlag = flag.Bool("ods-restart-nexus", false, "Whether to force a restart of Nexus") +var restartSonarQubeFlag = flag.Bool("ods-restart-sonarqube", false, "Whether to force a restart of SonarQube") +var restartBitbucketFlag = flag.Bool("ods-restart-bitbucket", false, "Whether to force a restart of Bitbucket") +var skipSonarQubeFlag = flag.Bool("ods-skip-sonar", false, "Whether to skip SonarQube steps") + +// StartNexus starts a Nexus instance in a Docker container (named +// ods-test-nexus). If a container of the same name already exists, it will be +// reused unless -ods-restart-nexus is passed. 
+func StartNexus() ttr.NamespaceOpt { + flag.Parse() + return runService("run-nexus.sh", *restartNexusFlag) +} + +// StartSonarQube starts a SonarQube instance in a Docker container (named +// ods-test-sonarqube). If a container of the same name already exists, it will +// be reused unless -ods-restart-sonarqube is passed. +func StartSonarQube() ttr.NamespaceOpt { + flag.Parse() + if *skipSonarQubeFlag { + return func(cc *ttr.ClusterConfig, nc *ttr.NamespaceConfig) error { return nil } + } + return runService("run-sonarqube.sh", *restartSonarQubeFlag) +} + +// StartBitbucket starts a Bitbucket instance in a Docker container (named +// ods-test-bitbucket-server). If a container of the same name already exists, +// it will be reused unless -ods-restart-bitbucket is passed. +func StartBitbucket() ttr.NamespaceOpt { + flag.Parse() + return runService("run-bitbucket.sh", *restartBitbucketFlag) +} + +func runService(script string, restart bool) ttr.NamespaceOpt { + return func(cc *ttr.ClusterConfig, nc *ttr.NamespaceConfig) error { + args := []string{projectpath.RootedPath("scripts/" + script)} + if !restart { + args = append(args, "--reuse") + } + return command.Run("sh", args, []string{}, os.Stdout, os.Stderr) + } +} diff --git a/pkg/odstasktest/workspace.go b/pkg/odstasktest/workspace.go new file mode 100644 index 00000000..2c767f89 --- /dev/null +++ b/pkg/odstasktest/workspace.go @@ -0,0 +1,45 @@ +package odstasktest + +import ( + "testing" + + "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" + "github.com/opendevstack/ods-pipeline/pkg/tasktesting" + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" +) + +func GetSourceWorkspaceContext(t *testing.T, config *ttr.TaskRunConfig) (dir string, ctxt *pipelinectxt.ODSContext) { + dir = config.WorkspaceConfigs["source"].Dir + ctxt, err := pipelinectxt.NewFromCache(dir) + if err != nil { + t.Fatal(err) + } + return +} + +// InitGitRepo initialises a Git repository inside the given workspace. 
+// The workspace will also be setup with an ODS context directory in .ods. +func InitGitRepo(t *testing.T) ttr.WorkspaceOpt { + return func(c *ttr.WorkspaceConfig) error { + // TODO: namespace is not the real name of the namespace ... + // TODO: do we need the ODS context later? if so, how to transport? + _ = tasktesting.SetupGitRepo(t, "namespace", c.Dir) + return nil + } +} + +// WithGitSourceWorkspace configures the task run with a workspace named +// "source", mapped to the directory sourced from sourceDir. The directory is +// initialised as a Git repository with an ODS context. +func WithGitSourceWorkspace(t *testing.T, sourceDir string, opts ...ttr.WorkspaceOpt) ttr.TaskRunOpt { + return WithSourceWorkspace( + t, sourceDir, + append([]ttr.WorkspaceOpt{InitGitRepo(t)}, opts...)..., + ) +} + +// WithSourceWorkspace configures the task run with a workspace named +// "source", mapped to the directory sourced from sourceDir. +func WithSourceWorkspace(t *testing.T, sourceDir string, opts ...ttr.WorkspaceOpt) ttr.TaskRunOpt { + return ttr.WithWorkspace("source", sourceDir, opts...) 
+} diff --git a/pkg/pipelinectxt/context.go b/pkg/pipelinectxt/context.go index 14c0df0c..1d11a088 100644 --- a/pkg/pipelinectxt/context.go +++ b/pkg/pipelinectxt/context.go @@ -60,6 +60,12 @@ func (o *ODSContext) WriteCache(wsDir string) error { return nil } +func NewFromCache(wsDir string) (o *ODSContext, err error) { + o = &ODSContext{} + err = o.ReadCache(wsDir) + return +} + // ReadCache reads ODS context from .ods // TODO: test that this works func (o *ODSContext) ReadCache(wsDir string) error { diff --git a/pkg/sonar/quality_gate_test.go b/pkg/sonar/quality_gate_test.go index 1c35398d..0677321d 100644 --- a/pkg/sonar/quality_gate_test.go +++ b/pkg/sonar/quality_gate_test.go @@ -36,6 +36,12 @@ func TestQualityGateGet(t *testing.T) { wantRequestURI: "/api/qualitygates/project_status?projectKey=foo&branch=bar", wantStatus: "OK", }, + "OK status for branch (PR=0)": { + params: QualityGateGetParams{ProjectKey: "foo", Branch: "bar", PullRequest: "0"}, + responseFixture: "sonar/project_status_ok.json", + wantRequestURI: "/api/qualitygates/project_status?projectKey=foo&branch=bar", + wantStatus: "OK", + }, "OK status for PR": { params: QualityGateGetParams{ProjectKey: "foo", PullRequest: "123"}, responseFixture: "sonar/project_status_ok.json", diff --git a/pkg/taskdoc/taskdoc.go b/pkg/taskdoc/taskdoc.go new file mode 100644 index 00000000..2c06cadd --- /dev/null +++ b/pkg/taskdoc/taskdoc.go @@ -0,0 +1,78 @@ +package taskdoc + +import ( + "errors" + "io" + "text/template" + + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "sigs.k8s.io/yaml" +) + +type Param struct { + Name string + Default string + Description string +} + +type Result struct { + Name string + Description string +} + +type Task struct { + Name string + Description string + Params []Param + Results []Result +} + +// ParseTask reads a Tekton task from given bytes f, +// and assembles a new Task with the name, params and +// results from the parsed Tekton task, as well as the +// given 
description. +func ParseTask(f []byte, desc []byte) (*Task, error) { + var t tekton.Task + err := yaml.Unmarshal(f, &t) + if err != nil { + return nil, err + } + if t.Name == "" { + return nil, errors.New("encountered empty name, something is wrong with the task") + } + task := &Task{ + Name: t.Name, + Description: string(desc), + Params: []Param{}, + Results: []Result{}, + } + for _, p := range t.Spec.Params { + defaultValue := "" + if p.Default != nil { + defaultValue = p.Default.StringVal + } + task.Params = append(task.Params, Param{ + Name: p.Name, + Default: defaultValue, + Description: p.Description, + }) + } + for _, r := range t.Spec.Results { + task.Results = append(task.Results, Result{ + Name: r.Name, + Description: r.Description, + }) + } + return task, nil +} + +// RenderTaskDocumentation renders the given template with the task data, +// writing the result to w. +func RenderTaskDocumentation(w io.Writer, tmpl *template.Template, task *Task) error { + if _, err := w.Write( + []byte("// File is generated; DO NOT EDIT.\n\n"), + ); err != nil { + return err + } + return tmpl.Execute(w, task) +} diff --git a/pkg/taskmanifest/taskmanifest.go b/pkg/taskmanifest/taskmanifest.go new file mode 100644 index 00000000..5ad95cc2 --- /dev/null +++ b/pkg/taskmanifest/taskmanifest.go @@ -0,0 +1,17 @@ +package taskmanifest + +import ( + "io" + "text/template" +) + +// RenderTask renders the given template with the passed data, +// writing the result to w. 
+func RenderTask(w io.Writer, tmpl *template.Template, data map[string]string) error { + if _, err := w.Write( + []byte("# File is generated; DO NOT EDIT.\n\n"), + ); err != nil { + return err + } + return tmpl.Execute(w, data) +} diff --git a/pkg/tektontaskrun/cluster.go b/pkg/tektontaskrun/cluster.go new file mode 100644 index 00000000..a764ab01 --- /dev/null +++ b/pkg/tektontaskrun/cluster.go @@ -0,0 +1,195 @@ +package tektontaskrun + +import ( + "errors" + "flag" + "fmt" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/opendevstack/ods-pipeline/internal/command" + "github.com/opendevstack/ods-pipeline/internal/projectpath" +) + +const ( + DefaultServiceAccountName = "pipeline" + KinDMountHostPath = "/tmp/ods-pipeline-kind-mount" + KinDMountContainerPath = "/files" + KinDRegistry = "localhost:5000" +) + +var recreateClusterFlag = flag.Bool("ods-recreate-cluster", false, "Whether to remove and recreate the KinD cluster named 'ods-pipeline'") +var registryPortFlag = flag.String("ods-cluster-registry-port", "5000", "Port of cluster registry") +var outsideKindFlag = flag.Bool("ods-outside-kind", false, "Whether to continue if the Kube context is not set to the KinD cluster") +var reuseImagesFlag = flag.Bool("ods-reuse-images", false, "Whether to reuse existing images instead of building again") +var debugFlag = flag.Bool("ods-debug", false, "Turn on debug mode for scripts etc.") + +// ClusterOpt allows to further configure the KinD cluster after its creation. +type ClusterOpt func(c *ClusterConfig) error + +// ClusterConfig represents key configuration of the KinD cluster. +type ClusterConfig struct { + StorageSourceDir string + StorageCapacity string + StorageClassName string + Registry string + DefaultRepository string +} + +// ImageBuildConfig represents the config used to build a container image. 
+type ImageBuildConfig struct { + Dockerfile string + Tag string + ContextDir string +} + +// Process validates the configuration and defaults the image tag if unset +// using the defaultImageRepository and Dockerfile values. +func (ibc *ImageBuildConfig) Process(defaultImageRepository string) error { + if ibc.Dockerfile == "" || ibc.ContextDir == "" { + return errors.New("both Dockerfile and ContextDir must be set") + } + if ibc.Tag == "" { + imageName := strings.TrimPrefix(path.Base(ibc.Dockerfile), "Dockerfile.") + ibc.Tag = fmt.Sprintf("%s/%s:latest", defaultImageRepository, imageName) + } + return nil +} + +// NewClusterConfig creates a new ClusterConfig instance. +func NewClusterConfig() *ClusterConfig { + return &ClusterConfig{ + StorageClassName: "standard", // if using KinD, set it to "standard" + StorageCapacity: "1Gi", + StorageSourceDir: KinDMountContainerPath, + Registry: KinDRegistry, + DefaultRepository: "ods-pipeline", + } +} + +// DefaultImageRepository returns the registry + default repository +// combination. +func (c *ClusterConfig) DefaultImageRepository() string { + return c.Registry + "/" + c.DefaultRepository +} + +// DefaultTaskTemplateData returns a map with default values which can be used +// in task templates. +func (c *ClusterConfig) DefaultTaskTemplateData() map[string]string { + return map[string]string{ + "ImageRepository": c.DefaultImageRepository(), + "Version": "latest", + } +} + +// StartKinDCluster starts a KinD cluster with Tekton installed. +// Afterwards, any given ClusterOpt is applied. 
+func StartKinDCluster(opts ...ClusterOpt) (*ClusterConfig, error) { + flag.Parse() + if err := checkCluster(*outsideKindFlag); err != nil { + return nil, fmt.Errorf("check kubectl context: %s", err) + } + if err := createKinDCluster(*debugFlag); err != nil { + return nil, fmt.Errorf("create KinD cluster: %s", err) + } + if err := installTektonPipelines(*debugFlag); err != nil { + return nil, fmt.Errorf("install Tekton: %s", err) + } + + c := NewClusterConfig() + for _, o := range opts { + err := o(c) + if err != nil { + return nil, err + } + } + return c, nil +} + +// LoadImage builds a container image using the docker CLI based on the given +// ImageBuildConfig. +// +// The ImageBuildConfig must set at least Dockerfile and ContextDir option. +// If Tag is unset, it is inferred from the default registry and the Dockerfile +// name. For example, given a Dockerfile of "Dockerfile.foobar", the tag is +// defaulted to localhost:5000/ods-pipeline/foobar. +func LoadImage(ibc ImageBuildConfig) ClusterOpt { + flag.Parse() + return func(c *ClusterConfig) error { + buildImage := true + ibc.Process(c.DefaultImageRepository()) + if *reuseImagesFlag { + cmd := exec.Command("docker", "images", "-q", ibc.Tag) + b, err := cmd.Output() + if err != nil { + return err + } + imageID := strings.TrimSpace(string(b)) + if imageID != "" { + log.Printf("Reusing image ID %s for tag %s ...\n", imageID, ibc.Tag) + buildImage = false + } + } + if buildImage { + log.Printf("Building image %s from %s ...\n", ibc.Tag, ibc.Dockerfile) + if !path.IsAbs(ibc.Dockerfile) { + ibc.Dockerfile = filepath.Join(ibc.ContextDir, ibc.Dockerfile) + } + args := []string{ + "build", + "-f", ibc.Dockerfile, + "-t", ibc.Tag, + ibc.ContextDir, + } + if err := command.Run("docker", args, []string{}, os.Stdout, os.Stderr); err != nil { + return err + } + } + return command.Run("docker", []string{"push", ibc.Tag}, []string{}, os.Stdout, os.Stderr) + } +} + +func checkCluster(outsideKindAllowed bool) error { + if 
!outsideKindAllowed { + cmd := exec.Command("kubectl", "config", "current-context") + b, err := cmd.Output() + if err != nil || len(b) == 0 { + log.Println("did not detect existing kubectl context") + return nil + } + gotContext := strings.TrimSpace(string(b)) + wantCluster := "ods-pipeline" + if gotContext != "kind-"+wantCluster { + return fmt.Errorf("not running tests outside KinD cluster ('%s') without -ods-outside-kind! Current context: %s", wantCluster, gotContext) + } + } + return nil +} + +func createKinDCluster(debug bool) error { + args := []string{ + projectpath.RootedPath("scripts/kind-with-registry.sh"), + "--registry-port=" + *registryPortFlag, + } + if *recreateClusterFlag { + args = append(args, "--recreate") + } + if debug { + args = append(args, "--verbose") + } + return command.Run("sh", args, []string{}, os.Stdout, os.Stderr) +} + +func installTektonPipelines(debug bool) error { + args := []string{ + projectpath.RootedPath("scripts/install-tekton-pipelines.sh"), + } + if debug { + args = append(args, "--verbose") + } + return command.Run("sh", args, []string{}, os.Stdout, os.Stderr) +} diff --git a/pkg/tektontaskrun/doc.go b/pkg/tektontaskrun/doc.go new file mode 100644 index 00000000..27540de0 --- /dev/null +++ b/pkg/tektontaskrun/doc.go @@ -0,0 +1,74 @@ +/* +Package tektontaskrun implements ODS Pipeline independent functionality to run +Tekton tasks in a KinD cluster. + +Using tektontaskrun it is possible to start a KinD cluster, configure it (e.g. +by setting up a temporary namespace), and running a Tekton task. + +tektontaskrun is intended to be used by CLI programs and as a library for +testing Tekton tasks using Go. + +Example usage: + + package test + + import ( + "log" + "os" + "path/filepath" + "testing" + + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" + ) + + var ( + namespaceConfig *ttr.NamespaceConfig + rootPath = "../.." 
+ ) + + func TestMain(m *testing.M) { + cc, err := ttr.StartKinDCluster( + ttr.LoadImage(ttr.ImageBuildConfig{ + Dockerfile: "build/images/Dockerfile.my-task", + ContextDir: rootPath, + }), + ) + if err != nil { + log.Fatal("Could not start KinD cluster: ", err) + } + nc, cleanup, err := ttr.SetupTempNamespace( + cc, + ttr.InstallTaskFromPath( + filepath.Join(rootPath, "build/tasks/my-task.yaml"), + nil, + ), + ) + if err != nil { + log.Fatal("Could not setup temporary namespace: ", err) + } + defer cleanup() + namespaceConfig = nc + os.Exit(m.Run()) + } + + func TestMyTask(t *testing.T) { + if err := ttr.RunTask( + ttr.InNamespace(namespaceConfig.Name), + ttr.UsingTask("my-task"), + ttr.WithStringParams(map[string]string{ + "go-os": runtime.GOOS, + "go-arch": runtime.GOARCH, + }), + ttr.WithWorkspace("source", "my-sample-app"), + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun) { + wd := config.WorkspaceConfigs["source"].Dir + // e.g. check files in workspace ... + }), + ); err != nil { + t.Fatal(err) + } + } + + // further tests here ... 
+*/ +package tektontaskrun diff --git a/pkg/tektontaskrun/events.go b/pkg/tektontaskrun/events.go new file mode 100644 index 00000000..ab575657 --- /dev/null +++ b/pkg/tektontaskrun/events.go @@ -0,0 +1,50 @@ +package tektontaskrun + +import ( + "context" + "fmt" + "log" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func watchPodEvents( + ctx context.Context, + c kubernetes.Interface, + podName, namespace string, + stop chan bool, + errs chan error) { + + log.Printf("Watching events for pod %s in namespace %s", podName, namespace) + + ew, err := c.CoreV1().Events(namespace).Watch(context.Background(), + metav1.ListOptions{ + FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=%s", podName, namespace), + }) + if err != nil { + errs <- fmt.Errorf("failed to watch events from pod %s in namespace %s", podName, namespace) + return + } + + log.Println("---------------------- Events -------------------------") + + // Wait for any event failure or a all its containers to be running + for { + select { + case wev := <-ew.ResultChan(): + if wev.Object != nil { + ev := wev.Object.(*v1.Event) + log.Printf("Type: %s, Message: %s", ev.Type, ev.Message) + if ev.Type == "Warning" && strings.Contains(ev.Message, "Error") { + errs <- fmt.Errorf("error detected in events: %s", ev.Message) + return + } + } + case <-stop: + return + } + } +} diff --git a/pkg/tektontaskrun/interrupt.go b/pkg/tektontaskrun/interrupt.go new file mode 100644 index 00000000..df8ca472 --- /dev/null +++ b/pkg/tektontaskrun/interrupt.go @@ -0,0 +1,18 @@ +package tektontaskrun + +import ( + "os" + "os/signal" +) + +// cleanupOnInterrupt will execute the function cleanup if an interrupt signal is caught +func cleanupOnInterrupt(cleanup func()) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + for range c { + cleanup() + os.Exit(1) + } + }() +} diff --git 
a/pkg/tektontaskrun/logs.go b/pkg/tektontaskrun/logs.go new file mode 100644 index 00000000..4f32b110 --- /dev/null +++ b/pkg/tektontaskrun/logs.go @@ -0,0 +1,111 @@ +package tektontaskrun + +import ( + "bufio" + "context" + "fmt" + "log" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +// getEventsAndLogsOfPod streams events of the pod until all containers are ready, +// and streams logs for each container once ready. It stops if there are any +// sends on the errs channels or if the passed context is cancelled. +func getEventsAndLogsOfPod( + ctx context.Context, + c kubernetes.Interface, + pod *corev1.Pod, + collectedLogsChan chan []byte, + errs chan error) { + quitEvents := make(chan bool) + podName := pod.Name + podNamespace := pod.Namespace + + go watchPodEvents( + ctx, + c, + podName, + podNamespace, + quitEvents, + errs, + ) + + watchingEvents := true + for _, container := range pod.Spec.Containers { + err := streamContainerLogs(ctx, c, podNamespace, podName, container.Name, collectedLogsChan) + if err != nil { + fmt.Printf("failure while getting container logs: %s", err) + errs <- err + return + } + if watchingEvents { + quitEvents <- true + watchingEvents = false + } + } +} + +func streamContainerLogs( + ctx context.Context, + c kubernetes.Interface, + podNamespace, podName, containerName string, collectedLogsChan chan []byte) error { + log.Printf("Waiting for container %s from pod %s to be ready...\n", containerName, podName) + + w, err := c.CoreV1().Pods(podNamespace).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{ + Name: podName, + Namespace: podNamespace, + })) + if err != nil { + return fmt.Errorf("error watching pods: %s", err) + } + + for { + ev := <-w.ResultChan() + if cs, ok := containerFromEvent(ev, podName, containerName); ok { + if cs.State.Running != nil { + log.Printf("---------------------- Logs from %s -------------------------\n", 
containerName) + // Set up log stream using a new ctx so that it's not cancelled + // when the task is done before all logs have been read. + ls, err := c.CoreV1().Pods(podNamespace).GetLogs(podName, &corev1.PodLogOptions{ + Follow: true, + Container: containerName, + }).Stream(context.Background()) + if err != nil { + return fmt.Errorf("could not create log stream for pod %s in namespace %s: %w", podName, podNamespace, err) + } + defer ls.Close() + reader := bufio.NewScanner(ls) + for reader.Scan() { + select { + case <-ctx.Done(): + collectedLogsChan <- reader.Bytes() + fmt.Println(reader.Text()) + return nil + default: + collectedLogsChan <- reader.Bytes() + fmt.Println(reader.Text()) + } + } + return reader.Err() + } + } + } +} + +func containerFromEvent(ev watch.Event, podName, containerName string) (corev1.ContainerStatus, bool) { + if ev.Object != nil { + p, ok := ev.Object.(*corev1.Pod) + if ok && p.Name == podName { + for _, cs := range p.Status.ContainerStatuses { + if cs.Name == containerName { + return cs, true + } + } + } + } + return corev1.ContainerStatus{}, false +} diff --git a/pkg/tektontaskrun/namespace.go b/pkg/tektontaskrun/namespace.go new file mode 100644 index 00000000..ea7ff9f1 --- /dev/null +++ b/pkg/tektontaskrun/namespace.go @@ -0,0 +1,95 @@ +package tektontaskrun + +import ( + "context" + "fmt" + + k "github.com/opendevstack/ods-pipeline/internal/kubernetes" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NamespaceOpt allows to further configure the K8s namespace after its creation. +type NamespaceOpt func(cc *ClusterConfig, nc *NamespaceConfig) error + +// NamespaceConfig represents key configuration of the K8s namespace. +type NamespaceConfig struct { + Name string +} + +// SetupTempNamespace sets up a new namespace using a pseduo-random name, +// applies any given NamespaceOpt and returns a function to clean up the +// namespace at a later time. 
+func SetupTempNamespace(cc *ClusterConfig, opts ...NamespaceOpt) (nc *NamespaceConfig, cleanup func(), err error) { + nc = &NamespaceConfig{ + Name: makeRandomString(8), + } + cleanup, err = initNamespace(cc, nc) + if err != nil { + return + } + cleanupOnInterrupt(cleanup) + for _, o := range opts { + err = o(cc, nc) + if err != nil { + return + } + } + return +} + +// InstallTaskFromPath renders the task template at path using the given data, +// then installs the resulting task into the namespace identified by +// NamespaceConfig. +func InstallTaskFromPath(path string, data map[string]string) NamespaceOpt { + return func(cc *ClusterConfig, nc *NamespaceConfig) error { + d := cc.DefaultTaskTemplateData() + for k, v := range data { + d[k] = v + } + _, err := installTask(path, nc.Name, d) + return err + } +} + +func initNamespace(cc *ClusterConfig, nc *NamespaceConfig) (cleanup func(), err error) { + clients := k.NewClients() + + k.CreateNamespace(clients.KubernetesClientSet, nc.Name) + + _, err = k.CreatePersistentVolume( + clients.KubernetesClientSet, + nc.Name, + cc.StorageCapacity, + cc.StorageSourceDir, + cc.StorageClassName, + ) + if err != nil { + return nil, err + } + + _, err = k.CreatePersistentVolumeClaim( + clients.KubernetesClientSet, + cc.StorageCapacity, + cc.StorageClassName, + nc.Name, + ) + if err != nil { + return nil, err + } + + return func() { removeNamespace(clients, nc.Name) }, nil +} + +func removeNamespace(cs *k.Clients, namespace string) error { + if err := cs.KubernetesClientSet.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}); err != nil { + return fmt.Errorf("delete namespace %s: %s", namespace, err) + } + + // For simplicity and traceability, we use for the PV the same name as the namespace + pvName := namespace + if err := cs.KubernetesClientSet.CoreV1().PersistentVolumes().Delete(context.Background(), pvName, metav1.DeleteOptions{}); err != nil { + return fmt.Errorf("delete persistent volume %s: 
%s", pvName, err) + } + + return nil +} diff --git a/pkg/tektontaskrun/random.go b/pkg/tektontaskrun/random.go new file mode 100644 index 00000000..36a432b2 --- /dev/null +++ b/pkg/tektontaskrun/random.go @@ -0,0 +1,17 @@ +package tektontaskrun + +import ( + "math/rand" + "strings" + "time" +) + +func makeRandomString(length int) string { + rand.Seed(time.Now().UnixNano()) + chars := []rune("abcdefghijklmnopqrstuvwxyz") + var b strings.Builder + for i := 0; i < length; i++ { + b.WriteRune(chars[rand.Intn(len(chars))]) + } + return b.String() +} diff --git a/pkg/tektontaskrun/run.go b/pkg/tektontaskrun/run.go new file mode 100644 index 00000000..f80b355f --- /dev/null +++ b/pkg/tektontaskrun/run.go @@ -0,0 +1,208 @@ +package tektontaskrun + +import ( + "errors" + "log" + "os" + "time" + + "github.com/opendevstack/ods-pipeline/internal/directory" + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" +) + +const ( + DefaultTimeout = 5 * time.Minute +) + +// TaskRunOpt allows to configure the Tekton task run before it is started. +type TaskRunOpt func(c *TaskRunConfig) error + +// TaskRunConfig represents key configuration of the Tekton task run. +type TaskRunConfig struct { + Name string + Params []tekton.Param + Workspaces map[string]string + Namespace string + ServiceAccountName string + Timeout time.Duration + AfterRunFunc func(config *TaskRunConfig, taskRun *tekton.TaskRun) + CleanupFuncs []func() + NamespaceConfig *NamespaceConfig + WorkspaceConfigs map[string]*WorkspaceConfig + ExpectFailure bool +} + +// Cleanup calls all registered CleanupFuncs. +func (nc *TaskRunConfig) Cleanup() { + for _, f := range nc.CleanupFuncs { + f() + } +} + +// RunTask executes a task run after applying all given TaskRunOpt. 
+func RunTask(opts ...TaskRunOpt) error { + trc := &TaskRunConfig{ + Workspaces: map[string]string{}, + WorkspaceConfigs: map[string]*WorkspaceConfig{}, + Timeout: DefaultTimeout, + ServiceAccountName: DefaultServiceAccountName, + } + for _, o := range opts { + err := o(trc) + if err != nil { + return err + } + } + + cleanupOnInterrupt(trc.Cleanup) + defer trc.Cleanup() + + taskRun, err := runTask(trc) + if err != nil { + return err + } + + if !taskRun.IsSuccessful() && !trc.ExpectFailure { + return errors.New("task run was not successful") + } + + if trc.AfterRunFunc != nil { + trc.AfterRunFunc(trc, taskRun) + } + + return err +} + +// InNamespace configures the task run to execute in given namespace. +func InNamespace(namespace string) TaskRunOpt { + return func(c *TaskRunConfig) error { + c.Namespace = namespace + return nil + } +} + +// InTempNamespace configures the task run to execute in a newly created, +// temporary namespace. +func InTempNamespace(cc *ClusterConfig, opts ...NamespaceOpt) TaskRunOpt { + return func(c *TaskRunConfig) error { + nc, cleanup, err := SetupTempNamespace(cc, opts...) + if err != nil { + return err + } + c.Namespace = nc.Name + c.NamespaceConfig = nc + c.CleanupFuncs = append(c.CleanupFuncs, cleanup) + return nil + } +} + +// UsingTask configures the task run to execute the Task identified by name in +// the configured namespace. +func UsingTask(name string) TaskRunOpt { + return func(c *TaskRunConfig) error { + c.Name = name + return nil + } +} + +// WithServiceAccountName configures the task run to execute under the +// specified serviceaccount name. +func WithServiceAccountName(name string) TaskRunOpt { + return func(c *TaskRunConfig) error { + c.ServiceAccountName = name + return nil + } +} + +// WithTimeout configures the task run to execute within the given duration. 
+func WithTimeout(timeout time.Duration) TaskRunOpt { + return func(c *TaskRunConfig) error { + c.Timeout = timeout + return nil + } +} + +// WithWorkspace sets up a workspace with given name and contents of sourceDir. +// sourceDir is copied to a temporary directory so that the original contents +// remain unchanged. +func WithWorkspace(name, sourceDir string, opts ...WorkspaceOpt) TaskRunOpt { + return func(c *TaskRunConfig) error { + workspaceDir, cleanup, err := SetupWorkspaceDir(sourceDir) + if err != nil { + return err + } + wc := &WorkspaceConfig{ + Name: name, + Dir: workspaceDir, + Cleanup: cleanup, + } + for _, o := range opts { + err := o(wc) + if err != nil { + return err + } + } + c.WorkspaceConfigs[wc.Name] = wc + c.CleanupFuncs = append(c.CleanupFuncs, wc.Cleanup) + c.Workspaces[wc.Name] = wc.Dir + return nil + } +} + +// WithParams configures the task run to use the specified Tekton parameters. +func WithParams(params ...tekton.Param) TaskRunOpt { + return func(c *TaskRunConfig) error { + c.Params = append(c.Params, params...) + return nil + } +} + +// WithStringParams configures the task run to use the specified string +// parameters. WithStringParams is a more convenient way to configure +// simple parameters compares to WithParams. +func WithStringParams(params map[string]string) TaskRunOpt { + return func(c *TaskRunConfig) error { + for k, v := range params { + tp := tekton.Param{Name: k, Value: tekton.ParamValue{ + Type: tekton.ParamTypeString, + StringVal: v, + }} + c.Params = append(c.Params, tp) + } + return nil + } +} + +// ExpectFailure sets up an expectation that the task will fail. If the task +// does not fail, RunTask will error. Conversely, if ExpectFailure is not set, +// RunTask will error when the task run fails. +func ExpectFailure() TaskRunOpt { + return func(c *TaskRunConfig) error { + c.ExpectFailure = true + return nil + } +} + +// AfterRun registers a function which is run after the task run completes. 
+// The function will receive the task run configuration, as well as an instance +// of the TaskRun. +func AfterRun(f func(c *TaskRunConfig, r *tekton.TaskRun)) TaskRunOpt { + return func(c *TaskRunConfig) error { + c.AfterRunFunc = f + return nil + } +} + +// SetupWorkspaceDir copies sourceDir to the KinD mount host path, which is +// set to /tmp/ods-pipeline-kind-mount. The created folder can then be used +// as a Tekton task run workspace. SetupWorkspaceDir returns the +// created directory as well as a function to clean it up. +func SetupWorkspaceDir(sourceDir string) (dir string, cleanup func(), err error) { + dir, err = directory.CopyToTempDir(sourceDir, KinDMountHostPath, "workspace-") + cleanup = func() { + if err := os.RemoveAll(dir); err != nil { + log.Printf("failed to clean up temporary workspace dir %s: %s", dir, err) + } + } + return +} diff --git a/pkg/tektontaskrun/task.go b/pkg/tektontaskrun/task.go new file mode 100644 index 00000000..cc068ee8 --- /dev/null +++ b/pkg/tektontaskrun/task.go @@ -0,0 +1,38 @@ +package tektontaskrun + +import ( + "bytes" + "context" + "fmt" + "text/template" + + k "github.com/opendevstack/ods-pipeline/internal/kubernetes" + "github.com/opendevstack/ods-pipeline/pkg/taskmanifest" + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +func installTask(path, namespace string, data map[string]string) (*tekton.Task, error) { + var t tekton.Task + tmpl, err := template.ParseFiles(path) + if err != nil { + return nil, fmt.Errorf("parse file: %w", err) + } + w := new(bytes.Buffer) + err = taskmanifest.RenderTask(w, tmpl, data) + if err != nil { + return nil, fmt.Errorf("render task: %w", err) + } + err = yaml.Unmarshal(w.Bytes(), &t) + if err != nil { + return nil, fmt.Errorf("unmarshal: %w", err) + } + clients := k.NewClients() + tc := clients.TektonClientSet + it, err := tc.TektonV1beta1().Tasks(namespace).Create(context.TODO(), &t, 
metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("create task: %w", err) + } + return it, nil +} diff --git a/pkg/tektontaskrun/taskrun.go b/pkg/tektontaskrun/taskrun.go new file mode 100644 index 00000000..6726c89f --- /dev/null +++ b/pkg/tektontaskrun/taskrun.go @@ -0,0 +1,208 @@ +package tektontaskrun + +import ( + "bytes" + "context" + "fmt" + "log" + "path" + "strings" + "time" + + k "github.com/opendevstack/ods-pipeline/internal/kubernetes" + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + pipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/apis" +) + +func runTask(tc *TaskRunConfig) (*tekton.TaskRun, error) { + clients := k.NewClients() + tr, err := createTaskRunWithParams(clients.TektonClientSet, tc) + if err != nil { + return nil, err + } + + // TODO: if last output is short, it may be omitted from the logs. 
+ taskRun, _, err := watchTaskRunUntilDone(clients, tc, tr) + if err != nil { + return nil, err + } + + log.Printf( + "Task status: %q - %q\n", + taskRun.Status.GetCondition(apis.ConditionSucceeded).GetReason(), + taskRun.Status.GetCondition(apis.ConditionSucceeded).GetMessage(), + ) + + return taskRun, nil +} + +func createTaskRunWithParams(tknClient *pipelineclientset.Clientset, tc *TaskRunConfig) (*tekton.TaskRun, error) { + + taskWorkspaces := []tekton.WorkspaceBinding{} + for wn, wd := range tc.Workspaces { + if path.IsAbs(wd) && !strings.HasPrefix(wd, KinDMountHostPath) { + return nil, fmt.Errorf("workspace dir %q is not located within %q", wd, KinDMountHostPath) + } + taskWorkspaces = append(taskWorkspaces, tekton.WorkspaceBinding{ + Name: wn, + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "task-pv-claim", + ReadOnly: false, + }, + SubPath: strings.TrimPrefix(wd, KinDMountHostPath+"/"), + }) + } + + tr, err := tknClient.TektonV1beta1().TaskRuns(tc.Namespace).Create(context.TODO(), + &tekton.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: makeRandomTaskrunName(tc.Name), + }, + Spec: tekton.TaskRunSpec{ + TaskRef: &tekton.TaskRef{Kind: tekton.NamespacedTaskKind, Name: tc.Name}, + Params: tc.Params, + Workspaces: taskWorkspaces, + ServiceAccountName: tc.ServiceAccountName, + }, + }, + metav1.CreateOptions{}) + + return tr, err +} + +func makeRandomTaskrunName(taskName string) string { + return fmt.Sprintf("%s-taskrun-%s", taskName, makeRandomString(8)) +} + +func waitForTaskRunDone( + ctx context.Context, + c pipelineclientset.Interface, + name, ns string, + errs chan error, + done chan *tekton.TaskRun) { + + deadline, _ := ctx.Deadline() + timeout := time.Until(deadline) + log.Printf("Waiting up to %v seconds for task %s in namespace %s to be done...\n", timeout.Round(time.Second).Seconds(), name, ns) + + w, err := c.TektonV1beta1().TaskRuns(ns).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{ + Name: name, + Namespace: ns, 
+ })) + if err != nil { + errs <- fmt.Errorf("error watching taskrun: %s", err) + return + } + + // Wait for the TaskRun to be done + for { + ev := <-w.ResultChan() + if ev.Object != nil { + tr, ok := ev.Object.(*tekton.TaskRun) + if ok { + if tr.IsDone() { + done <- tr + close(done) + return + } + } + + } + } +} + +func waitForTaskRunPod( + ctx context.Context, + c *kubernetes.Clientset, + taskRunName, + namespace string, + podAdded chan *corev1.Pod) { + log.Printf("Waiting for pod related to TaskRun %s to be added to the cluster\n", taskRunName) + stop := make(chan struct{}) + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(c, time.Second*30) + podsInformer := kubeInformerFactory.Core().V1().Pods().Informer() + + var taskRunPod *corev1.Pod + + podsInformer.AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + // when a new task is created, watch its events + pod := obj.(*corev1.Pod) + if strings.HasPrefix(pod.Name, taskRunName) { + taskRunPod = pod + log.Printf("TaskRun %s added pod %s to the cluster", taskRunName, pod.Name) + stop <- struct{}{} + } + + }, + }) + + defer close(stop) + kubeInformerFactory.Start(stop) + + <-stop + podAdded <- taskRunPod +} + +func watchTaskRunUntilDone(c *k.Clients, tc *TaskRunConfig, tr *tekton.TaskRun) (*tekton.TaskRun, bytes.Buffer, error) { + taskRunDone := make(chan *tekton.TaskRun) + podAdded := make(chan *corev1.Pod) + errs := make(chan error) + collectedLogsChan := make(chan []byte) + var collectedLogsBuffer bytes.Buffer + + ctx, cancel := context.WithTimeout(context.TODO(), tc.Timeout) + defer cancel() + go waitForTaskRunDone( + ctx, + c.TektonClientSet, + tr.Name, + tc.Namespace, + errs, + taskRunDone, + ) + + go waitForTaskRunPod( + ctx, + c.KubernetesClientSet, + tr.Name, + tc.Namespace, + podAdded, + ) + + for { + select { + case err := <-errs: + if err != nil { + return nil, collectedLogsBuffer, err + } + + case pod := <-podAdded: + if pod != nil { + go 
getEventsAndLogsOfPod( + ctx, + c.KubernetesClientSet, + pod, + collectedLogsChan, + errs, + ) + } + + case b := <-collectedLogsChan: + collectedLogsBuffer.Write(b) + + case tr := <-taskRunDone: + return tr, collectedLogsBuffer, nil + case <-ctx.Done(): + return nil, collectedLogsBuffer, fmt.Errorf("timeout waiting for task run to finish. Consider increasing the timeout for your testcase at hand") + } + } +} diff --git a/pkg/tektontaskrun/workspace.go b/pkg/tektontaskrun/workspace.go new file mode 100644 index 00000000..c6c52fd3 --- /dev/null +++ b/pkg/tektontaskrun/workspace.go @@ -0,0 +1,14 @@ +package tektontaskrun + +// WorkspaceOpt allows to further configure a Tekton workspace after its creation. +type WorkspaceOpt func(c *WorkspaceConfig) error + +// WorkspaceConfig describes a Tekton workspace. +type WorkspaceConfig struct { + // Name of the Tekton workspace. + Name string + // Directory on the host of the workspace. + Dir string + // Cleanup function. + Cleanup func() +} diff --git a/scripts/build-and-push-images.sh b/scripts/build-and-push-images.sh index d155164a..8e17438c 100755 --- a/scripts/build-and-push-images.sh +++ b/scripts/build-and-push-images.sh @@ -17,7 +17,7 @@ https_proxy="${https_proxy:-}" PLATFORM="" # eg. 
--platform linux/amd64 -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; diff --git a/scripts/build-artifact-download.sh b/scripts/build-artifact-download.sh index ee8d48ab..cf080c96 100755 --- a/scripts/build-artifact-download.sh +++ b/scripts/build-artifact-download.sh @@ -4,7 +4,7 @@ set -ue GO_OS="" GO_ARCH="" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in --go-os) GO_OS="$2"; shift;; diff --git a/scripts/install-inside-kind.sh b/scripts/install-inside-kind.sh index c16f9a9b..3a40357d 100755 --- a/scripts/install-inside-kind.sh +++ b/scripts/install-inside-kind.sh @@ -1,51 +1,64 @@ #!/usr/bin/env bash set -ue -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} -kind_values_dir="${ODS_PIPELINE_DIR}/deploy/.kind-values" -HELM_GENERATED_VALUES_FILE="${ODS_PIPELINE_DIR}/deploy/ods-pipeline/values.generated.yaml" +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ods_pipeline_dir=${script_dir%/*} +kind_deploy_path="/tmp/ods-pipeline-kind-deploy" +kind_values_dir="/tmp/ods-pipeline-kind-values" +helm_generated_values_file="${kind_deploy_path}/ods-pipeline/values.generated.yaml" -URL_SUFFIX="http" -BITBUCKET_AUTH="unavailable" -NEXUS_AUTH="unavailable:unavailable" -SONAR_AUTH="unavailable" +url_suffix="http" +bitbucket_auth="unavailable" +nexus_auth="unavailable:unavailable" +sonar_auth="unavailable" if [ "$#" -gt 0 ]; then case $1 in - --private-cert=*) URL_SUFFIX="https"; + --private-cert=*) url_suffix="https"; esac; fi +# Copy deploy path to tmp dir as the deploy path may be used through the Go package. +# The source directories of Go packages are placed into a non-writable location. 
+rm -rf "${kind_deploy_path}" +cp -r "${ods_pipeline_dir}/deploy" "${kind_deploy_path}" +chmod -R u+w "${kind_deploy_path}" + if [ -f "${kind_values_dir}/bitbucket-auth" ]; then - BITBUCKET_AUTH=$(cat "${kind_values_dir}/bitbucket-auth") + bitbucket_auth=$(cat "${kind_values_dir}/bitbucket-auth") fi if [ -f "${kind_values_dir}/nexus-auth" ]; then - NEXUS_AUTH=$(cat "${kind_values_dir}/nexus-auth") + nexus_auth=$(cat "${kind_values_dir}/nexus-auth") fi if [ -f "${kind_values_dir}/sonar-auth" ]; then - SONAR_AUTH=$(cat "${kind_values_dir}/sonar-auth") + sonar_auth=$(cat "${kind_values_dir}/sonar-auth") fi -if [ ! -e "${HELM_GENERATED_VALUES_FILE}" ]; then - echo "setup:" > "${HELM_GENERATED_VALUES_FILE}" +if [ ! -e "${helm_generated_values_file}" ]; then + echo "setup:" > "${helm_generated_values_file}" +fi +if [ -f "${kind_values_dir}/bitbucket-${url_suffix}" ]; then + bitbucket_url=$(cat "${kind_values_dir}/bitbucket-${url_suffix}") + echo " bitbucketUrl: '${bitbucket_url}'" >> "${helm_generated_values_file}" fi -if [ -f "${kind_values_dir}/bitbucket-${URL_SUFFIX}" ]; then - BITBUCKET_URL=$(cat "${kind_values_dir}/bitbucket-${URL_SUFFIX}") - echo " bitbucketUrl: '${BITBUCKET_URL}'" >> "${HELM_GENERATED_VALUES_FILE}" +if [ -f "${kind_values_dir}/nexus-${url_suffix}" ]; then + nexus_url=$(cat "${kind_values_dir}/nexus-${url_suffix}") + echo " nexusUrl: '${nexus_url}'" >> "${helm_generated_values_file}" fi -if [ -f "${kind_values_dir}/nexus-${URL_SUFFIX}" ]; then - NEXUS_URL=$(cat "${kind_values_dir}/nexus-${URL_SUFFIX}") - echo " nexusUrl: '${NEXUS_URL}'" >> "${HELM_GENERATED_VALUES_FILE}" +if [ -f "${kind_values_dir}/sonar-${url_suffix}" ]; then + sonar_url=$(cat "${kind_values_dir}/sonar-${url_suffix}") + echo " sonarUrl: '${sonar_url}'" >> "${helm_generated_values_file}" fi -if [ -f "${kind_values_dir}/sonar-${URL_SUFFIX}" ]; then - SONAR_URL=$(cat "${kind_values_dir}/sonar-${URL_SUFFIX}") - echo " sonarUrl: '${SONAR_URL}'" >> "${HELM_GENERATED_VALUES_FILE}" + 
+values_arg="${kind_deploy_path}/ods-pipeline/values.kind.yaml" +if [ "$(cat "${helm_generated_values_file}")" != "setup:" ]; then + values_arg="${values_arg},${helm_generated_values_file}" fi -"${ODS_PIPELINE_DIR}"/deploy/install.sh \ +cd "${kind_deploy_path}" +sh ./install.sh \ --aqua-auth "unavailable:unavailable" \ --aqua-scanner-url "none" \ - --bitbucket-auth "${BITBUCKET_AUTH}" \ - --nexus-auth "${NEXUS_AUTH}" \ - --sonar-auth "${SONAR_AUTH}" \ - -f "./ods-pipeline/values.kind.yaml,${HELM_GENERATED_VALUES_FILE}" "$@" + --bitbucket-auth "${bitbucket_auth}" \ + --nexus-auth "${nexus_auth}" \ + --sonar-auth "${sonar_auth}" \ + -f "${values_arg}" "$@" diff --git a/scripts/install-tekton-pipelines.sh b/scripts/install-tekton-pipelines.sh index 78782c0d..878655a0 100755 --- a/scripts/install-tekton-pipelines.sh +++ b/scripts/install-tekton-pipelines.sh @@ -1,39 +1,39 @@ #!/bin/bash set -eu -KUBE_CONTEXT="--context kind-kind" -KUBECTL_BIN="kubectl $KUBE_CONTEXT" +kube_context="--context kind-ods-pipeline" +kubectl_bin="kubectl $kube_context" -# Tekton version is aligned with Red Hat OpenShift Pipelines General Availability 1.6. -# See https://docs.openshift.com/container-platform/4.9/cicd/pipelines/op-release-notes.html. -TKN_VERSION="v0.41.1" -TKN_DASHBOARD_VERSION="v0.17.0" +# Tekton version is aligned with Red Hat OpenShift Pipelines General Availability 1.10. +# See https://docs.openshift.com/container-platform/latest/cicd/pipelines/op-release-notes.html. +tkn_version="v0.44.4" +tkn_dashboard_version="v0.17.0" -INSTALL_TKN_DASHBOARD="false" +install_tkn_dashboard="false" -if ! which kubectl &> /dev/null; then +if ! 
command -v kubectl &> /dev/null; then echo "kubectl is required" fi -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; - --tekton-dashboard) INSTALL_TKN_DASHBOARD="true";; + --tekton-dashboard) install_tkn_dashboard="true";; *) echo "Unknown parameter passed: $1"; exit 1;; esac; shift; done # Install Tekton # https://tekton.dev/docs/getting-started/#installation -if ! $KUBECTL_BIN get namespace tekton-pipelines &> /dev/null; then +if ! $kubectl_bin get namespace tekton-pipelines &> /dev/null; then echo "Installing Tekton ..." - $KUBECTL_BIN apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/${TKN_VERSION}/release.notags.yaml + $kubectl_bin apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/${tkn_version}/release.notags.yaml - if [ "${INSTALL_TKN_DASHBOARD}" != "false" ]; then + if [ "${install_tkn_dashboard}" != "false" ]; then echo "Installing Tekton Dashboard..." - $KUBECTL_BIN apply --filename https://storage.googleapis.com/tekton-releases/dashboard/previous/${TKN_DASHBOARD_VERSION}/tekton-dashboard-release.yaml + $kubectl_bin apply --filename https://storage.googleapis.com/tekton-releases/dashboard/previous/${tkn_dashboard_version}/tekton-dashboard-release.yaml fi else echo "Tekton already installed." diff --git a/scripts/kind-with-registry.sh b/scripts/kind-with-registry.sh index 8a305b26..d8ff40c5 100755 --- a/scripts/kind-with-registry.sh +++ b/scripts/kind-with-registry.sh @@ -19,89 +19,94 @@ set -o errexit -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} +if ! command -v kind >/dev/null 2>&1; then + echo "kind is not installed. 
Please see https://kind.sigs.k8s.io/" + exit 1 +fi # desired cluster name; default is "kind" -KIND_CLUSTER_NAME="kind" +# desired cluster name; default is "ods-pipeline" -RECREATE_KIND_CLUSTER="false" -REGISTRY_PORT="5000" +kind_cluster_name="ods-pipeline" +recreate_kind_cluster="false" +registry_port="5000" +kind_mount_path="/tmp/ods-pipeline-kind-mount" # K8S version is aligned with OpenShift GA 4.11. # See https://docs.openshift.com/container-platform/4.11/release_notes/ocp-4-11-release-notes.html -K8S_VERSION="v1.24.7" +k8s_version="v1.24.7" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; - --name) KIND_CLUSTER_NAME="$2"; shift;; - --name=*) KIND_CLUSTER_NAME="${1#*=}";; + --name) kind_cluster_name="$2"; shift;; + --name=*) kind_cluster_name="${1#*=}";; - --recreate) RECREATE_KIND_CLUSTER="true";; + --recreate) recreate_kind_cluster="true";; - --registry-port) REGISTRY_PORT="$2"; shift;; - --registry-port=*) REGISTRY_PORT="${1#*=}";; + --registry-port) registry_port="$2"; shift;; + --registry-port=*) registry_port="${1#*=}";; *) echo "Unknown parameter passed: $1"; exit 1;; esac; shift; done -kind_version=$(kind version) -REGISTRY_NAME="${KIND_CLUSTER_NAME}-registry" +registry_name="${kind_cluster_name}-registry" reg_ip_selector='{{.NetworkSettings.Networks.kind.IPAddress}}' reg_network='kind' -case "${kind_version}" in - "kind v0.7."* | "kind v0.6."* | "kind v0.5."*) - reg_ip_selector='{{.NetworkSettings.IPAddress}}' - reg_network='bridge' - ;; -esac # create registry container unless it already exists -running="$(docker inspect -f '{{.State.Running}}' "${REGISTRY_NAME}" 2>/dev/null || true)" +running="$(docker inspect -f '{{.State.Running}}' "${registry_name}" 2>/dev/null || true)" # If the registry already exists, but is in the wrong network, we have to # re-create it. 
if [ "${running}" = 'true' ]; then - reg_ip="$(docker inspect -f ${reg_ip_selector} "${REGISTRY_NAME}")" + reg_ip="$(docker inspect -f ${reg_ip_selector} "${registry_name}")" if [ "${reg_ip}" = '' ]; then - docker kill "${REGISTRY_NAME}" - docker rm "${REGISTRY_NAME}" + docker kill "${registry_name}" + docker rm "${registry_name}" running="false" fi fi if [ "${running}" != 'true' ]; then - if [ "${reg_network}" != "bridge" ]; then - docker network create "${reg_network}" || true + net_driver=$(docker network inspect "${reg_network}" -f '{{.Driver}}' || true) + if [ "${net_driver}" != "bridge" ]; then + docker network create "${reg_network}" + fi + if docker inspect "${registry_name}" &>/dev/null; then + docker rm "${registry_name}" fi - docker run \ - -d --restart=always -p "${REGISTRY_PORT}:5000" --name "${REGISTRY_NAME}" --net "${reg_network}" \ + -d --restart=always -p "${registry_port}:5000" --name "${registry_name}" --net "${reg_network}" \ registry:2 fi -reg_ip="$(docker inspect -f ${reg_ip_selector} "${REGISTRY_NAME}")" +reg_ip="$(docker inspect -f ${reg_ip_selector} "${registry_name}")" if [ "${reg_ip}" = "" ]; then echo "Error creating registry: no IPAddress found at: ${reg_ip_selector}" exit 1 fi -echo "Registry IP: ${reg_ip}" -if [ "${RECREATE_KIND_CLUSTER}" == "true" ]; then - kind delete cluster --name "${KIND_CLUSTER_NAME}" +if [ "${recreate_kind_cluster}" == "false" ]; then + if kind get clusters | grep "${kind_cluster_name}" &>/dev/null; then + echo "Reusing existing cluster ..." + exit 0 + fi +fi + +if [ "${recreate_kind_cluster}" == "true" ]; then + kind delete cluster --name "${kind_cluster_name}" fi # create a cluster with the local registry enabled in containerd -cat < /dev/null; then + echo "No existing Bitbucket container ${BITBUCKET_SERVER_CONTAINER_NAME} found ..." + else + echo "Reusing existing Bitbucket container ${BITBUCKET_SERVER_CONTAINER_NAME} ..." 
+ exit 0 + fi +fi + echo "Run Postgres container" docker rm -f ${BITBUCKET_POSTGRES_CONTAINER_NAME} || true docker run --name ${BITBUCKET_POSTGRES_CONTAINER_NAME} \ @@ -61,21 +74,21 @@ docker run --name ${BITBUCKET_SERVER_CONTAINER_NAME} \ -d --net kind -p "${HOST_HTTP_PORT}:7990" -p 7999:7999 \ "${BITBUCKET_SERVER_IMAGE_NAME}:${BITBUCKET_SERVER_IMAGE_TAG}" -if ! "${SCRIPT_DIR}/waitfor-bitbucket.sh" ; then +if ! sh "${SCRIPT_DIR}/waitfor-bitbucket.sh" ; then docker logs ${BITBUCKET_SERVER_CONTAINER_NAME} exit 1 fi echo "Launch TLS proxy" TLS_CONTAINER_NAME="${BITBUCKET_SERVER_CONTAINER_NAME}-tls" -"${SCRIPT_DIR}/run-tls-proxy.sh" \ +sh "${SCRIPT_DIR}/run-tls-proxy.sh" \ --container-name "${TLS_CONTAINER_NAME}" \ --https-port "${HOST_HTTPS_PORT}" \ --nginx-conf "nginx-bitbucket.conf" # Write values / secrets so that it can be picked up by install.sh later. mkdir -p "${kind_values_dir}" -echo -n "https://${TLS_CONTAINER_NAME}.kind:${HOST_HTTPS_PORT}" > "${kind_values_dir}/bitbucket-https" -echo -n "http://${BITBUCKET_SERVER_CONTAINER_NAME}.kind:${HOST_HTTP_PORT}" > "${kind_values_dir}/bitbucket-http" -echo -n "admin:NzU0OTk1MjU0NjEzOpzj5hmFNAaawvupxPKpcJlsfNgP" > "${kind_values_dir}/bitbucket-auth" -echo -n "webhook:s3cr3t" > "${kind_values_dir}/bitbucket-webhook-secret" +printf "https://${TLS_CONTAINER_NAME}.kind:${HOST_HTTPS_PORT}" > "${kind_values_dir}/bitbucket-https" +printf "http://${BITBUCKET_SERVER_CONTAINER_NAME}.kind:${HOST_HTTP_PORT}" > "${kind_values_dir}/bitbucket-http" +printf "admin:NzU0OTk1MjU0NjEzOpzj5hmFNAaawvupxPKpcJlsfNgP" > "${kind_values_dir}/bitbucket-auth" +printf "webhook:s3cr3t" > "${kind_values_dir}/bitbucket-webhook-secret" diff --git a/scripts/run-nexus.sh b/scripts/run-nexus.sh index 731798a8..5f02e483 100755 --- a/scripts/run-nexus.sh +++ b/scripts/run-nexus.sh @@ -15,19 +15,32 @@ NEXUS_URL= IMAGE_NAME="ods-test-nexus" CONTAINER_NAME="ods-test-nexus" NEXUS_IMAGE_TAG="3.30.1" -kind_values_dir="${ODS_PIPELINE_DIR}/deploy/.kind-values" 
+kind_values_dir="/tmp/ods-pipeline-kind-values" +mkdir -p "${kind_values_dir}" DOCKER_CONTEXT_DIR="${ODS_PIPELINE_DIR}/test/testdata/private-cert" +reuse="false" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; -i|--insecure) INSECURE="--insecure";; + -r|--reuse) reuse="true";; + *) echo "Unknown parameter passed: $1"; exit 1;; esac; shift; done +if [ "${reuse}" = "true" ]; then + if ! docker inspect ${CONTAINER_NAME} &> /dev/null; then + echo "No existing Nexus container ${CONTAINER_NAME} found ..." + else + echo "Reusing existing Nexus container ${CONTAINER_NAME} ..." + exit 0 + fi +fi + echo "Run container using image tag ${NEXUS_IMAGE_TAG}" docker rm -f ${CONTAINER_NAME} || true cd "${SCRIPT_DIR}"/nexus @@ -35,7 +48,7 @@ docker build -t ${IMAGE_NAME} -f "Dockerfile.$(uname -m)" "${DOCKER_CONTEXT_DIR} cd - &> /dev/null docker run -d -p "${HOST_HTTP_PORT}:8081" --net kind --name ${CONTAINER_NAME} ${IMAGE_NAME} -if ! "${SCRIPT_DIR}/waitfor-nexus.sh" ; then +if ! 
sh "${SCRIPT_DIR}/waitfor-nexus.sh" ; then docker logs ${CONTAINER_NAME} exit 1 fi @@ -81,19 +94,19 @@ echo "Setup developer role" runJsonScript "createRole" "-d @${SCRIPT_DIR}/nexus/developer-role.json" echo "Setup developer user" -sed "s|@developer_password@|${DEVELOPER_PASSWORD}|g" "${SCRIPT_DIR}"/nexus/developer-user.json > "${SCRIPT_DIR}"/nexus/developer-user-with-password.json -runJsonScript "createUser" "-d @${SCRIPT_DIR}/nexus/developer-user-with-password.json" -rm "${SCRIPT_DIR}"/nexus/developer-user-with-password.json +sed "s|@developer_password@|${DEVELOPER_PASSWORD}|g" "${SCRIPT_DIR}"/nexus/developer-user.json > /tmp/nexus-developer-user-with-password.json +runJsonScript "createUser" "-d @/tmp/nexus-developer-user-with-password.json" +rm /tmp/nexus-developer-user-with-password.json echo "Launch TLS proxy" TLS_CONTAINER_NAME="${CONTAINER_NAME}-tls" -"${SCRIPT_DIR}/run-tls-proxy.sh" \ +sh "${SCRIPT_DIR}/run-tls-proxy.sh" \ --container-name "${TLS_CONTAINER_NAME}" \ --https-port "${HOST_HTTPS_PORT}" \ --nginx-conf "nginx-nexus.conf" # Write values / secrets so that it can be picked up by install.sh later. 
mkdir -p "${kind_values_dir}" -echo -n "https://${TLS_CONTAINER_NAME}.kind:${HOST_HTTPS_PORT}" > "${kind_values_dir}/nexus-https" -echo -n "http://${CONTAINER_NAME}.kind:${HOST_HTTP_PORT}" > "${kind_values_dir}/nexus-http" -echo -n "${DEVELOPER_USERNAME}:${DEVELOPER_PASSWORD}" > "${kind_values_dir}/nexus-auth" +printf "https://${TLS_CONTAINER_NAME}.kind:${HOST_HTTPS_PORT}" > "${kind_values_dir}/nexus-https" +printf "http://${CONTAINER_NAME}.kind:${HOST_HTTP_PORT}" > "${kind_values_dir}/nexus-http" +printf "${DEVELOPER_USERNAME}:${DEVELOPER_PASSWORD}" > "${kind_values_dir}/nexus-auth" diff --git a/scripts/run-sonarqube.sh b/scripts/run-sonarqube.sh index 2ff46c87..2072b3ed 100755 --- a/scripts/run-sonarqube.sh +++ b/scripts/run-sonarqube.sh @@ -2,7 +2,6 @@ set -ue SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} INSECURE="" HOST_HTTP_PORT="9000" @@ -14,18 +13,31 @@ SONAR_USERNAME="admin" SONAR_PASSWORD="admin" SONAR_EDITION="community" SONAR_IMAGE_TAG="${SONAR_VERSION}-${SONAR_EDITION}" -kind_values_dir="${ODS_PIPELINE_DIR}/deploy/.kind-values" +kind_values_dir="/tmp/ods-pipeline-kind-values" +mkdir -p "${kind_values_dir}" +reuse="false" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; -i|--insecure) INSECURE="--insecure";; + -r|--reuse) reuse="true";; + *) echo "Unknown parameter passed: $1"; exit 1;; esac; shift; done +if [ "${reuse}" = "true" ]; then + if [ "$(docker inspect ${CONTAINER_NAME} -f '{{.State.Running}}')" = "true" ]; then + echo "Reusing running SonarQube container ${CONTAINER_NAME} ..." + exit 0 + else + echo "No running SonarQube container ${CONTAINER_NAME} found ..." 
+ fi +fi + echo "Run container using image tag ${SONAR_IMAGE_TAG}" docker rm -f ${CONTAINER_NAME} || true @@ -54,7 +66,7 @@ cd - &> /dev/null docker run -d --net kind --name ${CONTAINER_NAME} -e SONAR_ES_BOOTSTRAP_CHECKS_DISABLE=true -p "${HOST_HTTP_PORT}:9000" ${IMAGE_NAME}:${SONAR_IMAGE_TAG} SONARQUBE_URL="http://localhost:${HOST_HTTP_PORT}" -if ! "${SCRIPT_DIR}/waitfor-sonarqube.sh" ; then +if ! sh "${SCRIPT_DIR}/waitfor-sonarqube.sh" ; then docker logs ${CONTAINER_NAME} exit 1 fi @@ -68,13 +80,13 @@ token=$(echo "${tokenResponse}" | jq -r .token) echo "Launch TLS proxy" TLS_CONTAINER_NAME="${CONTAINER_NAME}-tls" -"${SCRIPT_DIR}/run-tls-proxy.sh" \ +sh "${SCRIPT_DIR}/run-tls-proxy.sh" \ --container-name "${TLS_CONTAINER_NAME}" \ --https-port "${HOST_HTTPS_PORT}" \ --nginx-conf "nginx-sonarqube.conf" # Write values / secrets so that it can be picked up by install.sh later. mkdir -p "${kind_values_dir}" -echo -n "https://${TLS_CONTAINER_NAME}.kind:${HOST_HTTPS_PORT}" > "${kind_values_dir}/sonar-https" -echo -n "http://${CONTAINER_NAME}.kind:${HOST_HTTP_PORT}" > "${kind_values_dir}/sonar-http" -echo -n ":${token}" > "${kind_values_dir}/sonar-auth" +printf "https://${TLS_CONTAINER_NAME}.kind:${HOST_HTTPS_PORT}" > "${kind_values_dir}/sonar-https" +printf "http://${CONTAINER_NAME}.kind:${HOST_HTTP_PORT}" > "${kind_values_dir}/sonar-http" +printf ":${token}" > "${kind_values_dir}/sonar-auth" diff --git a/scripts/run-tls-proxy.sh b/scripts/run-tls-proxy.sh index 7f1e3e6e..4fe48c25 100755 --- a/scripts/run-tls-proxy.sh +++ b/scripts/run-tls-proxy.sh @@ -8,7 +8,7 @@ https_port="8443" container_name="" nginx_conf="" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; diff --git a/scripts/waitfor-bitbucket.sh b/scripts/waitfor-bitbucket.sh index c0a30c10..64f0c027 100755 --- a/scripts/waitfor-bitbucket.sh +++ b/scripts/waitfor-bitbucket.sh @@ -7,7 +7,7 @@ set -ue INSECURE="" BITBUCKET_SERVER_HOST_PORT="7990" -while [[ "$#" -gt 0 ]]; do 
+while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; @@ -28,7 +28,7 @@ until [ $n -ge 30 ]; do echo " success" break else - echo -n "." + printf "." sleep 10 n=$((n+1)) fi diff --git a/scripts/waitfor-nexus.sh b/scripts/waitfor-nexus.sh index ce53c363..139c936d 100755 --- a/scripts/waitfor-nexus.sh +++ b/scripts/waitfor-nexus.sh @@ -5,7 +5,7 @@ INSECURE="" HOST_PORT="8081" NEXUS_URL= -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; @@ -27,7 +27,7 @@ function waitForReady { echo " success" break else - echo -n "." + printf "." sleep 10 n=$((n+1)) fi diff --git a/scripts/waitfor-sonarqube.sh b/scripts/waitfor-sonarqube.sh index 3e9c6aa7..d776b1fa 100755 --- a/scripts/waitfor-sonarqube.sh +++ b/scripts/waitfor-sonarqube.sh @@ -6,7 +6,7 @@ HOST_PORT="9000" SONAR_USERNAME="admin" SONAR_PASSWORD="admin" -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; @@ -29,7 +29,7 @@ until [ $n -ge 30 ]; do echo " success" break else - echo -n "." + printf "." sleep 10 n=$((n+1)) fi diff --git a/scripts/web-terminal-install.sh b/scripts/web-terminal-install.sh index ceb6b7b2..dbd0bac5 100755 --- a/scripts/web-terminal-install.sh +++ b/scripts/web-terminal-install.sh @@ -5,7 +5,7 @@ HELM_PLUGIN_DIFF_VERSION=3.3.2 REPOSITORY="" NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) -while [[ "$#" -gt 0 ]]; do +while [ "$#" -gt 0 ]; do # shellcheck disable=SC2034 case $1 in