diff --git a/.gimps.yaml b/.gimps.yaml index e66c16d..6276ebe 100644 --- a/.gimps.yaml +++ b/.gimps.yaml @@ -19,9 +19,14 @@ sets: - name: kubernetes patterns: - 'k8s.io/**' - - '*.k8s.io/**' + - 'sigs.k8s.io/controller-runtime/**' + - 'sigs.k8s.io/controller-tools/**' + - 'sigs.k8s.io/yaml/**' - 'github.com/kcp-dev/client-go/**' - 'github.com/kcp-dev/kubernetes/**' - name: kcp patterns: - 'github.com/kcp-dev/kcp/**' + - 'github.com/kcp-dev/multicluster-provider/**' + - 'github.com/kcp-dev/code-generator/**' + - 'sigs.k8s.io/multicluster-runtime/**' diff --git a/go.mod b/go.mod index 614fca9..882474f 100644 --- a/go.mod +++ b/go.mod @@ -5,10 +5,10 @@ go 1.24.0 replace github.com/kcp-dev/api-syncagent/sdk => ./sdk replace ( - k8s.io/apiextensions-apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250313100806-0011b8c72acd - k8s.io/apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250313100806-0011b8c72acd - k8s.io/kms => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kms v0.0.0-20250313100806-0011b8c72acd - sigs.k8s.io/controller-runtime => github.com/kcp-dev/controller-runtime v0.19.0-kcp.1 + k8s.io/api => github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20250425143807-ddbe171670d8 + k8s.io/apiextensions-apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250425143807-ddbe171670d8 + k8s.io/apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250425143807-ddbe171670d8 + k8s.io/kms => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kms v0.0.0-20250425143807-ddbe171670d8 ) require ( @@ -19,9 +19,10 @@ require ( github.com/google/go-cmp v0.7.0 github.com/kcp-dev/api-syncagent/sdk v0.0.0-00010101000000-000000000000 github.com/kcp-dev/code-generator/v2 v2.3.1 - github.com/kcp-dev/kcp v0.27.1 + github.com/kcp-dev/kcp v0.0.0-20250606081858-d77640860cac github.com/kcp-dev/kcp/sdk v0.27.1 
github.com/kcp-dev/logicalcluster/v3 v3.0.5 + github.com/kcp-dev/multicluster-provider v0.1.0 github.com/openshift-eng/openshift-goimports v0.0.0-20230304234052-c70783e636f2 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 github.com/spf13/pflag v1.0.6 @@ -29,16 +30,17 @@ require ( github.com/tidwall/sjson v1.2.5 go.uber.org/zap v1.27.0 k8c.io/reconciler v0.5.0 - k8s.io/api v0.31.6 - k8s.io/apiextensions-apiserver v0.31.6 - k8s.io/apimachinery v0.31.6 - k8s.io/apiserver v0.31.6 - k8s.io/client-go v0.31.6 - k8s.io/code-generator v0.31.6 - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 - sigs.k8s.io/controller-runtime v0.18.3 + k8s.io/api v0.32.3 + k8s.io/apiextensions-apiserver v0.32.3 + k8s.io/apimachinery v0.32.3 + k8s.io/apiserver v0.32.3 + k8s.io/client-go v0.32.3 + k8s.io/code-generator v0.32.3 + k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 + k8s.io/utils v0.0.0-20241210054802-24370beab758 + sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/controller-tools v0.16.5 + sigs.k8s.io/multicluster-runtime v0.20.4-alpha.7 sigs.k8s.io/yaml v1.4.0 ) @@ -70,9 +72,9 @@ require ( github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.24.1 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.22.1 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea // indirect github.com/google/uuid v1.6.0 // indirect @@ -80,12 +82,11 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huandu/xstrings v1.5.0 // indirect - github.com/imdario/mergo v0.3.16 // indirect 
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kcp-dev/apimachinery/v2 v2.0.1-0.20250223115924-431177b024f3 // indirect - github.com/kcp-dev/client-go v0.0.0-20250223133118-3dea338dc267 // indirect + github.com/kcp-dev/apimachinery/v2 v2.0.1-0.20250512171935-ebb573a40077 // indirect + github.com/kcp-dev/client-go v0.0.0-20250512170835-5457a0f4bd98 // indirect github.com/klauspost/compress v1.17.11 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.9.0 // indirect @@ -131,17 +132,17 @@ require ( go.opentelemetry.io/otel/trace v1.35.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect golang.org/x/mod v0.24.0 // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/term v0.31.0 // indirect + golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.11.0 // indirect - golang.org/x/tools v0.31.0 // indirect + golang.org/x/tools v0.32.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect @@ -151,11 +152,13 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 
v3.0.1 // indirect - k8s.io/component-base v0.31.6 // indirect - k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect + k8s.io/component-base v0.32.3 // indirect + k8s.io/component-helpers v0.32.3 // indirect + k8s.io/controller-manager v0.32.3 // indirect + k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kubernetes v1.31.6 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect + k8s.io/kubernetes v1.32.3 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index 6e1c962..13c9c56 100644 --- a/go.sum +++ b/go.sum @@ -73,16 +73,14 @@ github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnD github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= -github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 
h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.24.1 h1:jsBCtxG8mM5wiUJDSGUqU0K7Mtr3w7Eyv00rw4DiZxI= -github.com/google/cel-go v0.24.1/go.mod h1:Hdf9TqOaTNSFQA1ybQaRqATVoK7m/zcf7IMhGXP5zI8= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.22.1 h1:AfVXx3chM2qwoSbM7Da8g8hX8OVSkBFwX+rz2+PcK40= +github.com/google/cel-go v0.22.1/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -109,36 +107,36 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod 
h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kcp-dev/apimachinery/v2 v2.0.1-0.20250223115924-431177b024f3 h1:YwNX7ZIpQXg9u5vav/fobmf4nnO0WhbELWaL3X74Oe4= -github.com/kcp-dev/apimachinery/v2 v2.0.1-0.20250223115924-431177b024f3/go.mod h1:n0+EV+LGKl1MXXqGbGcn0AaBv7hdKsdazSYuq8nM8Us= -github.com/kcp-dev/client-go v0.0.0-20250223133118-3dea338dc267 h1:Ec2/Mh7mVvboBFol0S8u30arfA7oyk/VtHL9Xojjvfs= -github.com/kcp-dev/client-go v0.0.0-20250223133118-3dea338dc267/go.mod h1:1lEs8b8BYzGrMr7Q8Fs7cNVaDAWogu5lLkz5t6HtRLI= +github.com/kcp-dev/apimachinery/v2 v2.0.1-0.20250512171935-ebb573a40077 h1:lDi9nZ75ypmRJwDFXUN70Cdu8+HxAjPU1kcnn+l4MvI= +github.com/kcp-dev/apimachinery/v2 v2.0.1-0.20250512171935-ebb573a40077/go.mod h1:jnMZxVnCuKlkIXc4J1Qtmy1Lyo171CDF/RQhNAo0tvA= +github.com/kcp-dev/client-go v0.0.0-20250512170835-5457a0f4bd98 h1:A1Hc2zVGd9LRSQqlGGqfzin+4skWJVcsNXw2+MjU6z4= +github.com/kcp-dev/client-go v0.0.0-20250512170835-5457a0f4bd98/go.mod h1:79pmlxmvE/hohqD/qvhKaaoXmNDF/uhKnnAO6Vf5hZk= github.com/kcp-dev/code-generator/v2 v2.3.1 h1:FnGGaDeO033d6wg1gBndhZzO/PZAmU0NKVCretEpQbQ= github.com/kcp-dev/code-generator/v2 v2.3.1/go.mod h1:uvIHtZzfv8qPzW9Hym+kL4aNpZaiTBONvPJkTWVVCBk= -github.com/kcp-dev/controller-runtime v0.19.0-kcp.1 h1:mbCyVzWuJpg+pkzIkIKLltiOgOSiQ3bqWmHi2mftzgc= -github.com/kcp-dev/controller-runtime v0.19.0-kcp.1/go.mod h1:jwK5sBnpu/xJJ+xdpSzzI0aM52E/EvF0uLF9bR61h/Y= -github.com/kcp-dev/kcp v0.27.1 h1:VJz5CK6GkoiKVZfolXdaPpDdn04MD9XXnee+yytaFrw= -github.com/kcp-dev/kcp v0.27.1/go.mod h1:PGNTZLn/iaN6h5oyoxlCd2qOZx1C6hOYqpQ+iyQkZv8= +github.com/kcp-dev/kcp v0.0.0-20250606081858-d77640860cac 
h1:crNaYl7q29wB8Z9hCSAO7azyHJaX4cIaNdf8cgITa18= +github.com/kcp-dev/kcp v0.0.0-20250606081858-d77640860cac/go.mod h1:3CTr37IKGXwKYlqflKI84i6pL84viFPLrIXAhFc4DPs= github.com/kcp-dev/kcp/sdk v0.27.1 h1:jBVdrZoJd5hy2RqaBnmCCzldimwOqDkf8FXtNq5HaWA= github.com/kcp-dev/kcp/sdk v0.27.1/go.mod h1:3eRgW42d81Ng60DbG1xbne0FSS2znpcN/GUx4rqJgUo= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250313100806-0011b8c72acd h1:qkRYC+2O4mZ4F61A6I4bv0m3ATQGqT4reCXq4bnkDec= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250313100806-0011b8c72acd/go.mod h1:rFKE/IMrmvZBbQEEVOuqDa5OtugDDxnLi5kG+MFr20w= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250313100806-0011b8c72acd h1:VDSIPvVTGTUXtdaJnVss8tm7B95fubFZCNX6UHQ8/8I= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250313100806-0011b8c72acd/go.mod h1:jECRKuvZ/CcXI+zZTe44z/c7PzPyygRGLtBFAcmwSZY= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kms v0.0.0-20250313100806-0011b8c72acd h1:Q3yOdqA0S2G3xpFRQEa/2IxBnngUhV12hE3+hSjPGyE= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kms v0.0.0-20250313100806-0011b8c72acd/go.mod h1:gClzb5q8LLAagWlaL9S/rt8IcU3iY6gRARKN09DY4o8= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20250425143807-ddbe171670d8 h1:oPP9XnpYpTv0dvqTGUozYbgvdB16kzX9oZ2r5QfoQxE= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20250425143807-ddbe171670d8/go.mod h1:7sL6AnFDKD/ke3g56SKzA+hLRWWuhujrxUBvRYlGwD0= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250425143807-ddbe171670d8 h1:lhZjzj5K9bVp33MfkM6KbTH2etE9Wi5e/90jTFmD8NI= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250425143807-ddbe171670d8/go.mod h1:eOZI4fqbsFue7NxS/nkKW7fmblSsm2N9u3WT6CqL4Sg= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250425143807-ddbe171670d8 h1:om/ndI5xQPQ4ho8z6A5uckh94QfgLSWt+WHfsRvH6n8= 
+github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250425143807-ddbe171670d8/go.mod h1:wQatO8gBJa1b4KK9fGISKorQLQUleYdQiRoPHHYZXAw= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kms v0.0.0-20250425143807-ddbe171670d8 h1:z3FDRYca6rhsyHmF8HbwrQ5JTUOygdwbzvGlFTi/MzI= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kms v0.0.0-20250425143807-ddbe171670d8/go.mod h1:6c0fX+GV12dynYuruueesB5Emf9PwK+Jo/0UijPRhFk= github.com/kcp-dev/logicalcluster/v3 v3.0.5 h1:JbYakokb+5Uinz09oTXomSUJVQsqfxEvU4RyHUYxHOU= github.com/kcp-dev/logicalcluster/v3 v3.0.5/go.mod h1:EWBUBxdr49fUB1cLMO4nOdBWmYifLbP1LfoL20KkXYY= +github.com/kcp-dev/multicluster-provider v0.1.0 h1:LS4z4d6AbsYg7Lj9Hlmkbv1M+ZIyw4laNpSsUgF3tRI= +github.com/kcp-dev/multicluster-provider v0.1.0/go.mod h1:8a53s17AhgsEq5mL7VDHZ30eflhu7sFS0isHG1zRz0Y= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= @@ -245,26 +243,26 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= -go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= -go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8= -go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg= +go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= +go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= -go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M= -go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0= -go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA= -go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw= -go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok= -go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ= +go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= +go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= 
+go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= +go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= +go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= +go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= @@ -296,10 +294,10 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= @@ -308,43 +306,43 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod 
h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= @@ -371,32 +369,38 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8c.io/reconciler v0.5.0 h1:BHpelg1UfI/7oBFctqOq8sX6qzflXpl3SlvHe7e8wak= k8c.io/reconciler v0.5.0/go.mod h1:pT1+SVcVXJQeBJhpJBXQ5XW64QnKKeYTnVlQf0dGE0k= -k8s.io/api 
v0.31.6 h1:ocWG/UhC9Mqp5oEfYWy9wCddbZiZyBAFTlBt0LVlhDg= -k8s.io/api v0.31.6/go.mod h1:i16xSiKMgVIVhsJMxfWq0mJbXA+Z7KhjPgYmwT41hl4= -k8s.io/apimachinery v0.31.6 h1:Pn96A0wHD0X8+l7QTdAzdLQPrpav1s8rU6A+v2/9UEY= -k8s.io/apimachinery v0.31.6/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.6 h1:51HT40qVIZ13BrHKeWxFuU52uoPnFhxTYJnv4+LTgp4= -k8s.io/client-go v0.31.6/go.mod h1:MEq7JQJelUQ0/4fMoPEUrc/OOFyGo/9LmGA38H6O6xY= -k8s.io/code-generator v0.31.6 h1:CX4/NGV5UIdt7+nYG/G4+eGHOvcXAlKWswUhPPOtPtc= -k8s.io/code-generator v0.31.6/go.mod h1:vbqDrvP5hJJ5S/jzBtyMJoH5kJBWZMo/DZwMYiOQniE= -k8s.io/component-base v0.31.6 h1:FgI25PuZtCp2n7AFpOaDpMQOLieFdrpAbpeoZu7VhDI= -k8s.io/component-base v0.31.6/go.mod h1:aVRrh8lAI1kSShFmwcKLhc3msQoUcmFWPBDf0sXaISM= -k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 h1:cErOOTkQ3JW19o4lo91fFurouhP8NcoBvb7CkvhZZpk= -k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/code-generator v0.32.3 h1:31p2TVzC9+hVdSkAFruAk3JY+iSfzrJ83Qij1yZutyw= +k8s.io/code-generator v0.32.3/go.mod h1:+mbiYID5NLsBuqxjQTygKM/DAdKpAjvBzrJd64NU1G8= +k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= +k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= +k8s.io/component-helpers v0.32.3 h1:9veHpOGTPLluqU4hAu5IPOwkOIZiGAJUhHndfVc5FT4= +k8s.io/component-helpers v0.32.3/go.mod h1:utTBXk8lhkJewBKNuNf32Xl3KT/0VV19DmiXU/SV4Ao= +k8s.io/controller-manager v0.32.3 h1:jBxZnQ24k6IMeWLyxWZmpa3QVS7ww+osAIzaUY/jqyc= +k8s.io/controller-manager v0.32.3/go.mod h1:out1L3DZjE/p7JG0MoMMIaQGWIkt3c+pKaswqSHgKsI= +k8s.io/gengo/v2 
v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= +k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kubernetes v1.31.6 h1:zVhgWDFHmIj51o5sNARmjdgNvpq4K2Smya8pS5vxqlc= -k8s.io/kubernetes v1.31.6/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= +k8s.io/kubernetes v1.32.3 h1:2A58BlNME8NwsMawmnM6InYo3Jf35Nw5G79q46kXwoA= +k8s.io/kubernetes v1.32.3/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1 h1:uOuSLOMBWkJH0TWa9X6l+mj5nZdm6Ay6Bli8HL8rNfk= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.20.4 
h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/controller-tools v0.16.5 h1:5k9FNRqziBPwqr17AMEPPV/En39ZBplLAdOwwQHruP4= sigs.k8s.io/controller-tools v0.16.5/go.mod h1:8vztuRVzs8IuuJqKqbXCSlXcw+lkAv/M2sTpg55qjMY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/multicluster-runtime v0.20.4-alpha.7 h1:AFlM/TFQaESxtCRX6scodEKensLhcbfGwXfjJIvoaT8= +sigs.k8s.io/multicluster-runtime v0.20.4-alpha.7/go.mod h1:2N2/c3p08bYC9eDaRs0dllTxgAm5xiLDSkmGZpWKyw4= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016 h1:kXv6kKdoEtedwuqMmkqhbkgvYKeycVbC8+iPCP9j5kQ= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= diff --git a/hack/tools.go b/hack/tools.go index 415a781..a4855ac 100644 --- a/hack/tools.go +++ b/hack/tools.go @@ -19,10 +19,11 @@ limitations under the License. 
package tools import ( - _ "github.com/kcp-dev/code-generator/v2" _ "github.com/openshift-eng/openshift-goimports" _ "k8c.io/reconciler/cmd/reconciler-gen" + _ "github.com/kcp-dev/code-generator/v2" + _ "k8s.io/code-generator/cmd/applyconfiguration-gen" _ "k8s.io/code-generator/cmd/client-gen" _ "sigs.k8s.io/controller-tools/cmd/controller-gen" diff --git a/internal/controller/apiexport/controller.go b/internal/controller/apiexport/controller.go index 7e8e302..c6c9ed2 100644 --- a/internal/controller/apiexport/controller.go +++ b/internal/controller/apiexport/controller.go @@ -39,7 +39,6 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/kontext" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -170,9 +169,7 @@ func (r *Reconciler) reconcile(ctx context.Context) error { r.createAPIExportReconciler(arsList, claimedResources, r.agentName, r.apiExportName), } - wsCtx := kontext.WithCluster(ctx, r.lcName) - - if err := reconciling.ReconcileAPIExports(wsCtx, factories, "", r.kcpClient); err != nil { + if err := reconciling.ReconcileAPIExports(ctx, factories, "", r.kcpClient); err != nil { return fmt.Errorf("failed to reconcile APIExport: %w", err) } diff --git a/internal/controller/apiresourceschema/controller.go b/internal/controller/apiresourceschema/controller.go index 3655867..6f49d10 100644 --- a/internal/controller/apiresourceschema/controller.go +++ b/internal/controller/apiresourceschema/controller.go @@ -45,7 +45,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/kontext" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -169,12 +168,11 
@@ func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, pubR arsName := r.getAPIResourceSchemaName(projectedCRD) // ensure ARS exists (don't try to reconcile it, it's basically entirely immutable) - wsCtx := kontext.WithCluster(ctx, r.lcName) ars := &kcpdevv1alpha1.APIResourceSchema{} - err = r.kcpClient.Get(wsCtx, types.NamespacedName{Name: arsName}, ars, &ctrlruntimeclient.GetOptions{}) + err = r.kcpClient.Get(ctx, types.NamespacedName{Name: arsName}, ars, &ctrlruntimeclient.GetOptions{}) if apierrors.IsNotFound(err) { - if err := r.createAPIResourceSchema(wsCtx, log, projectedCRD, arsName); err != nil { + if err := r.createAPIResourceSchema(ctx, log, projectedCRD, arsName); err != nil { return nil, fmt.Errorf("failed to create APIResourceSchema: %w", err) } } else if err != nil { diff --git a/internal/controller/sync/controller.go b/internal/controller/sync/controller.go index 314caff..ed7a2b6 100644 --- a/internal/controller/sync/controller.go +++ b/internal/controller/sync/controller.go @@ -32,18 +32,21 @@ import ( kcpcore "github.com/kcp-dev/kcp/sdk/apis/core" kcpdevcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + mccontroller "sigs.k8s.io/multicluster-runtime/pkg/controller" + mchandler "sigs.k8s.io/multicluster-runtime/pkg/handler" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile" + mcsource "sigs.k8s.io/multicluster-runtime/pkg/source" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" - 
"sigs.k8s.io/controller-runtime/pkg/kontext" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -55,12 +58,14 @@ const ( ) type Reconciler struct { - localClient ctrlruntimeclient.Client - vwClient ctrlruntimeclient.Client - log *zap.SugaredLogger - syncer *sync.ResourceSyncer - remoteDummy *unstructured.Unstructured - pubRes *syncagentv1alpha1.PublishedResource + localClient ctrlruntimeclient.Client + remoteManager mcmanager.Manager + log *zap.SugaredLogger + remoteDummy *unstructured.Unstructured + pubRes *syncagentv1alpha1.PublishedResource + localCRD *apiextensionsv1.CustomResourceDefinition + stateNamespace string + agentName string } // Create creates a new controller and importantly does *not* add it to the manager, @@ -68,14 +73,14 @@ type Reconciler struct { func Create( ctx context.Context, localManager manager.Manager, - virtualWorkspaceCluster cluster.Cluster, + remoteManager mcmanager.Manager, pubRes *syncagentv1alpha1.PublishedResource, discoveryClient *discovery.Client, stateNamespace string, agentName string, log *zap.SugaredLogger, numWorkers int, -) (controller.Controller, error) { +) (mccontroller.Controller, error) { log = log.Named(ControllerName) // find the local CRD so we know the actual local object scope @@ -103,48 +108,46 @@ func Create( remoteDummy.SetGroupVersionKind(remoteGVK) // create the syncer that holds the meat&potatoes of the synchronization logic - mutator := mutation.NewMutator(pubRes.Spec.Mutation) - syncer, err := sync.NewResourceSyncer(log, localManager.GetClient(), virtualWorkspaceCluster.GetClient(), pubRes, localCRD, mutator, stateNamespace, agentName) - if err != nil { - return nil, fmt.Errorf("failed to create syncer: %w", err) - } // setup the reconciler reconciler := &Reconciler{ - localClient: localManager.GetClient(), - vwClient: virtualWorkspaceCluster.GetClient(), - log: log, - remoteDummy: remoteDummy, - syncer: syncer, - 
pubRes: pubRes, + localClient: localManager.GetClient(), + remoteManager: remoteManager, + log: log, + remoteDummy: remoteDummy, + pubRes: pubRes, + stateNamespace: stateNamespace, + agentName: agentName, + localCRD: localCRD, } - ctrlOptions := controller.Options{ + ctrlOptions := mccontroller.Options{ Reconciler: reconciler, MaxConcurrentReconciles: numWorkers, SkipNameValidation: ptr.To(true), } - // It doesn't really matter what manager is used here, as starting/stopping happens - // outside of the manager's control anyway. - c, err := controller.NewUnmanaged(ControllerName, localManager, ctrlOptions) + log.Info("Setting up unmanaged controller...") + + // The manager parameter is mostly unused and will be removed in future CR versions. + c, err := mccontroller.NewUnmanaged(ControllerName, remoteManager, ctrlOptions) if err != nil { return nil, err } // watch the target resource in the virtual workspace - if err := c.Watch(source.Kind(virtualWorkspaceCluster.GetCache(), remoteDummy, &handler.TypedEnqueueRequestForObject[*unstructured.Unstructured]{})); err != nil { + if err := c.MultiClusterWatch(mcsource.TypedKind(remoteDummy, mchandler.TypedEnqueueRequestForObject[*unstructured.Unstructured]())); err != nil { return nil, err } // watch the source resource in the local cluster, but enqueue the origin remote object - enqueueRemoteObjForLocalObj := handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, o *unstructured.Unstructured) []reconcile.Request { + enqueueRemoteObjForLocalObj := handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, o *unstructured.Unstructured) []mcreconcile.Request { req := sync.RemoteNameForLocalObject(o) if req == nil { return nil } - return []reconcile.Request{*req} + return []mcreconcile.Request{*req} }) // only watch local objects that we own @@ -152,21 +155,27 @@ func Create( return sync.OwnedBy(u, agentName) }) - if err := c.Watch(source.Kind(localManager.GetCache(), localDummy, enqueueRemoteObjForLocalObj, 
nameFilter)); err != nil { + if err := c.Watch(source.TypedKind(localManager.GetCache(), localDummy, enqueueRemoteObjForLocalObj, nameFilter)); err != nil { return nil, err } + log.Info("Done setting up unmanaged controller.") + return c, nil } -func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { +func (r *Reconciler) Reconcile(ctx context.Context, request mcreconcile.Request) (reconcile.Result, error) { log := r.log.With("request", request, "cluster", request.ClusterName) log.Debug("Processing") - wsCtx := kontext.WithCluster(ctx, logicalcluster.Name(request.ClusterName)) + cl, err := r.remoteManager.GetCluster(ctx, request.ClusterName) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get cluster: %w", err) + } + vwClient := cl.GetClient() remoteObj := r.remoteDummy.DeepCopy() - if err := r.vwClient.Get(wsCtx, request.NamespacedName, remoteObj); ctrlruntimeclient.IgnoreNotFound(err) != nil { + if err := vwClient.Get(ctx, request.NamespacedName, remoteObj); ctrlruntimeclient.IgnoreNotFound(err) != nil { return reconcile.Result{}, fmt.Errorf("failed to retrieve remote object: %w", err) } @@ -181,7 +190,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( namespace = &corev1.Namespace{} key := types.NamespacedName{Name: remoteObj.GetNamespace()} - if err := r.vwClient.Get(wsCtx, key, namespace); err != nil { + if err := vwClient.Get(ctx, key, namespace); err != nil { return reconcile.Result{}, fmt.Errorf("failed to retrieve remote object's namespace: %w", err) } } @@ -196,22 +205,29 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( return reconcile.Result{}, nil } - syncContext := sync.NewContext(ctx, wsCtx) + cInfo := sync.NewClusterInfo(logicalcluster.Name(request.ClusterName)) // if desired, fetch the cluster path as well (some downstream service providers might make use of it, // but since it requires an additional 
permission claim, it's optional) if r.pubRes.Spec.EnableWorkspacePaths { lc := &kcpdevcorev1alpha1.LogicalCluster{} - if err := r.vwClient.Get(wsCtx, types.NamespacedName{Name: kcpdevcorev1alpha1.LogicalClusterName}, lc); err != nil { + if err := vwClient.Get(ctx, types.NamespacedName{Name: kcpdevcorev1alpha1.LogicalClusterName}, lc); err != nil { return reconcile.Result{}, fmt.Errorf("failed to retrieve remote logicalcluster: %w", err) } path := lc.Annotations[kcpcore.LogicalClusterPathAnnotationKey] - syncContext = syncContext.WithWorkspacePath(logicalcluster.NewPath(path)) + cInfo = cInfo.WithWorkspacePath(logicalcluster.NewPath(path)) } // sync main object - requeue, err := r.syncer.Process(syncContext, remoteObj) + mutator := mutation.NewMutator(r.pubRes.Spec.Mutation) + + syncer, err := sync.NewResourceSyncer(log, r.localClient, vwClient, r.pubRes, r.localCRD, mutator, r.stateNamespace, r.agentName) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create syncer: %w", err) + } + + requeue, err := syncer.Process(ctx, cInfo, remoteObj) if err != nil { return reconcile.Result{}, err } diff --git a/internal/controller/syncmanager/controller.go b/internal/controller/syncmanager/controller.go index 8a8d80b..c127193 100644 --- a/internal/controller/syncmanager/controller.go +++ b/internal/controller/syncmanager/controller.go @@ -18,22 +18,24 @@ package syncmanager import ( "context" - "errors" "fmt" - "github.com/kcp-dev/logicalcluster/v3" "go.uber.org/zap" "github.com/kcp-dev/api-syncagent/internal/controller/sync" - "github.com/kcp-dev/api-syncagent/internal/controller/syncmanager/lifecycle" "github.com/kcp-dev/api-syncagent/internal/controllerutil" "github.com/kcp-dev/api-syncagent/internal/controllerutil/predicate" "github.com/kcp-dev/api-syncagent/internal/discovery" syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" - kcpdevv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" + kcpapisv1alpha1 
"github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" + apiexportprovider "github.com/kcp-dev/multicluster-provider/apiexport" + mccontroller "sigs.k8s.io/multicluster-runtime/pkg/controller" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/rest" @@ -42,8 +44,8 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/kontext" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) @@ -72,18 +74,32 @@ type Reconciler struct { stateNamespace string agentName string - apiExport *kcpdevv1alpha1.APIExport + apiExport *kcpapisv1alpha1.APIExport // URL for which the current vwCluster instance has been created vwURL string - // a Cluster representing the virtual workspace for the APIExport - vwCluster *lifecycle.Cluster + // A multi-cluster Manager representing the virtual workspace cluster; this manager will + // not handle the individual controllers' lifecycle, because their lifecycle depends on + // PublishedResources, not the set of workspaces/clusters in the APIExport's virtual workspace. + // This manager is stopped and recreated whenever the APIExport's URL changes. + vwManager mcmanager.Manager + vwManagerCtx context.Context + vwManagerCancel context.CancelFunc - // a map of sync controllers, one for each PublishedResource, using their + // The provider based on the APIExport; like the vwManager, this is stopped and recreated + // whenever the APIExport's URL changes. 
+ vwProvider *apiexportprovider.Provider + + // A map of sync controllers, one for each PublishedResource, using their // UIDs and resourceVersion as the map keys; using the version ensures that // when a PR changes, the old controller is orphaned and will be shut down. - syncWorkers map[string]lifecycle.Controller + syncWorkers map[string]syncWorker +} + +type syncWorker struct { + controller mccontroller.Controller + cancel context.CancelFunc } // Add creates a new controller and adds it to the given manager. @@ -93,7 +109,7 @@ func Add( kcpCluster cluster.Cluster, kcpRestConfig *rest.Config, log *zap.SugaredLogger, - apiExport *kcpdevv1alpha1.APIExport, + apiExport *kcpapisv1alpha1.APIExport, prFilter labels.Selector, stateNamespace string, agentName string, @@ -111,11 +127,11 @@ func Add( kcpRestConfig: kcpRestConfig, log: log, recorder: localManager.GetEventRecorderFor(ControllerName), - syncWorkers: map[string]lifecycle.Controller{}, discoveryClient: discoveryClient, prFilter: prFilter, stateNamespace: stateNamespace, agentName: agentName, + syncWorkers: map[string]syncWorker{}, } _, err = builder.ControllerManagedBy(localManager). @@ -125,12 +141,13 @@ func Add( MaxConcurrentReconciles: 1, }). // Watch for changes to APIExport on the kcp side to start/restart the actual syncing controllers; - // the cache is already restricted by a fieldSelector in the main.go to respect the RBC restrictions, + // the cache is already restricted by a fieldSelector in the main.go to respect the RBAC restrictions, // so there is no need here to add an additional filter. - WatchesRawSource(source.Kind(kcpCluster.GetCache(), &kcpdevv1alpha1.APIExport{}, controllerutil.EnqueueConst[*kcpdevv1alpha1.APIExport]("dummy"))). + WatchesRawSource(source.Kind(kcpCluster.GetCache(), &kcpapisv1alpha1.APIExport{}, controllerutil.EnqueueConst[*kcpapisv1alpha1.APIExport]("dummy"))). 
// Watch for changes to the PublishedResources Watches(&syncagentv1alpha1.PublishedResource{}, controllerutil.EnqueueConst[ctrlruntimeclient.Object]("dummy"), builder.WithPredicates(predicate.ByLabels(prFilter))). Build(reconciler) + return err } @@ -138,18 +155,17 @@ func (r *Reconciler) Reconcile(ctx context.Context, _ reconcile.Request) (reconc log := r.log.Named(ControllerName) log.Debug("Processing") - wsCtx := kontext.WithCluster(ctx, logicalcluster.From(r.apiExport)) key := types.NamespacedName{Name: r.apiExport.Name} - apiExport := &kcpdevv1alpha1.APIExport{} - if err := r.kcpCluster.GetClient().Get(wsCtx, key, apiExport); ctrlruntimeclient.IgnoreNotFound(err) != nil { + apiExport := &kcpapisv1alpha1.APIExport{} + if err := r.kcpCluster.GetClient().Get(ctx, key, apiExport); ctrlruntimeclient.IgnoreNotFound(err) != nil { return reconcile.Result{}, fmt.Errorf("failed to retrieve APIExport: %w", err) } return reconcile.Result{}, r.reconcile(ctx, log, apiExport) } -func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, apiExport *kcpdevv1alpha1.APIExport) error { +func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, apiExport *kcpapisv1alpha1.APIExport) error { // We're not yet making use of APIEndpointSlices, as we don't even fully // support a sharded kcp setup yet. Hence for now we're safe just using // this deprecated VW URL. 
@@ -163,10 +179,9 @@ func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, apiE vwURL := urls[0].URL - // if the VW URL changed, stop the cluster and all sync controllers + // if the VW URL changed, stop the manager and all sync controllers if r.vwURL != "" && vwURL != r.vwURL { - r.stopSyncControllers(log) - r.stopVirtualWorkspaceCluster(log) + r.shutdown(log) } // if kcp had a hiccup and wrote a status without an actual URL @@ -174,9 +189,9 @@ func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, apiE return nil } - // make sure we have a running cluster object for the virtual workspace - if err := r.ensureVirtualWorkspaceCluster(log, vwURL); err != nil { - return fmt.Errorf("failed to ensure virtual workspace cluster: %w", err) + // make sure we have a running manager object for the virtual workspace + if err := r.ensureManager(log, vwURL); err != nil { + return fmt.Errorf("failed to ensure virtual workspace manager: %w", err) } // find all PublishedResources @@ -195,40 +210,127 @@ func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, apiE return nil } -func (r *Reconciler) ensureVirtualWorkspaceCluster(log *zap.SugaredLogger, vwURL string) error { - if r.vwCluster == nil { - log.Info("Setting up virtual workspace cluster…") +func (r *Reconciler) ensureManager(log *zap.SugaredLogger, vwURL string) error { + // Use the global app context so this provider is independent of the reconcile + // context, which might get cancelled right after Reconcile() is done. 
+ r.vwManagerCtx, r.vwManagerCancel = context.WithCancel(r.ctx) + + vwConfig := rest.CopyConfig(r.kcpRestConfig) + vwConfig.Host = vwURL + + scheme := runtime.NewScheme() + + if err := corev1.AddToScheme(scheme); err != nil { + return fmt.Errorf("failed to register scheme %s: %w", corev1.SchemeGroupVersion, err) + } + + if err := kcpapisv1alpha1.AddToScheme(scheme); err != nil { + return fmt.Errorf("failed to register scheme %s: %w", kcpapisv1alpha1.SchemeGroupVersion, err) + } + + if r.vwProvider == nil { + log.Debug("Setting up APIExport provider…") + + fmt.Printf("config: %#v\n", vwConfig) - stoppableCluster, err := lifecycle.NewCluster(vwURL, r.kcpRestConfig) + provider, err := apiexportprovider.New(vwConfig, apiexportprovider.Options{ + Scheme: scheme, + }) + if err != nil { + return fmt.Errorf("failed to init apiexport provider: %w", err) + } + + r.vwProvider = provider + } + + if r.vwManager == nil { + log.Debug("Setting up virtual workspace manager…") + + manager, err := mcmanager.New(vwConfig, r.vwProvider, manager.Options{ + Scheme: scheme, + LeaderElection: false, + Metrics: server.Options{ + BindAddress: "0", + }, + }) if err != nil { return fmt.Errorf("failed to initialize cluster: %w", err) } + // Make sure the vwManager can Engage() on the controller, even though we + start and stop them outside the control of the manager. This shim will + ensure Engage() calls are handed to the underlying sync controller + as long as the controller is running. 
+ if err := manager.Add(&controllerShim{reconciler: r}); err != nil { + return fmt.Errorf("failed to initialize cluster: %w", err) + } + // use the app's root context as the base, not the reconciling context, which // might get cancelled after Reconcile() is done; // likewise use the reconciler's log without any additional reconciling context - if err := stoppableCluster.Start(r.ctx, r.log); err != nil { - return fmt.Errorf("failed to start cluster: %w", err) - } + go func() { + if err := manager.Start(r.vwManagerCtx); err != nil { + log.Fatalw("Failed to start manager.", zap.Error(err)) + } + }() log.Debug("Virtual workspace cluster setup completed.") r.vwURL = vwURL - r.vwCluster = stoppableCluster + r.vwManager = manager } + // start the provider + go func() { + // Use the global app context so this provider is independent of the reconcile + // context, which might get cancelled right after Reconcile() is done. + if err := r.vwProvider.Run(r.vwManagerCtx, r.vwManager); err != nil { + log.Fatalw("Failed to start apiexport provider.", zap.Error(err)) + } + }() + return nil } -func (r *Reconciler) stopVirtualWorkspaceCluster(log *zap.SugaredLogger) { - if r.vwCluster != nil { - if err := r.vwCluster.Stop(log); err != nil { - log.Errorw("Failed to stop cluster", zap.Error(err)) +type controllerShim struct { + reconciler *Reconciler +} + +func (s *controllerShim) Engage(ctx context.Context, clusterName string, cl cluster.Cluster) error { + s.reconciler.log.Infof("Engage(%q)\n", clusterName) + + for _, worker := range s.reconciler.syncWorkers { + if err := worker.controller.Engage(ctx, clusterName, cl); err != nil { + return err } } - r.vwCluster = nil + return nil +} + +func (s *controllerShim) Start(_ context.Context) error { + // NOP, controllers are started outside the control of the manager. + return nil +} + +// shutdown will cancel the current context and thereby stop the manager and all +// sync controllers at the same time. 
+func (r *Reconciler) shutdown(log *zap.SugaredLogger) { + log.Debug("Shutting down existing manager…") + + if r.vwManagerCancel != nil { + r.vwManagerCancel() + } + + r.vwProvider = nil + r.vwManager = nil + r.vwManagerCtx = nil + r.vwManagerCancel = nil r.vwURL = "" + + // Free all workers; since their contexts are based on the manager's context, + // they have also been cancelled already above. + r.syncWorkers = nil } func getPublishedResourceKey(pr *syncagentv1alpha1.PublishedResource) string { @@ -236,31 +338,20 @@ func getPublishedResourceKey(pr *syncagentv1alpha1.PublishedResource) string { } func (r *Reconciler) ensureSyncControllers(ctx context.Context, log *zap.SugaredLogger, publishedResources []syncagentv1alpha1.PublishedResource) error { - currentPRWorkers := sets.New[string]() + requiredWorkers := sets.New[string]() for _, pr := range publishedResources { - currentPRWorkers.Insert(getPublishedResourceKey(&pr)) + requiredWorkers.Insert(getPublishedResourceKey(&pr)) } // stop controllers that are no longer needed - for key, ctrl := range r.syncWorkers { - // if the controller failed to properly start, its goroutine will have - // ended already, but it's still lingering around in the syncWorkers map; - // controller is still required and running - if currentPRWorkers.Has(key) && ctrl.Running() { + for key, worker := range r.syncWorkers { + if requiredWorkers.Has(key) { continue } log.Infow("Stopping sync controller…", "key", key) - var cause error - if ctrl.Running() { - cause = errors.New("PublishedResource not available anymore") - } else { - cause = errors.New("gc'ing failed controller") - } - - // can only fail if the controller wasn't running; a situation we do not care about here - _ = ctrl.Stop(log, cause) + worker.cancel() delete(r.syncWorkers, key) } @@ -276,6 +367,8 @@ func (r *Reconciler) ensureSyncControllers(ctx context.Context, log *zap.Sugared log.Infow("Starting new sync controller…", "key", key) + ctrlCtx, ctrlCancel := 
context.WithCancel(r.vwManagerCtx) + // create the sync controller; // use the reconciler's log without any additional reconciling context syncController, err := sync.Create( @@ -283,7 +376,7 @@ func (r *Reconciler) ensureSyncControllers(ctx context.Context, log *zap.Sugared // this context *must not* be stored in the sync controller! ctx, r.localManager, - r.vwCluster.GetCluster(), + r.vwManager, &pubRes, r.discoveryClient, r.stateNamespace, @@ -292,34 +385,24 @@ func (r *Reconciler) ensureSyncControllers(ctx context.Context, log *zap.Sugared numSyncWorkers, ) if err != nil { + ctrlCancel() return fmt.Errorf("failed to create sync controller: %w", err) } - // wrap it so we can start/stop it easily - wrappedController, err := lifecycle.NewController(syncController) - if err != nil { - return fmt.Errorf("failed to wrap sync controller: %w", err) + log.Infof("storing worker at %s", key) + r.syncWorkers[key] = syncWorker{ + controller: syncController, + cancel: ctrlCancel, } - // let 'er rip (remember to use the long-lived app root context here) - if err := wrappedController.Start(r.ctx, log); err != nil { + // let 'er rip (remember to use the long-lived context here) + if err := syncController.Start(ctrlCtx); err != nil { + ctrlCancel() + log.Info("deleting again") + delete(r.syncWorkers, key) return fmt.Errorf("failed to start sync controller: %w", err) } - - r.syncWorkers[key] = wrappedController } return nil } - -func (r *Reconciler) stopSyncControllers(log *zap.SugaredLogger) { - cause := errors.New("virtual workspace cluster is recreating") - - for uid, ctrl := range r.syncWorkers { - if err := ctrl.Stop(log, cause); err != nil { - log.Errorw("Failed to stop controller", "uid", uid, zap.Error(err)) - } - - delete(r.syncWorkers, uid) - } -} diff --git a/internal/controller/syncmanager/lifecycle/cluster.go b/internal/controller/syncmanager/lifecycle/cluster.go deleted file mode 100644 index 6a4bff8..0000000 --- 
a/internal/controller/syncmanager/lifecycle/cluster.go +++ /dev/null @@ -1,264 +0,0 @@ -/* -Copyright 2025 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lifecycle - -import ( - "context" - "errors" - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/kcp-dev/logicalcluster/v3" - "go.uber.org/zap" - - kcpdevcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/kcp" - "sigs.k8s.io/controller-runtime/pkg/kontext" -) - -// Cluster is a controller-runtime cluster -// that can be stopped by cancelling its root context. -type Cluster struct { - // a Cluster representing the virtual workspace for the APIExport - obj cluster.Cluster - - // a signal that is closed when the vwCluster has stopped - stopped chan struct{} - - // a function that is used to stop the vwCluster - cancelFunc context.CancelCauseFunc -} - -// newWildcardClusterMapperProvider returns a RESTMapper that talks to the /clusters/* endpoint. 
-func newWildcardClusterMapperProvider(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { - mapperCfg := rest.CopyConfig(c) - if !strings.HasSuffix(mapperCfg.Host, "/clusters/*") { - mapperCfg.Host += "/clusters/*" - } - - return apiutil.NewDynamicRESTMapper(mapperCfg, httpClient) -} - -// clusterAwareRoundTripper is a cluster-aware wrapper around http.RoundTripper -// taking the cluster from the context. -type clusterAwareRoundTripper struct { - delegate http.RoundTripper -} - -// newClusterAwareRoundTripper creates a new cluster aware round tripper. -func newClusterAwareRoundTripper(delegate http.RoundTripper) *clusterAwareRoundTripper { - return &clusterAwareRoundTripper{ - delegate: delegate, - } -} - -func (c *clusterAwareRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - cluster, ok := kontext.ClusterFrom(req.Context()) - if ok && !cluster.Empty() { - return clusterRoundTripper{cluster: cluster.Path(), delegate: c.delegate}.RoundTrip(req) - } - return c.delegate.RoundTrip(req) -} - -// clusterRoundTripper is static cluster-aware wrapper around http.RoundTripper. -type clusterRoundTripper struct { - cluster logicalcluster.Path - delegate http.RoundTripper -} - -func (c clusterRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if !c.cluster.Empty() { - req = req.Clone(req.Context()) - req.URL.Path = generatePath(req.URL.Path, c.cluster) - req.URL.RawPath = generatePath(req.URL.RawPath, c.cluster) - } - return c.delegate.RoundTrip(req) -} - -// apiRegex matches any string that has /api/ or /apis/ in it. -var apiRegex = regexp.MustCompile(`(/api/|/apis/)`) - -// generatePath formats the request path to target the specified cluster. 
-func generatePath(originalPath string, workspacePath logicalcluster.Path) string { - // If the originalPath already has cluster.Path() then the path was already modified and no change needed - if strings.Contains(originalPath, workspacePath.RequestPath()) { - return originalPath - } - // If the originalPath has /api/ or /apis/ in it, it might be anywhere in the path, so we use a regex to find and - // replaces /api/ or /apis/ with $cluster/api/ or $cluster/apis/ - if apiRegex.MatchString(originalPath) { - return apiRegex.ReplaceAllString(originalPath, fmt.Sprintf("%s$1", workspacePath.RequestPath())) - } - // Otherwise, we're just prepending /clusters/$name - path := workspacePath.RequestPath() - // if the original path is relative, add a / separator - if len(originalPath) > 0 && originalPath[0] != '/' { - path += "/" - } - // finally append the original path - path += originalPath - return path -} - -func NewCluster(address string, baseRestConfig *rest.Config) (*Cluster, error) { - // note that this cluster and all its components are kcp-aware - config := rest.CopyConfig(baseRestConfig) - config.Host = address - - config.Wrap(func(rt http.RoundTripper) http.RoundTripper { - return newClusterAwareRoundTripper(rt) - }) - - scheme := runtime.NewScheme() - - if err := corev1.AddToScheme(scheme); err != nil { - return nil, fmt.Errorf("failed to register scheme %s: %w", corev1.SchemeGroupVersion, err) - } - - if err := kcpdevcorev1alpha1.AddToScheme(scheme); err != nil { - return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevcorev1alpha1.SchemeGroupVersion, err) - } - - clusterObj, err := cluster.New(config, func(o *cluster.Options) { - o.Scheme = scheme - o.NewCache = kcp.NewClusterAwareCache - o.NewAPIReader = kcp.NewClusterAwareAPIReader - o.NewClient = kcp.NewClusterAwareClient - o.MapperProvider = newWildcardClusterMapperProvider - }) - if err != nil { - return nil, fmt.Errorf("failed to initialize cluster: %w", err) - } - - return &Cluster{ - obj: 
clusterObj, - }, nil -} - -// Start starts a goroutine for the underlying cluster object; make sure to use -// a long-lived context here. -func (c *Cluster) Start(ctx context.Context, log *zap.SugaredLogger) error { - if c.obj == nil { - return errors.New("cannot restart a stopped cluster") - } - - if c.stopped != nil { - return errors.New("cluster is already running") - } - - clusterCtx, cancel := context.WithCancelCause(ctx) - - c.cancelFunc = cancel - c.stopped = make(chan struct{}) - - // start the cluster in a new goroutine - go func() { - defer close(c.stopped) - - // this call blocks until clusterCtx is done; Start() never returns an error - // in real-life scenarios, as the cluster just waits for the cache to - // end and caches only end (cleanly) when the context is closed. - // Since this "cannot fail" at runtime, we do not need to somehow trigger - // a full reconciliation when this fails (like recreating a new cluster, - // stopping and restarting all sync controllers, ...). 
- if err := c.obj.Start(clusterCtx); err != nil { - log.Errorw("Virtual workspace cluster has failed", zap.Error(err)) - } - - cancel(errors.New("closing to prevent leakage")) - - c.obj = nil - c.cancelFunc = nil - }() - - // wait for the cluster to be up (context can be anything here) - if !c.obj.GetCache().WaitForCacheSync(ctx) { - err := errors.New("failed to wait for caches to sync") - - // stop the cluster - cancel(err) - - // wait for cleanup to be completed - <-c.stopped - - return err - } - - return nil -} - -func (c *Cluster) Running() bool { - if c.obj == nil { - return false - } - - if c.stopped == nil { - return false - } - - select { - case <-c.stopped: - return false - - default: - return true - } -} - -func (c *Cluster) Stop(log *zap.SugaredLogger) error { - if !c.Running() { - return errors.New("cluster is not running") - } - - c.cancelFunc(errors.New("virtual workspace URL has changed")) - log.Info("Waiting for virtual workspace cluster to shut down…") - <-c.stopped - log.Info("Virtual workspace cluster has finished shutting down.") - - return nil -} - -func (c *Cluster) GetCluster() cluster.Cluster { - return c.obj -} - -func (c *Cluster) GetClient() (ctrlruntimeclient.Client, error) { - if !c.Running() { - return nil, errors.New("cluster is not running") - } - - return c.obj.GetClient(), nil -} - -func (c *Cluster) GetCache() (cache.Cache, error) { - if !c.Running() { - return nil, errors.New("cluster is not running") - } - - return c.obj.GetCache(), nil -} diff --git a/internal/controller/syncmanager/lifecycle/controller.go b/internal/controller/syncmanager/lifecycle/controller.go deleted file mode 100644 index 7f1c786..0000000 --- a/internal/controller/syncmanager/lifecycle/controller.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2025 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lifecycle - -import ( - "context" - "errors" - - "go.uber.org/zap" - - "sigs.k8s.io/controller-runtime/pkg/controller" -) - -// Controller is a controller-runtime controller -// that can be stopped by cancelling its root context. -type Controller struct { - // a Controller representing the virtual workspace for the APIExport - obj controller.Controller - - // a signal that is closed when the vwController has stopped - stopped chan struct{} - - // a function that is used to stop the vwController - cancelFunc context.CancelCauseFunc -} - -func NewController(upstream controller.Controller) (Controller, error) { - return Controller{ - obj: upstream, - }, nil -} - -// Start starts the wrapped controller. 
-func (c *Controller) Start(ctx context.Context, log *zap.SugaredLogger) error { - if c.obj == nil { - return errors.New("cannot restart a stopped controller") - } - - if c.stopped != nil { - return errors.New("controller is already running") - } - - ctrlCtx, cancel := context.WithCancelCause(ctx) - - c.cancelFunc = cancel - c.stopped = make(chan struct{}) - - // start the controller in a new goroutine - go func() { - defer close(c.stopped) - - // this call blocks until ctrlCtx is done or an error occurs - // like failing to start the watches - if err := c.obj.Start(ctrlCtx); err != nil { - log.Errorw("Controller has failed", zap.Error(err)) - } - - cancel(errors.New("closing to prevent leakage")) - - c.obj = nil - c.cancelFunc = nil - }() - - return nil -} - -func (c *Controller) Running() bool { - if c.obj == nil { - return false - } - - if c.stopped == nil { - return false - } - - select { - case <-c.stopped: - return false - - default: - return true - } -} - -func (c *Controller) Stop(log *zap.SugaredLogger, cause error) error { - if !c.Running() { - return errors.New("controller is not running") - } - - c.cancelFunc(cause) - log.Info("Waiting for controller to shut down…") - <-c.stopped - log.Info("Controller has finished shutting down.") - - return nil -} diff --git a/internal/sync/context.go b/internal/sync/context.go index 92a0447..8f10dcd 100644 --- a/internal/sync/context.go +++ b/internal/sync/context.go @@ -17,38 +17,23 @@ limitations under the License. 
package sync import ( - "context" - "github.com/kcp-dev/logicalcluster/v3" - - "sigs.k8s.io/controller-runtime/pkg/kontext" ) -type Context struct { +type clusterInfo struct { clusterName logicalcluster.Name workspacePath logicalcluster.Path - local context.Context - remote context.Context } -func NewContext(local, remote context.Context) Context { - clusterName, ok := kontext.ClusterFrom(remote) - if !ok { - panic("Provided remote context does not contain cluster name.") - } - - return Context{ +func NewClusterInfo(clusterName logicalcluster.Name) clusterInfo { + return clusterInfo{ clusterName: clusterName, - local: local, - remote: remote, } } -func (c *Context) WithWorkspacePath(path logicalcluster.Path) Context { - return Context{ +func (c *clusterInfo) WithWorkspacePath(path logicalcluster.Path) clusterInfo { + return clusterInfo{ clusterName: c.clusterName, workspacePath: path, - local: c.local, - remote: c.remote, } } diff --git a/internal/sync/context_test.go b/internal/sync/context_test.go deleted file mode 100644 index 461369e..0000000 --- a/internal/sync/context_test.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2025 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sync - -import ( - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - - "sigs.k8s.io/controller-runtime/pkg/kontext" -) - -func TestNewContext(t *testing.T) { - clusterName := logicalcluster.Name("foo") - ctx := kontext.WithCluster(t.Context(), clusterName) - - combinedCtx := NewContext(t.Context(), ctx) - - if combinedCtx.clusterName != clusterName { - t.Fatalf("Expected function to recognize the cluster name in the context, but got %q", combinedCtx.clusterName) - } -} diff --git a/internal/sync/metadata.go b/internal/sync/metadata.go index d052496..afcb848 100644 --- a/internal/sync/metadata.go +++ b/internal/sync/metadata.go @@ -21,6 +21,8 @@ import ( "maps" "strings" + mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" @@ -137,7 +139,7 @@ func filterLabels(original labels.Set, forbidList sets.Set[string]) labels.Set { return filtered } -func RemoteNameForLocalObject(localObj ctrlruntimeclient.Object) *reconcile.Request { +func RemoteNameForLocalObject(localObj ctrlruntimeclient.Object) *mcreconcile.Request { labels := localObj.GetLabels() annotations := localObj.GetAnnotations() clusterName := labels[remoteObjectClusterLabel] @@ -149,11 +151,13 @@ func RemoteNameForLocalObject(localObj ctrlruntimeclient.Object) *reconcile.Requ return nil } - return &reconcile.Request{ + return &mcreconcile.Request{ ClusterName: clusterName, - NamespacedName: types.NamespacedName{ - Namespace: namespace, - Name: name, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: name, + }, }, } } diff --git a/internal/sync/object_syncer.go b/internal/sync/object_syncer.go index 9b4e422..aa9c67e 100644 --- a/internal/sync/object_syncer.go +++ b/internal/sync/object_syncer.go @@ -60,22 +60,21 @@ type objectSyncer struct { } type syncSide struct { - ctx context.Context clusterName 
logicalcluster.Name workspacePath logicalcluster.Path client ctrlruntimeclient.Client object *unstructured.Unstructured } -func (s *objectSyncer) Sync(log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { +func (s *objectSyncer) Sync(ctx context.Context, log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { // handle deletion: if source object is in deletion, delete the destination object (the clone) if source.object.GetDeletionTimestamp() != nil { - return s.handleDeletion(log, source, dest) + return s.handleDeletion(ctx, log, source, dest) } // add finalizer to source object so that we never orphan the destination object if s.blockSourceDeletion { - updated, err := ensureFinalizer(source.ctx, log, source.client, source.object, deletionFinalizer) + updated, err := ensureFinalizer(ctx, log, source.client, source.object, deletionFinalizer) if err != nil { return false, fmt.Errorf("failed to add cleanup finalizer to source object: %w", err) } @@ -97,7 +96,7 @@ func (s *objectSyncer) Sync(log *zap.SugaredLogger, source, dest syncSide) (requ // if no destination object exists yet, attempt to create it; // note that the object _might_ exist, but we were not able to find it because of broken labels if dest.object == nil { - err := s.ensureDestinationObject(log, source, dest) + err := s.ensureDestinationObject(ctx, log, source, dest) if err != nil { return false, fmt.Errorf("failed to create destination object: %w", err) } @@ -116,7 +115,7 @@ func (s *objectSyncer) Sync(log *zap.SugaredLogger, source, dest syncSide) (requ return false, nil } - requeue, err = s.syncObjectContents(log, source, dest) + requeue, err = s.syncObjectContents(ctx, log, source, dest) if err != nil { return false, fmt.Errorf("failed to synchronize object state: %w", err) } @@ -164,20 +163,20 @@ func (s *objectSyncer) applyMutations(source, dest syncSide) (syncSide, syncSide return source, dest, nil } -func (s *objectSyncer) syncObjectContents(log 
*zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { +func (s *objectSyncer) syncObjectContents(ctx context.Context, log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { // Sync the spec (or more generally, the desired state) from source to dest. - requeue, err = s.syncObjectSpec(log, source, dest) + requeue, err = s.syncObjectSpec(ctx, log, source, dest) if requeue || err != nil { return requeue, err } // Sync the status back in the opposite direction, from dest to source. - return s.syncObjectStatus(log, source, dest) + return s.syncObjectStatus(ctx, log, source, dest) } -func (s *objectSyncer) syncObjectSpec(log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { +func (s *objectSyncer) syncObjectSpec(ctx context.Context, log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { // figure out the last known state - lastKnownSourceState, err := s.stateStore.Get(source) + lastKnownSourceState, err := s.stateStore.Get(ctx, source) if err != nil { return false, fmt.Errorf("failed to determine last known state: %w", err) } @@ -221,7 +220,7 @@ func (s *objectSyncer) syncObjectSpec(log *zap.SugaredLogger, source, dest syncS if string(rawPatch) != "{}" { log.Debugw("Patching destination object…", "patch", string(rawPatch)) - if err := dest.client.Patch(dest.ctx, dest.object, ctrlruntimeclient.RawPatch(types.MergePatchType, rawPatch)); err != nil { + if err := dest.client.Patch(ctx, dest.object, ctrlruntimeclient.RawPatch(types.MergePatchType, rawPatch)); err != nil { return false, fmt.Errorf("failed to patch destination object: %w", err) } @@ -247,7 +246,7 @@ func (s *objectSyncer) syncObjectSpec(log *zap.SugaredLogger, source, dest syncS // are identical w.r.t. the fields we have copied (spec, annotations, labels, ..). 
log.Warn("Updating destination object because last-known-state is missing/invalid…") - if err := dest.client.Update(dest.ctx, dest.object); err != nil { + if err := dest.client.Update(ctx, dest.object); err != nil { return false, fmt.Errorf("failed to update destination object: %w", err) } @@ -257,7 +256,7 @@ func (s *objectSyncer) syncObjectSpec(log *zap.SugaredLogger, source, dest syncS if requeue { // remember this object state for the next reconciliation (this will strip any syncer-related // metadata the 3-way diff may have added above) - if err := s.stateStore.Put(sourceObjCopy, source.clusterName, s.subresources); err != nil { + if err := s.stateStore.Put(ctx, sourceObjCopy, source.clusterName, s.subresources); err != nil { return true, fmt.Errorf("failed to update sync state: %w", err) } } @@ -265,7 +264,7 @@ func (s *objectSyncer) syncObjectSpec(log *zap.SugaredLogger, source, dest syncS return requeue, nil } -func (s *objectSyncer) syncObjectStatus(log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { +func (s *objectSyncer) syncObjectStatus(ctx context.Context, log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { if !s.syncStatusBack { return false, nil } @@ -280,7 +279,7 @@ func (s *objectSyncer) syncObjectStatus(log *zap.SugaredLogger, source, dest syn sourceContent["status"] = destContent["status"] log.Debug("Updating source object status…") - if err := source.client.Status().Update(source.ctx, source.object); err != nil { + if err := source.client.Status().Update(ctx, source.object); err != nil { return false, fmt.Errorf("failed to update source object status: %w", err) } } @@ -289,7 +288,7 @@ func (s *objectSyncer) syncObjectStatus(log *zap.SugaredLogger, source, dest syn return false, nil } -func (s *objectSyncer) ensureDestinationObject(log *zap.SugaredLogger, source, dest syncSide) error { +func (s *objectSyncer) ensureDestinationObject(ctx context.Context, log *zap.SugaredLogger, source, dest syncSide) 
error { // create a copy of the source with GVK projected and renaming rules applied destObj, err := s.destCreator(source.object) if err != nil { @@ -297,7 +296,7 @@ func (s *objectSyncer) ensureDestinationObject(log *zap.SugaredLogger, source, d } // make sure the target namespace on the destination cluster exists - if err := s.ensureNamespace(dest.ctx, log, dest.client, destObj.GetNamespace()); err != nil { + if err := s.ensureNamespace(ctx, log, dest.client, destObj.GetNamespace()); err != nil { return fmt.Errorf("failed to ensure destination namespace: %w", err) } @@ -321,25 +320,25 @@ func (s *objectSyncer) ensureDestinationObject(log *zap.SugaredLogger, source, d objectLog := log.With("dest-object", newObjectKey(destObj, dest.clusterName, logicalcluster.None)) objectLog.Debugw("Creating destination object…") - if err := dest.client.Create(dest.ctx, destObj); err != nil { + if err := dest.client.Create(ctx, destObj); err != nil { if !apierrors.IsAlreadyExists(err) { return fmt.Errorf("failed to create destination object: %w", err) } - if err := s.adoptExistingDestinationObject(objectLog, dest, destObj, sourceObjKey); err != nil { + if err := s.adoptExistingDestinationObject(ctx, objectLog, dest, destObj, sourceObjKey); err != nil { return fmt.Errorf("failed to adopt destination object: %w", err) } } // remember the state of the object that we just created - if err := s.stateStore.Put(source.object, source.clusterName, s.subresources); err != nil { + if err := s.stateStore.Put(ctx, source.object, source.clusterName, s.subresources); err != nil { return fmt.Errorf("failed to update sync state: %w", err) } return nil } -func (s *objectSyncer) adoptExistingDestinationObject(log *zap.SugaredLogger, dest syncSide, existingDestObj *unstructured.Unstructured, sourceKey objectKey) error { +func (s *objectSyncer) adoptExistingDestinationObject(ctx context.Context, log *zap.SugaredLogger, dest syncSide, existingDestObj *unstructured.Unstructured, sourceKey objectKey) 
error { // Cannot add labels to an object in deletion, also there would be no point // in adopting a soon-to-disappear object; instead we silently wait, requeue // and when the object is gone, recreate a fresh one with proper labels. @@ -350,7 +349,7 @@ func (s *objectSyncer) adoptExistingDestinationObject(log *zap.SugaredLogger, de log.Warn("Adopting existing but mislabelled destination object…") // fetch the current state - if err := dest.client.Get(dest.ctx, ctrlruntimeclient.ObjectKeyFromObject(existingDestObj), existingDestObj); err != nil { + if err := dest.client.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(existingDestObj), existingDestObj); err != nil { return fmt.Errorf("failed to get current destination object: %w", err) } @@ -363,7 +362,7 @@ func (s *objectSyncer) adoptExistingDestinationObject(log *zap.SugaredLogger, de s.labelWithAgent(existingDestObj) - if err := dest.client.Update(dest.ctx, existingDestObj); err != nil { + if err := dest.client.Update(ctx, existingDestObj); err != nil { return fmt.Errorf("failed to upsert current destination object labels: %w", err) } @@ -397,7 +396,7 @@ func (s *objectSyncer) ensureNamespace(ctx context.Context, log *zap.SugaredLogg return nil } -func (s *objectSyncer) handleDeletion(log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { +func (s *objectSyncer) handleDeletion(ctx context.Context, log *zap.SugaredLogger, source, dest syncSide) (requeue bool, err error) { // if no finalizer was added, we can safely ignore this event if !s.blockSourceDeletion { return false, nil @@ -407,7 +406,7 @@ func (s *objectSyncer) handleDeletion(log *zap.SugaredLogger, source, dest syncS if dest.object != nil { if dest.object.GetDeletionTimestamp() == nil { log.Debugw("Deleting destination object…", "dest-object", newObjectKey(dest.object, dest.clusterName, logicalcluster.None)) - if err := dest.client.Delete(dest.ctx, dest.object); err != nil { + if err := dest.client.Delete(ctx, dest.object); err != nil 
{ return false, fmt.Errorf("failed to delete destination object: %w", err) } } @@ -416,7 +415,7 @@ func (s *objectSyncer) handleDeletion(log *zap.SugaredLogger, source, dest syncS } // the destination object is gone, we can release the source one - updated, err := removeFinalizer(source.ctx, log, source.client, source.object, deletionFinalizer) + updated, err := removeFinalizer(ctx, log, source.client, source.object, deletionFinalizer) if err != nil { return false, fmt.Errorf("failed to remove cleanup finalizer from source object: %w", err) } diff --git a/internal/sync/state_store.go b/internal/sync/state_store.go index e845b5b..47fab7e 100644 --- a/internal/sync/state_store.go +++ b/internal/sync/state_store.go @@ -17,6 +17,7 @@ limitations under the License. package sync import ( + "context" "fmt" "strings" @@ -32,8 +33,8 @@ import ( ) type ObjectStateStore interface { - Get(source syncSide) (*unstructured.Unstructured, error) - Put(obj *unstructured.Unstructured, clusterName logicalcluster.Name, subresources []string) error + Get(ctx context.Context, source syncSide) (*unstructured.Unstructured, error) + Put(ctx context.Context, obj *unstructured.Unstructured, clusterName logicalcluster.Name, subresources []string) error } // objectStateStore is capable of creating/updating a target Kubernetes object @@ -56,8 +57,8 @@ func newKubernetesStateStoreCreator(namespace string) newObjectStateStoreFunc { } } -func (op *objectStateStore) Get(source syncSide) (*unstructured.Unstructured, error) { - data, err := op.backend.Get(source.object, source.clusterName) +func (op *objectStateStore) Get(ctx context.Context, source syncSide) (*unstructured.Unstructured, error) { + data, err := op.backend.Get(ctx, source.object, source.clusterName) if err != nil { return nil, err } @@ -72,13 +73,13 @@ func (op *objectStateStore) Get(source syncSide) (*unstructured.Unstructured, er return lastKnown, nil } -func (op *objectStateStore) Put(obj *unstructured.Unstructured, clusterName 
logicalcluster.Name, subresources []string) error { +func (op *objectStateStore) Put(ctx context.Context, obj *unstructured.Unstructured, clusterName logicalcluster.Name, subresources []string) error { encoded, err := op.snapshotObject(obj, subresources) if err != nil { return err } - return op.backend.Put(obj, clusterName, []byte(encoded)) + return op.backend.Put(ctx, obj, clusterName, []byte(encoded)) } func (op *objectStateStore) snapshotObject(obj *unstructured.Unstructured, subresources []string) (string, error) { @@ -102,8 +103,8 @@ func (op *objectStateStore) snapshotObject(obj *unstructured.Unstructured, subre } type backend interface { - Get(obj *unstructured.Unstructured, clusterName logicalcluster.Name) ([]byte, error) - Put(obj *unstructured.Unstructured, clusterName logicalcluster.Name, data []byte) error + Get(ctx context.Context, obj *unstructured.Unstructured, clusterName logicalcluster.Name) ([]byte, error) + Put(ctx context.Context, obj *unstructured.Unstructured, clusterName logicalcluster.Name, data []byte) error } type kubernetesBackend struct { @@ -138,9 +139,9 @@ func newKubernetesBackend(namespace string, primaryObject, stateCluster syncSide } } -func (b *kubernetesBackend) Get(obj *unstructured.Unstructured, clusterName logicalcluster.Name) ([]byte, error) { +func (b *kubernetesBackend) Get(ctx context.Context, obj *unstructured.Unstructured, clusterName logicalcluster.Name) ([]byte, error) { secret := corev1.Secret{} - if err := b.stateCluster.client.Get(b.stateCluster.ctx, b.secretName, &secret); ctrlruntimeclient.IgnoreNotFound(err) != nil { + if err := b.stateCluster.client.Get(ctx, b.secretName, &secret); ctrlruntimeclient.IgnoreNotFound(err) != nil { return nil, err } @@ -153,9 +154,9 @@ func (b *kubernetesBackend) Get(obj *unstructured.Unstructured, clusterName logi return data, nil } -func (b *kubernetesBackend) Put(obj *unstructured.Unstructured, clusterName logicalcluster.Name, data []byte) error { +func (b *kubernetesBackend) 
Put(ctx context.Context, obj *unstructured.Unstructured, clusterName logicalcluster.Name, data []byte) error { secret := corev1.Secret{} - if err := b.stateCluster.client.Get(b.stateCluster.ctx, b.secretName, &secret); ctrlruntimeclient.IgnoreNotFound(err) != nil { + if err := b.stateCluster.client.Get(ctx, b.secretName, &secret); ctrlruntimeclient.IgnoreNotFound(err) != nil { return err } @@ -173,9 +174,9 @@ func (b *kubernetesBackend) Put(obj *unstructured.Unstructured, clusterName logi secret.Name = b.secretName.Name secret.Namespace = b.secretName.Namespace - err = b.stateCluster.client.Create(b.stateCluster.ctx, &secret) + err = b.stateCluster.client.Create(ctx, &secret) } else { - err = b.stateCluster.client.Update(b.stateCluster.ctx, &secret) + err = b.stateCluster.client.Update(ctx, &secret) } return err diff --git a/internal/sync/state_store_test.go b/internal/sync/state_store_test.go index fcc7a7e..94aea48 100644 --- a/internal/sync/state_store_test.go +++ b/internal/sync/state_store_test.go @@ -44,7 +44,6 @@ func TestStateStoreBasics(t *testing.T) { } stateSide := syncSide{ - ctx: ctx, client: serviceClusterClient, } @@ -54,7 +53,7 @@ func TestStateStoreBasics(t *testing.T) { /////////////////////////////////////// // get nil from empty store - result, err := store.Get(syncSide{object: primaryObject}) + result, err := store.Get(ctx, syncSide{object: primaryObject}) if err != nil { t.Fatalf("Failed to get primary object from empty cache: %v", err) } @@ -74,7 +73,7 @@ func TestStateStoreBasics(t *testing.T) { }, }, withKind("RemoteThing")) - err = store.Put(firstObject, "", nil) + err = store.Put(ctx, firstObject, "", nil) if err != nil { t.Fatalf("Failed to store object in empty cache: %v", err) } @@ -90,7 +89,7 @@ func TestStateStoreBasics(t *testing.T) { /////////////////////////////////////// // retrieve the stored object - result, err = store.Get(syncSide{object: firstObject}) + result, err = store.Get(ctx, syncSide{object: firstObject}) if err != nil 
{ t.Fatalf("Failed to get stored object from cache: %v", err) } @@ -109,7 +108,7 @@ func TestStateStoreBasics(t *testing.T) { }, }, withKind("RemoteThing")) - result, err = store.Get(syncSide{object: secondObject}) + result, err = store.Get(ctx, syncSide{object: secondObject}) if err != nil { t.Fatalf("Failed to get second object from cache: %v", err) } @@ -120,12 +119,12 @@ func TestStateStoreBasics(t *testing.T) { /////////////////////////////////////// // store a 2nd object - err = store.Put(secondObject, "", nil) + err = store.Put(ctx, secondObject, "", nil) if err != nil { t.Fatalf("Failed to store second object in cache: %v", err) } - result, err = store.Get(syncSide{object: secondObject}) + result, err = store.Get(ctx, syncSide{object: secondObject}) if err != nil { t.Fatalf("Failed to get second object from cache: %v", err) } @@ -135,7 +134,7 @@ func TestStateStoreBasics(t *testing.T) { /////////////////////////////////////// // retrieve the first, ensure it's not overwritten - result, err = store.Get(syncSide{object: firstObject}) + result, err = store.Get(ctx, syncSide{object: firstObject}) if err != nil { t.Fatalf("Failed to get first object from cache again: %v", err) } @@ -157,7 +156,7 @@ func TestStateStoreBasics(t *testing.T) { }, }, withKind("RemoteThing")) - err = store.Put(thirdObject, "", nil) + err = store.Put(ctx, thirdObject, "", nil) if err != nil { t.Fatalf("Failed to store third object in cache: %v", err) } @@ -165,7 +164,7 @@ func TestStateStoreBasics(t *testing.T) { /////////////////////////////////////// // ensure status is kept - result, err = store.Get(syncSide{object: thirdObject}) + result, err = store.Get(ctx, syncSide{object: thirdObject}) if err != nil { t.Fatalf("Failed to get third object from cache again: %v", err) } @@ -175,7 +174,7 @@ func TestStateStoreBasics(t *testing.T) { /////////////////////////////////////// // overwrite, but this time strip subresource - err = store.Put(thirdObject, "", []string{"status"}) + err = 
store.Put(ctx, thirdObject, "", []string{"status"}) if err != nil { t.Fatalf("Failed to store third object in cache: %v", err) } @@ -183,7 +182,7 @@ func TestStateStoreBasics(t *testing.T) { /////////////////////////////////////// // ensure status is gone - result, err = store.Get(syncSide{object: thirdObject}) + result, err = store.Get(ctx, syncSide{object: thirdObject}) if err != nil { t.Fatalf("Failed to get third object from cache again: %v", err) } diff --git a/internal/sync/syncer.go b/internal/sync/syncer.go index 4cf239f..7c78697 100644 --- a/internal/sync/syncer.go +++ b/internal/sync/syncer.go @@ -17,6 +17,7 @@ limitations under the License. package sync import ( + "context" "fmt" "go.uber.org/zap" @@ -115,11 +116,11 @@ func NewResourceSyncer( // Each of these steps can potentially end the current processing and return (true, nil). In this // case, the caller should re-fetch the remote object and call Process() again (most likely in the // next reconciliation). Only when (false, nil) is returned is the entire process finished. -func (s *ResourceSyncer) Process(ctx Context, remoteObj *unstructured.Unstructured) (requeue bool, err error) { - log := s.log.With("source-object", newObjectKey(remoteObj, ctx.clusterName, ctx.workspacePath)) +func (s *ResourceSyncer) Process(ctx context.Context, info clusterInfo, remoteObj *unstructured.Unstructured) (requeue bool, err error) { + log := s.log.With("source-object", newObjectKey(remoteObj, info.clusterName, info.workspacePath)) // find the local equivalent object in the local service cluster - localObj, err := s.findLocalObject(ctx, remoteObj) + localObj, err := s.findLocalObject(ctx, info, remoteObj) if err != nil { return false, fmt.Errorf("failed to find local equivalent: %w", err) } @@ -130,15 +131,13 @@ func (s *ResourceSyncer) Process(ctx Context, remoteObj *unstructured.Unstructur // Prepare object sync sides. 
sourceSide := syncSide{ - ctx: ctx.remote, - clusterName: ctx.clusterName, - workspacePath: ctx.workspacePath, + clusterName: info.clusterName, + workspacePath: info.workspacePath, client: s.remoteClient, object: remoteObj, } destSide := syncSide{ - ctx: ctx.local, client: s.localClient, object: localObj, } @@ -153,7 +152,7 @@ func (s *ResourceSyncer) Process(ctx Context, remoteObj *unstructured.Unstructur agentName: s.agentName, subresources: s.subresources, // use the projection and renaming rules configured in the PublishedResource - destCreator: s.newLocalObjectCreator(ctx), + destCreator: s.newLocalObjectCreator(info), // for the main resource, status subresource handling is enabled (this // means _allowing_ status back-syncing, it still depends on whether the // status subresource even exists whether an update happens) @@ -171,7 +170,7 @@ func (s *ResourceSyncer) Process(ctx Context, remoteObj *unstructured.Unstructur metadataOnDestination: true, } - requeue, err = syncer.Sync(log, sourceSide, destSide) + requeue, err = syncer.Sync(ctx, log, sourceSide, destSide) if err != nil { return false, err } @@ -188,17 +187,17 @@ func (s *ResourceSyncer) Process(ctx Context, remoteObj *unstructured.Unstructur // it modifies the state of the world, otherwise the objects in // source/dest.object might be ouf date. 
- return s.processRelatedResources(log, stateStore, sourceSide, destSide) + return s.processRelatedResources(ctx, log, stateStore, sourceSide, destSide) } -func (s *ResourceSyncer) findLocalObject(ctx Context, remoteObj *unstructured.Unstructured) (*unstructured.Unstructured, error) { - localSelector := labels.SelectorFromSet(newObjectKey(remoteObj, ctx.clusterName, ctx.workspacePath).Labels()) +func (s *ResourceSyncer) findLocalObject(ctx context.Context, info clusterInfo, remoteObj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + localSelector := labels.SelectorFromSet(newObjectKey(remoteObj, info.clusterName, info.workspacePath).Labels()) localObjects := &unstructured.UnstructuredList{} localObjects.SetAPIVersion(s.destDummy.GetAPIVersion()) localObjects.SetKind(s.destDummy.GetKind() + "List") - if err := s.localClient.List(ctx.local, localObjects, &ctrlruntimeclient.ListOptions{ + if err := s.localClient.List(ctx, localObjects, &ctrlruntimeclient.ListOptions{ LabelSelector: localSelector, Limit: 2, // 2 in order to detect broken configurations }); err != nil { @@ -215,7 +214,7 @@ func (s *ResourceSyncer) findLocalObject(ctx Context, remoteObj *unstructured.Un } } -func (s *ResourceSyncer) newLocalObjectCreator(ctx Context) objectCreatorFunc { +func (s *ResourceSyncer) newLocalObjectCreator(info clusterInfo) objectCreatorFunc { return func(remoteObj *unstructured.Unstructured) (*unstructured.Unstructured, error) { // map from the remote API into the actual, local API group destObj := remoteObj.DeepCopy() @@ -225,7 +224,7 @@ func (s *ResourceSyncer) newLocalObjectCreator(ctx Context) objectCreatorFunc { destScope := syncagentv1alpha1.ResourceScope(s.localCRD.Spec.Scope) // map namespace/name - mappedName, err := templating.GenerateLocalObjectName(s.pubRes, remoteObj, ctx.clusterName, ctx.workspacePath) + mappedName, err := templating.GenerateLocalObjectName(s.pubRes, remoteObj, info.clusterName, info.workspacePath) if err != nil { return nil, 
fmt.Errorf("failed to generate local object name: %w", err) } diff --git a/internal/sync/syncer_related.go b/internal/sync/syncer_related.go index b2a427d..53d9bc4 100644 --- a/internal/sync/syncer_related.go +++ b/internal/sync/syncer_related.go @@ -17,6 +17,7 @@ limitations under the License. package sync import ( + "context" "encoding/json" "errors" "fmt" @@ -39,9 +40,9 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -func (s *ResourceSyncer) processRelatedResources(log *zap.SugaredLogger, stateStore ObjectStateStore, remote, local syncSide) (requeue bool, err error) { +func (s *ResourceSyncer) processRelatedResources(ctx context.Context, log *zap.SugaredLogger, stateStore ObjectStateStore, remote, local syncSide) (requeue bool, err error) { for _, relatedResource := range s.pubRes.Spec.Related { - requeue, err := s.processRelatedResource(log.With("identifier", relatedResource.Identifier), stateStore, remote, local, relatedResource) + requeue, err := s.processRelatedResource(ctx, log.With("identifier", relatedResource.Identifier), stateStore, remote, local, relatedResource) if err != nil { return false, fmt.Errorf("failed to process related resource %s: %w", relatedResource.Identifier, err) } @@ -61,7 +62,7 @@ type relatedObjectAnnotation struct { Kind string `json:"kind"` } -func (s *ResourceSyncer) processRelatedResource(log *zap.SugaredLogger, stateStore ObjectStateStore, remote, local syncSide, relRes syncagentv1alpha1.RelatedResourceSpec) (requeue bool, err error) { +func (s *ResourceSyncer) processRelatedResource(ctx context.Context, log *zap.SugaredLogger, stateStore ObjectStateStore, remote, local syncSide, relRes syncagentv1alpha1.RelatedResourceSpec) (requeue bool, err error) { // decide what direction to sync (local->remote vs. 
remote->local) var ( origin syncSide @@ -77,7 +78,7 @@ func (s *ResourceSyncer) processRelatedResource(log *zap.SugaredLogger, stateSto } // find the all objects on the origin side that match the given criteria - resolvedObjects, err := resolveRelatedResourceObjects(origin, dest, relRes) + resolvedObjects, err := resolveRelatedResourceObjects(ctx, origin, dest, relRes) if err != nil { return false, fmt.Errorf("failed to get resolve origin objects: %w", err) } @@ -100,19 +101,17 @@ func (s *ResourceSyncer) processRelatedResource(log *zap.SugaredLogger, stateSto destObject.SetAPIVersion("v1") // we only support ConfigMaps and Secrets, both are in core/v1 destObject.SetKind(relRes.Kind) - if err = dest.client.Get(dest.ctx, resolved.destination, destObject); err != nil { + if err = dest.client.Get(ctx, resolved.destination, destObject); err != nil { destObject = nil } sourceSide := syncSide{ - ctx: origin.ctx, clusterName: origin.clusterName, client: origin.client, object: resolved.original, } destSide := syncSide{ - ctx: dest.ctx, clusterName: dest.clusterName, client: dest.client, object: destObject, @@ -147,7 +146,7 @@ func (s *ResourceSyncer) processRelatedResource(log *zap.SugaredLogger, stateSto metadataOnDestination: false, } - req, err := syncer.Sync(log, sourceSide, destSide) + req, err := syncer.Sync(ctx, log, sourceSide, destSide) if err != nil { return false, fmt.Errorf("failed to sync related object: %w", err) } @@ -185,7 +184,7 @@ func (s *ResourceSyncer) processRelatedResource(log *zap.SugaredLogger, stateSto remote.object.SetAnnotations(annotations) log.Debug("Remembering related object in main object…") - if err := remote.client.Patch(remote.ctx, remote.object, ctrlruntimeclient.MergeFrom(oldState)); err != nil { + if err := remote.client.Patch(ctx, remote.object, ctrlruntimeclient.MergeFrom(oldState)); err != nil { return false, fmt.Errorf("failed to update related data in remote object: %w", err) } @@ -207,7 +206,7 @@ type resolvedObject struct { 
destination types.NamespacedName } -func resolveRelatedResourceObjects(relatedOrigin, relatedDest syncSide, relRes syncagentv1alpha1.RelatedResourceSpec) ([]resolvedObject, error) { +func resolveRelatedResourceObjects(ctx context.Context, relatedOrigin, relatedDest syncSide, relRes syncagentv1alpha1.RelatedResourceSpec) ([]resolvedObject, error) { // resolving the originNamespace first allows us to scope down any .List() calls later originNamespace := relatedOrigin.object.GetNamespace() destNamespace := relatedDest.object.GetNamespace() @@ -219,7 +218,7 @@ func resolveRelatedResourceObjects(relatedOrigin, relatedDest syncSide, relRes s if nsSpec := relRes.Object.Namespace; nsSpec != nil { var err error - namespaceMap, err = resolveRelatedResourceOriginNamespaces(relatedOrigin, relatedDest, origin, *nsSpec) + namespaceMap, err = resolveRelatedResourceOriginNamespaces(ctx, relatedOrigin, relatedDest, origin, *nsSpec) if err != nil { return nil, fmt.Errorf("failed to resolve namespace: %w", err) } @@ -241,7 +240,7 @@ func resolveRelatedResourceObjects(relatedOrigin, relatedDest syncSide, relRes s // this related resource configuration. Again, for label selectors this can be multiple, // otherwise at most 1. 
- objects, err := resolveRelatedResourceObjectsInNamespaces(relatedOrigin, relatedDest, relRes, relRes.Object.RelatedResourceObjectSpec, namespaceMap) + objects, err := resolveRelatedResourceObjectsInNamespaces(ctx, relatedOrigin, relatedDest, relRes, relRes.Object.RelatedResourceObjectSpec, namespaceMap) if err != nil { return nil, fmt.Errorf("failed to resolve objects: %w", err) } @@ -249,7 +248,7 @@ func resolveRelatedResourceObjects(relatedOrigin, relatedDest syncSide, relRes s return objects, nil } -func resolveRelatedResourceOriginNamespaces(relatedOrigin, relatedDest syncSide, origin syncagentv1alpha1.RelatedResourceOrigin, spec syncagentv1alpha1.RelatedResourceObjectSpec) (map[string]string, error) { +func resolveRelatedResourceOriginNamespaces(ctx context.Context, relatedOrigin, relatedDest syncSide, origin syncagentv1alpha1.RelatedResourceOrigin, spec syncagentv1alpha1.RelatedResourceObjectSpec) (map[string]string, error) { switch { //nolint:staticcheck // .Reference is deprecated, but we still support it for now. 
case spec.Reference != nil: @@ -287,7 +286,7 @@ func resolveRelatedResourceOriginNamespaces(relatedOrigin, relatedDest syncSide, LabelSelector: selector, } - if err := relatedOrigin.client.List(relatedOrigin.ctx, namespaces, opts); err != nil { + if err := relatedOrigin.client.List(ctx, namespaces, opts); err != nil { return nil, fmt.Errorf("failed to evaluate label selector: %w", err) } @@ -324,11 +323,11 @@ func resolveRelatedResourceOriginNamespaces(relatedOrigin, relatedDest syncSide, } } -func resolveRelatedResourceObjectsInNamespaces(relatedOrigin, relatedDest syncSide, relRes syncagentv1alpha1.RelatedResourceSpec, spec syncagentv1alpha1.RelatedResourceObjectSpec, namespaceMap map[string]string) ([]resolvedObject, error) { +func resolveRelatedResourceObjectsInNamespaces(ctx context.Context, relatedOrigin, relatedDest syncSide, relRes syncagentv1alpha1.RelatedResourceSpec, spec syncagentv1alpha1.RelatedResourceObjectSpec, namespaceMap map[string]string) ([]resolvedObject, error) { result := []resolvedObject{} for originNamespace, destNamespace := range namespaceMap { - nameMap, err := resolveRelatedResourceObjectsInNamespace(relatedOrigin, relatedDest, relRes, spec, originNamespace) + nameMap, err := resolveRelatedResourceObjectsInNamespace(ctx, relatedOrigin, relatedDest, relRes, spec, originNamespace) if err != nil { return nil, fmt.Errorf("failed to find objects on origin side: %w", err) } @@ -338,7 +337,7 @@ func resolveRelatedResourceObjectsInNamespaces(relatedOrigin, relatedDest syncSi originObj.SetAPIVersion("v1") // we only support ConfigMaps and Secrets, both are in core/v1 originObj.SetKind(relRes.Kind) - err = relatedOrigin.client.Get(relatedOrigin.ctx, types.NamespacedName{Name: originName, Namespace: originNamespace}, originObj) + err = relatedOrigin.client.Get(ctx, types.NamespacedName{Name: originName, Namespace: originNamespace}, originObj) if err != nil { // this should rarely happen, only if an object was deleted in between the .List() call 
// above and the .Get() call here. @@ -362,7 +361,7 @@ func resolveRelatedResourceObjectsInNamespaces(relatedOrigin, relatedDest syncSi return result, nil } -func resolveRelatedResourceObjectsInNamespace(relatedOrigin, relatedDest syncSide, relRes syncagentv1alpha1.RelatedResourceSpec, spec syncagentv1alpha1.RelatedResourceObjectSpec, namespace string) (map[string]string, error) { +func resolveRelatedResourceObjectsInNamespace(ctx context.Context, relatedOrigin, relatedDest syncSide, relRes syncagentv1alpha1.RelatedResourceSpec, spec syncagentv1alpha1.RelatedResourceObjectSpec, namespace string) (map[string]string, error) { switch { //nolint:staticcheck case spec.Reference != nil: @@ -408,7 +407,7 @@ func resolveRelatedResourceObjectsInNamespace(relatedOrigin, relatedDest syncSid Namespace: namespace, } - if err := relatedOrigin.client.List(relatedOrigin.ctx, originObjects, opts); err != nil { + if err := relatedOrigin.client.List(ctx, originObjects, opts); err != nil { return nil, fmt.Errorf("failed to select origin objects based on label selector: %w", err) } diff --git a/internal/sync/syncer_related_test.go b/internal/sync/syncer_related_test.go index 1761644..92dcd24 100644 --- a/internal/sync/syncer_related_test.go +++ b/internal/sync/syncer_related_test.go @@ -60,19 +60,16 @@ func TestResolveRelatedResourceObjects(t *testing.T) { kcpClient := buildFakeClient(primaryObject) serviceClusterClient := buildFakeClient(primaryObjectCopy, dummySecret) - ctx := t.Context() // Now we configure origin/dest as if we're syncing a Secret up from the service cluster to kcp, // i.e. origin=service. originSide := syncSide{ - ctx: ctx, client: serviceClusterClient, object: primaryObjectCopy, } destSide := syncSide{ - ctx: ctx, client: kcpClient, object: primaryObject, // Since this is a just a regular kube client, we do not need to set clusterName/clusterPath. 
@@ -222,7 +219,7 @@ func TestResolveRelatedResourceObjects(t *testing.T) { Object: testcase.objectSpec, } - foundObjects, err := resolveRelatedResourceObjects(originSide, destSide, pubRes) + foundObjects, err := resolveRelatedResourceObjects(t.Context(), originSide, destSide, pubRes) if err != nil { t.Fatalf("Failed to resolve related objects: %v", err) } diff --git a/internal/sync/syncer_test.go b/internal/sync/syncer_test.go index e34dbbf..20275f7 100644 --- a/internal/sync/syncer_test.go +++ b/internal/sync/syncer_test.go @@ -38,7 +38,6 @@ import ( yamlutil "k8s.io/apimachinery/pkg/util/yaml" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" fakectrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/kontext" ) func buildFakeClient(objs ...*unstructured.Unstructured) ctrlruntimeclient.Client { @@ -907,9 +906,8 @@ func TestSyncerProcessingSingleResourceWithoutStatus(t *testing.T) { t.Fatalf("Failed to create syncer: %v", err) } - localCtx := t.Context() - remoteCtx := kontext.WithCluster(localCtx, clusterName) - ctx := NewContext(localCtx, remoteCtx) + ctx := t.Context() + cInfo := NewClusterInfo(clusterName) // setup a custom state backend that we can prime var backend *kubernetesBackend @@ -918,7 +916,7 @@ func TestSyncerProcessingSingleResourceWithoutStatus(t *testing.T) { if backend == nil { backend = newKubernetesBackend(stateNamespace, primaryObject, stateCluster) if testcase.existingState != "" { - if err := backend.Put(testcase.remoteObject, clusterName, []byte(testcase.existingState)); err != nil { + if err := backend.Put(ctx, testcase.remoteObject, clusterName, []byte(testcase.existingState)); err != nil { t.Fatalf("Failed to prime state store: %v", err) } } @@ -939,7 +937,7 @@ func TestSyncerProcessingSingleResourceWithoutStatus(t *testing.T) { t.Fatalf("Detected potential infinite loop, stopping after %d requeues.", i) } - requeue, err = syncer.Process(ctx, target) + requeue, err = 
syncer.Process(ctx, cInfo, target) if err != nil { break } @@ -948,7 +946,7 @@ func TestSyncerProcessingSingleResourceWithoutStatus(t *testing.T) { break } - if err = remoteClient.Get(remoteCtx, ctrlruntimeclient.ObjectKeyFromObject(target), target); err != nil { + if err = remoteClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(target), target); err != nil { // it's possible for the processing to have deleted the remote object, // so a NotFound is valid here if apierrors.IsNotFound(err) { @@ -959,15 +957,15 @@ func TestSyncerProcessingSingleResourceWithoutStatus(t *testing.T) { } } } else { - requeue, err = syncer.Process(ctx, testcase.remoteObject) + requeue, err = syncer.Process(ctx, cInfo, testcase.remoteObject) } - finalRemoteObject, getErr := getFinalObjectVersion(remoteCtx, remoteClient, testcase.remoteObject, testcase.expectedRemoteObject) + finalRemoteObject, getErr := getFinalObjectVersion(ctx, remoteClient, testcase.remoteObject, testcase.expectedRemoteObject) if getErr != nil { t.Fatalf("Failed to get final remote object: %v", getErr) } - finalLocalObject, getErr := getFinalObjectVersion(localCtx, localClient, testcase.localObject, testcase.expectedLocalObject) + finalLocalObject, getErr := getFinalObjectVersion(ctx, localClient, testcase.localObject, testcase.expectedLocalObject) if getErr != nil { t.Fatalf("Failed to get final local object: %v", getErr) } @@ -987,7 +985,7 @@ func TestSyncerProcessingSingleResourceWithoutStatus(t *testing.T) { t.Fatal("Cannot check object state, state store was never instantiated.") } - finalState, err := backend.Get(testcase.expectedRemoteObject, clusterName) + finalState, err := backend.Get(ctx, testcase.expectedRemoteObject, clusterName) if err != nil { t.Fatalf("Failed to get final state: %v", err) } else if !bytes.Equal(finalState, []byte(testcase.expectedState)) { @@ -1213,9 +1211,8 @@ func TestSyncerProcessingSingleResourceWithStatus(t *testing.T) { t.Fatalf("Failed to create syncer: %v", err) } - localCtx 
:= t.Context() - remoteCtx := kontext.WithCluster(localCtx, clusterName) - ctx := NewContext(localCtx, remoteCtx) + ctx := t.Context() + cInfo := NewClusterInfo(clusterName) // setup a custom state backend that we can prime var backend *kubernetesBackend @@ -1224,7 +1221,7 @@ func TestSyncerProcessingSingleResourceWithStatus(t *testing.T) { if backend == nil { backend = newKubernetesBackend(stateNamespace, primaryObject, stateCluster) if testcase.existingState != "" { - if err := backend.Put(testcase.remoteObject, clusterName, []byte(testcase.existingState)); err != nil { + if err := backend.Put(ctx, testcase.remoteObject, clusterName, []byte(testcase.existingState)); err != nil { t.Fatalf("Failed to prime state store: %v", err) } } @@ -1245,7 +1242,7 @@ func TestSyncerProcessingSingleResourceWithStatus(t *testing.T) { t.Fatalf("Detected potential infinite loop, stopping after %d requeues.", i) } - requeue, err = syncer.Process(ctx, target) + requeue, err = syncer.Process(ctx, cInfo, target) if err != nil { break } @@ -1254,7 +1251,7 @@ func TestSyncerProcessingSingleResourceWithStatus(t *testing.T) { break } - if err = remoteClient.Get(remoteCtx, ctrlruntimeclient.ObjectKeyFromObject(target), target); err != nil { + if err = remoteClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(target), target); err != nil { // it's possible for the processing to have deleted the remote object, // so a NotFound is valid here if apierrors.IsNotFound(err) { @@ -1265,15 +1262,15 @@ func TestSyncerProcessingSingleResourceWithStatus(t *testing.T) { } } } else { - requeue, err = syncer.Process(ctx, testcase.remoteObject) + requeue, err = syncer.Process(ctx, cInfo, testcase.remoteObject) } - finalRemoteObject, getErr := getFinalObjectVersion(remoteCtx, remoteClient, testcase.remoteObject, testcase.expectedRemoteObject) + finalRemoteObject, getErr := getFinalObjectVersion(ctx, remoteClient, testcase.remoteObject, testcase.expectedRemoteObject) if getErr != nil { t.Fatalf("Failed to 
get final remote object: %v", getErr) } - finalLocalObject, getErr := getFinalObjectVersion(localCtx, localClient, testcase.localObject, testcase.expectedLocalObject) + finalLocalObject, getErr := getFinalObjectVersion(ctx, localClient, testcase.localObject, testcase.expectedLocalObject) if getErr != nil { t.Fatalf("Failed to get final local object: %v", getErr) } @@ -1293,7 +1290,7 @@ func TestSyncerProcessingSingleResourceWithStatus(t *testing.T) { t.Fatal("Cannot check object state, state store was never instantiated.") } - finalState, err := backend.Get(testcase.expectedRemoteObject, clusterName) + finalState, err := backend.Get(ctx, testcase.expectedRemoteObject, clusterName) if err != nil { t.Fatalf("Failed to get final state: %v", err) } else if !bytes.Equal(finalState, []byte(testcase.expectedState)) { diff --git a/sdk/applyconfiguration/internal/internal.go b/sdk/applyconfiguration/internal/internal.go index 1bcc700..86ad90a 100644 --- a/sdk/applyconfiguration/internal/internal.go +++ b/sdk/applyconfiguration/internal/internal.go @@ -19,8 +19,8 @@ limitations under the License. package internal import ( - "fmt" - "sync" + fmt "fmt" + sync "sync" typed "sigs.k8s.io/structured-merge-diff/v4/typed" ) diff --git a/sdk/applyconfiguration/syncagent/v1alpha1/publishedresource.go b/sdk/applyconfiguration/syncagent/v1alpha1/publishedresource.go index 2e8e6dd..09270f6 100644 --- a/sdk/applyconfiguration/syncagent/v1alpha1/publishedresource.go +++ b/sdk/applyconfiguration/syncagent/v1alpha1/publishedresource.go @@ -47,7 +47,7 @@ func PublishedResource(name string) *PublishedResourceApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *PublishedResourceApplyConfiguration) WithKind(value string) *PublishedResourceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *PublishedResourceApplyConfiguration) WithKind(value string) *PublishedR // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PublishedResourceApplyConfiguration) WithAPIVersion(value string) *PublishedResourceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *PublishedResourceApplyConfiguration) WithAPIVersion(value string) *Publ // If called multiple times, the Name field is set to the value of the last call. func (b *PublishedResourceApplyConfiguration) WithName(value string) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *PublishedResourceApplyConfiguration) WithName(value string) *PublishedR // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PublishedResourceApplyConfiguration) WithGenerateName(value string) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *PublishedResourceApplyConfiguration) WithGenerateName(value string) *Pu // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *PublishedResourceApplyConfiguration) WithNamespace(value string) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *PublishedResourceApplyConfiguration) WithNamespace(value string) *Publi // If called multiple times, the UID field is set to the value of the last call. func (b *PublishedResourceApplyConfiguration) WithUID(value types.UID) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *PublishedResourceApplyConfiguration) WithUID(value types.UID) *Publishe // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PublishedResourceApplyConfiguration) WithResourceVersion(value string) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,7 +109,7 @@ func (b *PublishedResourceApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *PublishedResourceApplyConfiguration) WithGeneration(value int64) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -118,7 +118,7 @@ func (b *PublishedResourceApplyConfiguration) WithGeneration(value int64) *Publi // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *PublishedResourceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -127,7 +127,7 @@ func (b *PublishedResourceApplyConfiguration) WithCreationTimestamp(value metav1 // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PublishedResourceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *PublishedResourceApplyConfiguration) WithDeletionTimestamp(value metav1 // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PublishedResourceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *PublishedResourceApplyConfiguration) WithDeletionGracePeriodSeconds(val // overwriting an existing map entries in Labels field with the same key. 
func (b *PublishedResourceApplyConfiguration) WithLabels(entries map[string]string) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *PublishedResourceApplyConfiguration) WithLabels(entries map[string]stri // overwriting an existing map entries in Annotations field with the same key. func (b *PublishedResourceApplyConfiguration) WithAnnotations(entries map[string]string) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -179,7 +179,7 @@ func (b *PublishedResourceApplyConfiguration) WithOwnerReferences(values ...*v1. if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,7 +190,7 @@ func (b *PublishedResourceApplyConfiguration) WithOwnerReferences(values ...*v1. 
func (b *PublishedResourceApplyConfiguration) WithFinalizers(values ...string) *PublishedResourceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -220,5 +220,5 @@ func (b *PublishedResourceApplyConfiguration) WithStatus(value *PublishedResourc // GetName retrieves the value of the Name field in the declarative configuration. func (b *PublishedResourceApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() - return b.Name + return b.ObjectMetaApplyConfiguration.Name } diff --git a/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourceobject.go b/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourceobject.go index 311205c..525b98a 100644 --- a/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourceobject.go +++ b/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourceobject.go @@ -35,7 +35,7 @@ func RelatedResourceObject() *RelatedResourceObjectApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. func (b *RelatedResourceObjectApplyConfiguration) WithSelector(value *RelatedResourceObjectSelectorApplyConfiguration) *RelatedResourceObjectApplyConfiguration { - b.Selector = value + b.RelatedResourceObjectSpecApplyConfiguration.Selector = value return b } @@ -43,7 +43,7 @@ func (b *RelatedResourceObjectApplyConfiguration) WithSelector(value *RelatedRes // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Reference field is set to the value of the last call. 
func (b *RelatedResourceObjectApplyConfiguration) WithReference(value *RelatedResourceObjectReferenceApplyConfiguration) *RelatedResourceObjectApplyConfiguration { - b.Reference = value + b.RelatedResourceObjectSpecApplyConfiguration.Reference = value return b } @@ -51,7 +51,7 @@ func (b *RelatedResourceObjectApplyConfiguration) WithReference(value *RelatedRe // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Template field is set to the value of the last call. func (b *RelatedResourceObjectApplyConfiguration) WithTemplate(value *TemplateExpressionApplyConfiguration) *RelatedResourceObjectApplyConfiguration { - b.Template = value + b.RelatedResourceObjectSpecApplyConfiguration.Template = value return b } diff --git a/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourceobjectselector.go b/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourceobjectselector.go index 40f6231..55c0bda 100644 --- a/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourceobjectselector.go +++ b/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourceobjectselector.go @@ -40,11 +40,11 @@ func RelatedResourceObjectSelector() *RelatedResourceObjectSelectorApplyConfigur // If called multiple times, the entries provided by each call will be put on the MatchLabels field, // overwriting an existing map entries in MatchLabels field with the same key. 
func (b *RelatedResourceObjectSelectorApplyConfiguration) WithMatchLabels(entries map[string]string) *RelatedResourceObjectSelectorApplyConfiguration { - if b.MatchLabels == nil && len(entries) > 0 { - b.MatchLabels = make(map[string]string, len(entries)) + if b.LabelSelectorApplyConfiguration.MatchLabels == nil && len(entries) > 0 { + b.LabelSelectorApplyConfiguration.MatchLabels = make(map[string]string, len(entries)) } for k, v := range entries { - b.MatchLabels[k] = v + b.LabelSelectorApplyConfiguration.MatchLabels[k] = v } return b } @@ -57,7 +57,7 @@ func (b *RelatedResourceObjectSelectorApplyConfiguration) WithMatchExpressions(v if values[i] == nil { panic("nil value passed to WithMatchExpressions") } - b.MatchExpressions = append(b.MatchExpressions, *values[i]) + b.LabelSelectorApplyConfiguration.MatchExpressions = append(b.LabelSelectorApplyConfiguration.MatchExpressions, *values[i]) } return b } diff --git a/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourcespec.go b/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourcespec.go index 8ea51a6..644e3de 100644 --- a/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourcespec.go +++ b/sdk/applyconfiguration/syncagent/v1alpha1/relatedresourcespec.go @@ -19,14 +19,14 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" + syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" ) // RelatedResourceSpecApplyConfiguration represents a declarative configuration of the RelatedResourceSpec type for use // with apply. 
type RelatedResourceSpecApplyConfiguration struct { Identifier *string `json:"identifier,omitempty"` - Origin *v1alpha1.RelatedResourceOrigin `json:"origin,omitempty"` + Origin *syncagentv1alpha1.RelatedResourceOrigin `json:"origin,omitempty"` Kind *string `json:"kind,omitempty"` Object *RelatedResourceObjectApplyConfiguration `json:"object,omitempty"` Mutation *ResourceMutationSpecApplyConfiguration `json:"mutation,omitempty"` @@ -49,7 +49,7 @@ func (b *RelatedResourceSpecApplyConfiguration) WithIdentifier(value string) *Re // WithOrigin sets the Origin field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Origin field is set to the value of the last call. -func (b *RelatedResourceSpecApplyConfiguration) WithOrigin(value v1alpha1.RelatedResourceOrigin) *RelatedResourceSpecApplyConfiguration { +func (b *RelatedResourceSpecApplyConfiguration) WithOrigin(value syncagentv1alpha1.RelatedResourceOrigin) *RelatedResourceSpecApplyConfiguration { b.Origin = &value return b } diff --git a/sdk/applyconfiguration/syncagent/v1alpha1/resourceprojection.go b/sdk/applyconfiguration/syncagent/v1alpha1/resourceprojection.go index 84e268a..c5ec55b 100644 --- a/sdk/applyconfiguration/syncagent/v1alpha1/resourceprojection.go +++ b/sdk/applyconfiguration/syncagent/v1alpha1/resourceprojection.go @@ -19,20 +19,20 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" + syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" ) // ResourceProjectionApplyConfiguration represents a declarative configuration of the ResourceProjection type for use // with apply. 
type ResourceProjectionApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Version *string `json:"version,omitempty"` - Versions map[string]string `json:"versions,omitempty"` - Scope *v1alpha1.ResourceScope `json:"scope,omitempty"` - Kind *string `json:"kind,omitempty"` - Plural *string `json:"plural,omitempty"` - ShortNames []string `json:"shortNames,omitempty"` - Categories []string `json:"categories,omitempty"` + Group *string `json:"group,omitempty"` + Version *string `json:"version,omitempty"` + Versions map[string]string `json:"versions,omitempty"` + Scope *syncagentv1alpha1.ResourceScope `json:"scope,omitempty"` + Kind *string `json:"kind,omitempty"` + Plural *string `json:"plural,omitempty"` + ShortNames []string `json:"shortNames,omitempty"` + Categories []string `json:"categories,omitempty"` } // ResourceProjectionApplyConfiguration constructs a declarative configuration of the ResourceProjection type for use with @@ -74,7 +74,7 @@ func (b *ResourceProjectionApplyConfiguration) WithVersions(entries map[string]s // WithScope sets the Scope field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. -func (b *ResourceProjectionApplyConfiguration) WithScope(value v1alpha1.ResourceScope) *ResourceProjectionApplyConfiguration { +func (b *ResourceProjectionApplyConfiguration) WithScope(value syncagentv1alpha1.ResourceScope) *ResourceProjectionApplyConfiguration { b.Scope = &value return b } diff --git a/sdk/clientset/versioned/clientset.go b/sdk/clientset/versioned/clientset.go index 3a4af51..ed65d3b 100644 --- a/sdk/clientset/versioned/clientset.go +++ b/sdk/clientset/versioned/clientset.go @@ -19,8 +19,8 @@ limitations under the License. 
package versioned import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" diff --git a/sdk/clientset/versioned/typed/syncagent/v1alpha1/fake/fake_publishedresource.go b/sdk/clientset/versioned/typed/syncagent/v1alpha1/fake/fake_publishedresource.go index 6ad8915..9f09073 100644 --- a/sdk/clientset/versioned/typed/syncagent/v1alpha1/fake/fake_publishedresource.go +++ b/sdk/clientset/versioned/typed/syncagent/v1alpha1/fake/fake_publishedresource.go @@ -19,121 +19,35 @@ limitations under the License. package fake import ( - "context" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + gentype "k8s.io/client-go/gentype" v1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" + syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/clientset/versioned/typed/syncagent/v1alpha1" ) -// FakePublishedResources implements PublishedResourceInterface -type FakePublishedResources struct { +// fakePublishedResources implements PublishedResourceInterface +type fakePublishedResources struct { + *gentype.FakeClientWithList[*v1alpha1.PublishedResource, *v1alpha1.PublishedResourceList] Fake *FakeSyncagentV1alpha1 } -var publishedresourcesResource = v1alpha1.SchemeGroupVersion.WithResource("publishedresources") - -var publishedresourcesKind = v1alpha1.SchemeGroupVersion.WithKind("PublishedResource") - -// Get takes name of the publishedResource, and returns the corresponding publishedResource object, and an error if there is any. -func (c *FakePublishedResources) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PublishedResource, err error) { - emptyResult := &v1alpha1.PublishedResource{} - obj, err := c.Fake. 
- Invokes(testing.NewRootGetActionWithOptions(publishedresourcesResource, name, options), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.PublishedResource), err -} - -// List takes label and field selectors, and returns the list of PublishedResources that match those selectors. -func (c *FakePublishedResources) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PublishedResourceList, err error) { - emptyResult := &v1alpha1.PublishedResourceList{} - obj, err := c.Fake. - Invokes(testing.NewRootListActionWithOptions(publishedresourcesResource, publishedresourcesKind, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.PublishedResourceList{ListMeta: obj.(*v1alpha1.PublishedResourceList).ListMeta} - for _, item := range obj.(*v1alpha1.PublishedResourceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested publishedResources. -func (c *FakePublishedResources) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchActionWithOptions(publishedresourcesResource, opts)) -} - -// Create takes the representation of a publishedResource and creates it. Returns the server's representation of the publishedResource, and an error, if there is any. -func (c *FakePublishedResources) Create(ctx context.Context, publishedResource *v1alpha1.PublishedResource, opts v1.CreateOptions) (result *v1alpha1.PublishedResource, err error) { - emptyResult := &v1alpha1.PublishedResource{} - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateActionWithOptions(publishedresourcesResource, publishedResource, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.PublishedResource), err -} - -// Update takes the representation of a publishedResource and updates it. Returns the server's representation of the publishedResource, and an error, if there is any. -func (c *FakePublishedResources) Update(ctx context.Context, publishedResource *v1alpha1.PublishedResource, opts v1.UpdateOptions) (result *v1alpha1.PublishedResource, err error) { - emptyResult := &v1alpha1.PublishedResource{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateActionWithOptions(publishedresourcesResource, publishedResource, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.PublishedResource), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePublishedResources) UpdateStatus(ctx context.Context, publishedResource *v1alpha1.PublishedResource, opts v1.UpdateOptions) (result *v1alpha1.PublishedResource, err error) { - emptyResult := &v1alpha1.PublishedResource{} - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceActionWithOptions(publishedresourcesResource, "status", publishedResource, opts), emptyResult) - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha1.PublishedResource), err -} - -// Delete takes name of the publishedResource and deletes it. Returns an error if one occurs. -func (c *FakePublishedResources) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(publishedresourcesResource, name, opts), &v1alpha1.PublishedResource{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakePublishedResources) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionActionWithOptions(publishedresourcesResource, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.PublishedResourceList{}) - return err -} - -// Patch applies the patch and returns the patched publishedResource. -func (c *FakePublishedResources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PublishedResource, err error) { - emptyResult := &v1alpha1.PublishedResource{} - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceActionWithOptions(publishedresourcesResource, name, pt, data, opts, subresources...), emptyResult) - if obj == nil { - return emptyResult, err +func newFakePublishedResources(fake *FakeSyncagentV1alpha1) syncagentv1alpha1.PublishedResourceInterface { + return &fakePublishedResources{ + gentype.NewFakeClientWithList[*v1alpha1.PublishedResource, *v1alpha1.PublishedResourceList]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("publishedresources"), + v1alpha1.SchemeGroupVersion.WithKind("PublishedResource"), + func() *v1alpha1.PublishedResource { return &v1alpha1.PublishedResource{} }, + func() *v1alpha1.PublishedResourceList { return &v1alpha1.PublishedResourceList{} }, + func(dst, src *v1alpha1.PublishedResourceList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.PublishedResourceList) []*v1alpha1.PublishedResource { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.PublishedResourceList, items []*v1alpha1.PublishedResource) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.PublishedResource), err } diff --git a/sdk/clientset/versioned/typed/syncagent/v1alpha1/fake/fake_syncagent_client.go b/sdk/clientset/versioned/typed/syncagent/v1alpha1/fake/fake_syncagent_client.go index 
d1a552c..d7688ee 100644 --- a/sdk/clientset/versioned/typed/syncagent/v1alpha1/fake/fake_syncagent_client.go +++ b/sdk/clientset/versioned/typed/syncagent/v1alpha1/fake/fake_syncagent_client.go @@ -30,7 +30,7 @@ type FakeSyncagentV1alpha1 struct { } func (c *FakeSyncagentV1alpha1) PublishedResources() v1alpha1.PublishedResourceInterface { - return &FakePublishedResources{c} + return newFakePublishedResources(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/sdk/clientset/versioned/typed/syncagent/v1alpha1/publishedresource.go b/sdk/clientset/versioned/typed/syncagent/v1alpha1/publishedresource.go index 93f799c..77c79bb 100644 --- a/sdk/clientset/versioned/typed/syncagent/v1alpha1/publishedresource.go +++ b/sdk/clientset/versioned/typed/syncagent/v1alpha1/publishedresource.go @@ -19,14 +19,14 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" gentype "k8s.io/client-go/gentype" - v1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" + syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" scheme "github.com/kcp-dev/api-syncagent/sdk/clientset/versioned/scheme" ) @@ -38,33 +38,34 @@ type PublishedResourcesGetter interface { // PublishedResourceInterface has methods to work with PublishedResource resources. 
type PublishedResourceInterface interface { - Create(ctx context.Context, publishedResource *v1alpha1.PublishedResource, opts v1.CreateOptions) (*v1alpha1.PublishedResource, error) - Update(ctx context.Context, publishedResource *v1alpha1.PublishedResource, opts v1.UpdateOptions) (*v1alpha1.PublishedResource, error) + Create(ctx context.Context, publishedResource *syncagentv1alpha1.PublishedResource, opts v1.CreateOptions) (*syncagentv1alpha1.PublishedResource, error) + Update(ctx context.Context, publishedResource *syncagentv1alpha1.PublishedResource, opts v1.UpdateOptions) (*syncagentv1alpha1.PublishedResource, error) // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, publishedResource *v1alpha1.PublishedResource, opts v1.UpdateOptions) (*v1alpha1.PublishedResource, error) + UpdateStatus(ctx context.Context, publishedResource *syncagentv1alpha1.PublishedResource, opts v1.UpdateOptions) (*syncagentv1alpha1.PublishedResource, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PublishedResource, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PublishedResourceList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*syncagentv1alpha1.PublishedResource, error) + List(ctx context.Context, opts v1.ListOptions) (*syncagentv1alpha1.PublishedResourceList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PublishedResource, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *syncagentv1alpha1.PublishedResource, err error) 
PublishedResourceExpansion } // publishedResources implements PublishedResourceInterface type publishedResources struct { - *gentype.ClientWithList[*v1alpha1.PublishedResource, *v1alpha1.PublishedResourceList] + *gentype.ClientWithList[*syncagentv1alpha1.PublishedResource, *syncagentv1alpha1.PublishedResourceList] } // newPublishedResources returns a PublishedResources func newPublishedResources(c *SyncagentV1alpha1Client) *publishedResources { return &publishedResources{ - gentype.NewClientWithList[*v1alpha1.PublishedResource, *v1alpha1.PublishedResourceList]( + gentype.NewClientWithList[*syncagentv1alpha1.PublishedResource, *syncagentv1alpha1.PublishedResourceList]( "publishedresources", c.RESTClient(), scheme.ParameterCodec, "", - func() *v1alpha1.PublishedResource { return &v1alpha1.PublishedResource{} }, - func() *v1alpha1.PublishedResourceList { return &v1alpha1.PublishedResourceList{} }), + func() *syncagentv1alpha1.PublishedResource { return &syncagentv1alpha1.PublishedResource{} }, + func() *syncagentv1alpha1.PublishedResourceList { return &syncagentv1alpha1.PublishedResourceList{} }, + ), } } diff --git a/sdk/clientset/versioned/typed/syncagent/v1alpha1/syncagent_client.go b/sdk/clientset/versioned/typed/syncagent/v1alpha1/syncagent_client.go index 2d94a8f..c016500 100644 --- a/sdk/clientset/versioned/typed/syncagent/v1alpha1/syncagent_client.go +++ b/sdk/clientset/versioned/typed/syncagent/v1alpha1/syncagent_client.go @@ -19,12 +19,12 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" rest "k8s.io/client-go/rest" - v1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" - "github.com/kcp-dev/api-syncagent/sdk/clientset/versioned/scheme" + syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" + scheme "github.com/kcp-dev/api-syncagent/sdk/clientset/versioned/scheme" ) type SyncagentV1alpha1Interface interface { @@ -86,10 +86,10 @@ func New(c rest.Interface) *SyncagentV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := syncagentv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/test/e2e/sync/primary_test.go b/test/e2e/sync/primary_test.go index 0958f19..8d643c5 100644 --- a/test/e2e/sync/primary_test.go +++ b/test/e2e/sync/primary_test.go @@ -43,7 +43,6 @@ import ( yamlutil "k8s.io/apimachinery/pkg/util/yaml" ctrlruntime "sigs.k8s.io/controller-runtime" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/kontext" ) func TestSyncSimpleObject(t *testing.T) { @@ -95,9 +94,12 @@ func TestSyncSimpleObject(t *testing.T) { utils.RunAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) // wait until the API is available - teamCtx := kontext.WithCluster(ctx, logicalcluster.Name(fmt.Sprintf("root:%s:team-1", orgWorkspace))) - kcpClient := utils.GetKcpAdminClusterClient(t) - utils.WaitForBoundAPI(t, teamCtx, kcpClient, schema.GroupVersionResource{ + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + + teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join("team-1") + teamClient := 
kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, schema.GroupVersionResource{ Group: kcpGroupName, Version: "v1", Resource: "crontabs", @@ -116,7 +118,7 @@ spec: image: ubuntu:latest `) - if err := kcpClient.Create(teamCtx, crontab); err != nil { + if err := teamClient.Create(ctx, crontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } @@ -185,9 +187,12 @@ func TestSyncSimpleObjectOldNaming(t *testing.T) { utils.RunAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) // wait until the API is available - teamCtx := kontext.WithCluster(ctx, logicalcluster.Name(fmt.Sprintf("root:%s:team-1", orgWorkspace))) - kcpClient := utils.GetKcpAdminClusterClient(t) - utils.WaitForBoundAPI(t, teamCtx, kcpClient, schema.GroupVersionResource{ + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + + teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join("team-1") + teamClient := kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, schema.GroupVersionResource{ Group: kcpGroupName, Version: "v1", Resource: "crontabs", @@ -206,7 +211,7 @@ spec: image: ubuntu:latest `) - if err := kcpClient.Create(teamCtx, crontab); err != nil { + if err := teamClient.Create(ctx, crontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } @@ -269,7 +274,8 @@ func TestSyncWithDefaultNamingRules(t *testing.T) { utils.RunAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) // wait until the API is available - kcpClient := utils.GetKcpAdminClusterClient(t) + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + crontabsGVR := schema.GroupVersionResource{ Group: "kcp.example.com", Version: "v1", @@ -291,10 +297,12 @@ spec: t.Log("Creating CronTabs in kcp…") for _, team := range []string{"team-1", "team-2"} { - teamCtx := kontext.WithCluster(ctx, logicalcluster.Name(fmt.Sprintf("root:%s:%s", orgWorkspace, team))) - utils.WaitForBoundAPI(t, 
teamCtx, kcpClient, crontabsGVR) + teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join(team) + teamClient := kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, crontabsGVR) - if err := kcpClient.Create(teamCtx, yamlToUnstructured(t, crontabYAML)); err != nil { + if err := teamClient.Create(ctx, yamlToUnstructured(t, crontabYAML)); err != nil { t.Fatalf("Failed to create %s's CronTab in kcp: %v", team, err) } } @@ -369,9 +377,12 @@ func TestLocalChangesAreKept(t *testing.T) { utils.RunAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) // wait until the API is available - teamCtx := kontext.WithCluster(ctx, logicalcluster.Name(fmt.Sprintf("root:%s:team-1", orgWorkspace))) - kcpClient := utils.GetKcpAdminClusterClient(t) - utils.WaitForBoundAPI(t, teamCtx, kcpClient, schema.GroupVersionResource{ + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + + teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join("team-1") + teamClient := kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, schema.GroupVersionResource{ Group: kcpGroupName, Version: "v1", Resource: "crontabs", @@ -390,7 +401,7 @@ spec: image: ubuntu:latest `) - if err := kcpClient.Create(teamCtx, crontab); err != nil { + if err := teamClient.Create(ctx, crontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } @@ -423,7 +434,7 @@ spec: // make some changes in kcp, these should be applied to the local object without overwriting the cronSpec // refresh the current object state - if err := kcpClient.Get(teamCtx, ctrlruntimeclient.ObjectKeyFromObject(crontab), crontab); err != nil { + if err := teamClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(crontab), crontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } @@ -431,7 +442,7 @@ spec: unstructured.SetNestedField(crontab.Object, newImage, "spec", "image") t.Logf("Modifying object in 
kcp…") - if err := kcpClient.Update(teamCtx, crontab); err != nil { + if err := teamClient.Update(ctx, crontab); err != nil { t.Fatalf("Failed to update source object in kcp: %v", err) } @@ -473,14 +484,14 @@ spec: // Now we actually change the cronSpec in kcp, and this change _must_ make it to the service cluster. t.Logf("Modify object in kcp again…") - if err := kcpClient.Get(teamCtx, ctrlruntimeclient.ObjectKeyFromObject(crontab), crontab); err != nil { + if err := teamClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(crontab), crontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } kcpNewCronSpec := "users-new-desired-cronspec" unstructured.SetNestedField(crontab.Object, kcpNewCronSpec, "spec", "cronSpec") - if err := kcpClient.Update(teamCtx, crontab); err != nil { + if err := teamClient.Update(ctx, crontab); err != nil { t.Fatalf("Failed to update source object in kcp: %v", err) } @@ -582,9 +593,12 @@ func TestResourceFilter(t *testing.T) { utils.RunAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) // wait until the API is available - teamCtx := kontext.WithCluster(ctx, logicalcluster.Name(fmt.Sprintf("root:%s:team-1", orgWorkspace))) - kcpClient := utils.GetKcpAdminClusterClient(t) - utils.WaitForBoundAPI(t, teamCtx, kcpClient, schema.GroupVersionResource{ + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + + teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join("team-1") + teamClient := kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, schema.GroupVersionResource{ Group: kcpGroupName, Version: "v1", Resource: "crontabs", @@ -602,7 +616,7 @@ spec: image: ubuntu:latest `) - if err := kcpClient.Create(teamCtx, ignoredCrontab); err != nil { + if err := teamClient.Create(ctx, ignoredCrontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } @@ -618,7 +632,7 @@ spec: image: debian:12 `) - if err := kcpClient.Create(teamCtx, 
includedCrontab); err != nil { + if err := teamClient.Create(ctx, includedCrontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } @@ -696,9 +710,12 @@ func TestSyncingOverlyLongNames(t *testing.T) { utils.RunAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) // wait until the API is available - teamCtx := kontext.WithCluster(ctx, logicalcluster.Name(fmt.Sprintf("root:%s:team-1", orgWorkspace))) - kcpClient := utils.GetKcpAdminClusterClient(t) - utils.WaitForBoundAPI(t, teamCtx, kcpClient, schema.GroupVersionResource{ + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + + teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join("team-1") + teamClient := kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, schema.GroupVersionResource{ Group: kcpGroupName, Version: "v1", Resource: "crontabs", @@ -708,7 +725,7 @@ func TestSyncingOverlyLongNames(t *testing.T) { namespace := &corev1.Namespace{} namespace.Name = strings.Repeat("yadda", 3) // 250 chars in total - if err := kcpClient.Create(teamCtx, namespace); err != nil { + if err := teamClient.Create(ctx, namespace); err != nil { t.Fatalf("Failed to create namespace in kcp: %v", err) } @@ -725,7 +742,7 @@ spec: ignoredCrontab.SetNamespace(namespace.Name) ignoredCrontab.SetName(strings.Repeat("yotta", 50)) - if err := kcpClient.Create(teamCtx, ignoredCrontab); err != nil { + if err := teamClient.Create(ctx, ignoredCrontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } diff --git a/test/e2e/sync/related_test.go b/test/e2e/sync/related_test.go index fe59a34..2bdcfc7 100644 --- a/test/e2e/sync/related_test.go +++ b/test/e2e/sync/related_test.go @@ -20,7 +20,6 @@ package sync import ( "context" - "fmt" "maps" "strings" "testing" @@ -41,7 +40,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ctrlruntime "sigs.k8s.io/controller-runtime" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - 
"sigs.k8s.io/controller-runtime/pkg/kontext" ) func TestSyncRelatedObjects(t *testing.T) { @@ -53,7 +51,7 @@ func TestSyncRelatedObjects(t *testing.T) { // the name of this testcase name string //the org workspace everything should happen in - workspace logicalcluster.Name + workspace string // the configuration for the related resource relatedConfig syncagentv1alpha1.RelatedResourceSpec // the primary object created by the user in kcp @@ -516,9 +514,12 @@ func TestSyncRelatedObjects(t *testing.T) { utils.RunAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) // wait until the API is available - teamCtx := kontext.WithCluster(ctx, logicalcluster.Name(fmt.Sprintf("root:%s:team-1", testcase.workspace))) - kcpClient := utils.GetKcpAdminClusterClient(t) - utils.WaitForBoundAPI(t, teamCtx, kcpClient, schema.GroupVersionResource{ + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + + teamClusterPath := logicalcluster.NewPath("root").Join(testcase.workspace).Join("team-1") + teamClient := kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, schema.GroupVersionResource{ Group: apiExportName, Version: "v1", Resource: "crontabs", @@ -531,7 +532,7 @@ func TestSyncRelatedObjects(t *testing.T) { crontab.SetAPIVersion("kcp.example.com/v1") crontab.SetKind("CronTab") - if err := kcpClient.Create(teamCtx, crontab); err != nil { + if err := teamClient.Create(ctx, crontab); err != nil { t.Fatalf("Failed to create CronTab in kcp: %v", err) } @@ -539,25 +540,22 @@ func TestSyncRelatedObjects(t *testing.T) { t.Logf("Creating credential Secret on the %s side…", testcase.relatedConfig.Origin) originClient := envtestClient - originContext := ctx - destClient := kcpClient - destContext := teamCtx + destClient := teamClient if testcase.relatedConfig.Origin == "kcp" { originClient, destClient = destClient, originClient - originContext, destContext = destContext, originContext } - ensureNamespace(t, originContext, originClient, 
testcase.sourceRelatedObject.Namespace) + ensureNamespace(t, ctx, originClient, testcase.sourceRelatedObject.Namespace) - if err := originClient.Create(originContext, &testcase.sourceRelatedObject); err != nil { + if err := originClient.Create(ctx, &testcase.sourceRelatedObject); err != nil { t.Fatalf("Failed to create Secret: %v", err) } // wait for the agent to do its magic t.Log("Wait for Secret to be synced…") copySecret := &corev1.Secret{} - err := wait.PollUntilContextTimeout(destContext, 500*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) { + err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) { copyKey := ctrlruntimeclient.ObjectKeyFromObject(&testcase.expectedSyncedRelatedObject) return destClient.Get(ctx, copyKey, copySecret) == nil, nil }) diff --git a/test/utils/fixtures.go b/test/utils/fixtures.go index 4cc6cf2..9f264b5 100644 --- a/test/utils/fixtures.go +++ b/test/utils/fixtures.go @@ -39,29 +39,29 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/yaml" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/kontext" ) func CreateOrganization( t *testing.T, ctx context.Context, - workspaceName logicalcluster.Name, + workspaceName string, apiExportName string, ) string { t.Helper() - kcpClient := GetKcpAdminClusterClient(t) + kcpClusterClient := GetKcpAdminClusterClient(t) agent := rbacv1.Subject{ Kind: "User", Name: "api-syncagent-e2e", } // setup workspaces - orgClusterName := CreateWorkspace(t, ctx, kcpClient, "root", workspaceName) + clusterPath := logicalcluster.NewPath("root") + orgClusterName := CreateWorkspace(t, ctx, kcpClusterClient.Cluster(clusterPath), workspaceName) // grant access and allow the agent to resolve its own workspace path - homeCtx := kontext.WithCluster(ctx, orgClusterName) - GrantWorkspaceAccess(t, homeCtx, kcpClient, 
string(workspaceName), agent, rbacv1.PolicyRule{ + orgClient := kcpClusterClient.Cluster(clusterPath.Join(workspaceName)) + GrantWorkspaceAccess(t, ctx, orgClient, agent, rbacv1.PolicyRule{ APIGroups: []string{"core.kcp.io"}, Resources: []string{"logicalclusters"}, ResourceNames: []string{"cluster"}, @@ -70,34 +70,31 @@ func CreateOrganization( // add some consumer workspaces teamClusters := []logicalcluster.Name{ - CreateWorkspace(t, ctx, kcpClient, orgClusterName, "team-1"), - CreateWorkspace(t, ctx, kcpClient, orgClusterName, "team-2"), + CreateWorkspace(t, ctx, orgClient, "team-1"), + CreateWorkspace(t, ctx, orgClient, "team-2"), } // setup the APIExport and wait for it to be ready - apiExport := CreateAPIExport(t, homeCtx, kcpClient, apiExportName, &agent) + apiExport := CreateAPIExport(t, ctx, orgClient, apiExportName, &agent) // bind it in all team workspaces, so the virtual workspace is ready inside kcp for _, teamCluster := range teamClusters { - teamCtx := kontext.WithCluster(ctx, teamCluster) - BindToAPIExport(t, teamCtx, kcpClient, apiExport) + BindToAPIExport(t, ctx, kcpClusterClient.Cluster(teamCluster.Path()), apiExport) } return CreateKcpAgentKubeconfig(t, fmt.Sprintf("/clusters/%s", orgClusterName)) } -func CreateWorkspace(t *testing.T, ctx context.Context, client ctrlruntimeclient.Client, parent logicalcluster.Name, workspaceName logicalcluster.Name) logicalcluster.Name { +func CreateWorkspace(t *testing.T, ctx context.Context, client ctrlruntimeclient.Client, workspaceName string) logicalcluster.Name { t.Helper() testWs := &kcptenancyv1alpha1.Workspace{ ObjectMeta: metav1.ObjectMeta{ - Name: workspaceName.String(), + Name: workspaceName, }, } - ctx = kontext.WithCluster(ctx, parent) - - t.Logf("Creating workspace %s:%s…", parent, workspaceName) + t.Logf("Creating workspace %s…", workspaceName) if err := client.Create(ctx, testWs); err != nil { t.Fatalf("Failed to create %q workspace: %v", workspaceName, err) } @@ -196,7 +193,7 @@ func 
CreateAPIExport(t *testing.T, ctx context.Context, client ctrlruntimeclient return apiExport } -func GrantWorkspaceAccess(t *testing.T, ctx context.Context, client ctrlruntimeclient.Client, workspaceName string, rbacSubject rbacv1.Subject, extraRules ...rbacv1.PolicyRule) { +func GrantWorkspaceAccess(t *testing.T, ctx context.Context, client ctrlruntimeclient.Client, rbacSubject rbacv1.Subject, extraRules ...rbacv1.PolicyRule) { t.Helper() clusterRoleName := fmt.Sprintf("access-workspace:%s", strings.ToLower(rbacSubject.Name)) diff --git a/test/utils/utils.go b/test/utils/utils.go index f48a7f1..edc9836 100644 --- a/test/utils/utils.go +++ b/test/utils/utils.go @@ -27,6 +27,7 @@ import ( kcpapisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" kcptenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" + mcclient "github.com/kcp-dev/multicluster-provider/client" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -37,7 +38,6 @@ import ( "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/kcp" ) func GetKcpAdminKubeconfig(t *testing.T) string { @@ -69,12 +69,12 @@ func newScheme(t *testing.T) *runtime.Scheme { var clusterPathSuffix = regexp.MustCompile(`/clusters/[a-z0-9:*]+$`) -func GetKcpAdminClusterClient(t *testing.T) ctrlruntimeclient.Client { +func GetKcpAdminClusterClient(t *testing.T) mcclient.ClusterClient { t.Helper() return GetClusterClient(t, GetKcpAdminKubeconfig(t)) } -func GetClusterClient(t *testing.T, kubeconfig string) ctrlruntimeclient.Client { +func GetClusterClient(t *testing.T, kubeconfig string) mcclient.ClusterClient { t.Helper() config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) @@ -86,7 +86,7 @@ func GetClusterClient(t *testing.T, kubeconfig string) ctrlruntimeclient.Client // to point to the base URL (either of kcp or a virtual workspace) config.Host = 
clusterPathSuffix.ReplaceAllLiteralString(config.Host, "") - client, err := kcp.NewClusterAwareClient(config, ctrlruntimeclient.Options{ + client, err := mcclient.New(config, ctrlruntimeclient.Options{ Scheme: newScheme(t), }) if err != nil {