
Commit c238f41

Add a new load-balancer-status flag for setting ingress details
Signed-off-by: Haitao Li <[email protected]>
1 parent cc248dc commit c238f41

14 files changed: +414 additions, -108 deletions

apis/projectcontour/v1alpha1/contourconfig.go

Lines changed: 7 additions & 3 deletions
@@ -239,11 +239,15 @@ type EnvoyConfig struct {
     // +optional
     Service *NamespacedName `json:"service,omitempty"`
 
-    // Ingress holds Envoy service parameters for setting Ingress status.
+    // LoadBalancer specifies how Contour should set the ingress status address.
+    // If provided, the value can be in one of the formats:
+    // - address:<address,...>: Contour will use the provided comma separated list of addresses directly. The address can be a fully qualified domain name or an IP address.
+    // - service:<namespace>/<name>: Contour will use the address of the designated service.
+    // - ingress:<namespace>/<name>: Contour will use the address of the designated ingress.
     //
-    // Contour's default is { namespace: "projectcontour", name: "envoy" }.
+    // Contour's default is an empty string.
     // +optional
-    Ingress *NamespacedName `json:"ingress,omitempty"`
+    LoadBalancer string `json:"loadBalancer,omitempty"`
 
     // Defines the HTTP Listener for Envoy.
     //
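
To make the new field concrete, here is a minimal sketch of populating it from Go. Only the EnvoyConfig type and its LoadBalancer field come from this diff; the surrounding program and the reference "service:projectcontour/envoy" are illustrative assumptions.

package main

import (
    "fmt"

    contour_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1"
)

func main() {
    // Illustrative only: ask Contour to mirror the named Service's address
    // into ingress status via the LoadBalancer field added by this commit.
    envoy := contour_v1alpha1.EnvoyConfig{
        LoadBalancer: "service:projectcontour/envoy",
    }
    fmt.Println(envoy.LoadBalancer)
}

Leaving the field unset keeps the documented default (an empty string), in which case the serve path below falls back to watching the configured Envoy Service.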

cmd/contour/serve.go

Lines changed: 115 additions & 39 deletions
@@ -20,6 +20,7 @@ import (
     "net/http"
     "os"
     "strconv"
+    "strings"
     "time"
 
     "github.com/alecthomas/kingpin/v2"
@@ -146,8 +147,6 @@ func registerServe(app *kingpin.Application) (*kingpin.CmdClause, *serveContext)
     serve.Flag("envoy-service-https-port", "Kubernetes Service port for HTTPS requests.").PlaceHolder("<port>").IntVar(&ctx.httpsPort)
     serve.Flag("envoy-service-name", "Name of the Envoy service to inspect for Ingress status details.").PlaceHolder("<name>").StringVar(&ctx.Config.EnvoyServiceName)
     serve.Flag("envoy-service-namespace", "Envoy Service Namespace.").PlaceHolder("<namespace>").StringVar(&ctx.Config.EnvoyServiceNamespace)
-    serve.Flag("envoy-ingress-name", "Name of the Envoy ingress to inspect for Ingress status details.").PlaceHolder("<name>").StringVar(&ctx.Config.EnvoyIngressName)
-    serve.Flag("envoy-ingress-namespace", "Envoy Ingress Namespace.").PlaceHolder("<namespace>").StringVar(&ctx.Config.EnvoyIngressNamespace)
 
     serve.Flag("health-address", "Address the health HTTP endpoint will bind to.").PlaceHolder("<ipaddr>").StringVar(&ctx.healthAddr)
     serve.Flag("health-port", "Port the health HTTP endpoint will bind to.").PlaceHolder("<port>").IntVar(&ctx.healthPort)
@@ -170,6 +169,8 @@ func registerServe(app *kingpin.Application) (*kingpin.CmdClause, *serveContext)
     serve.Flag("leader-election-resource-namespace", "The namespace of the resource (Lease) leader election will lease.").Default(config.GetenvOr("CONTOUR_NAMESPACE", "projectcontour")).StringVar(&ctx.LeaderElection.Namespace)
     serve.Flag("leader-election-retry-period", "The interval which Contour will attempt to acquire leadership lease.").Default("2s").DurationVar(&ctx.LeaderElection.RetryPeriod)
 
+    serve.Flag("load-balancer-status", "Address to set or the source to inspect for ingress status.").PlaceHolder("<kind:namespace/name|address>").StringVar(&ctx.Config.LoadBalancerStatus)
+
     serve.Flag("root-namespaces", "Restrict contour to searching these namespaces for root ingress routes.").PlaceHolder("<ns,ns>").StringVar(&ctx.rootNamespaces)
 
     serve.Flag("stats-address", "Envoy /stats interface address.").PlaceHolder("<ipaddr>").StringVar(&ctx.statsAddr)
@@ -675,13 +676,46 @@ func (s *Server) doServe() error {
     }
 
     // Set up ingress load balancer status writer.
+    if err := s.setupIngressLoadBalancerStatusWriter(contourConfiguration, ingressClassNames, gatewayRef, sh.Writer()); err != nil {
+        return err
+    }
+
+    xdsServer := &xdsServer{
+        log:             s.log,
+        registry:        s.registry,
+        config:          *contourConfiguration.XDSServer,
+        snapshotHandler: snapshotHandler,
+        resources:       resources,
+        initialDagBuilt: contourHandler.HasBuiltInitialDag,
+    }
+    if err := s.mgr.Add(xdsServer); err != nil {
+        return err
+    }
+
+    notifier := &leadership.Notifier{
+        ToNotify: []leadership.NeedLeaderElectionNotification{contourHandler, observer},
+    }
+    if err := s.mgr.Add(notifier); err != nil {
+        return err
+    }
+
+    // GO!
+    return s.mgr.Start(signals.SetupSignalHandler())
+}
+
+func (s *Server) setupIngressLoadBalancerStatusWriter(
+    contourConfiguration contour_v1alpha1.ContourConfigurationSpec,
+    ingressClassNames []string,
+    gatewayRef *types.NamespacedName,
+    statusUpdater k8s.StatusUpdater,
+) error {
     lbsw := &loadBalancerStatusWriter{
         log:               s.log.WithField("context", "loadBalancerStatusWriter"),
         cache:             s.mgr.GetCache(),
         lbStatus:          make(chan core_v1.LoadBalancerStatus, 1),
         ingressClassNames: ingressClassNames,
         gatewayRef:        gatewayRef,
-        statusUpdater:     sh.Writer(),
+        statusUpdater:     statusUpdater,
         statusAddress:     contourConfiguration.Ingress.StatusAddress,
         serviceName:       contourConfiguration.Envoy.Service.Name,
         serviceNamespace:  contourConfiguration.Envoy.Service.Namespace,
@@ -690,71 +724,113 @@ func (s *Server) doServe() error {
         return err
     }
 
-    // Register an informer to watch envoy's service if we haven't been given static details.
+    elbs := &envoyLoadBalancerStatus{}
     if lbAddress := contourConfiguration.Ingress.StatusAddress; len(lbAddress) > 0 {
-        s.log.WithField("loadbalancer-address", lbAddress).Info("Using supplied information for Ingress status")
-        lbsw.lbStatus <- parseStatusFlag(lbAddress)
+        elbs.Kind = "hostname"
+        elbs.FQDNs = lbAddress
+    } else if contourConfiguration.Envoy.LoadBalancer != "" {
+        status, err := parseEnvoyLoadBalancerStatus(contourConfiguration.Envoy.LoadBalancer)
+        if err != nil {
+            return err
+        }
+        elbs = status
     } else {
+        elbs.Kind = "service"
+        elbs.Namespace = contourConfiguration.Envoy.Service.Namespace
+        elbs.Name = contourConfiguration.Envoy.Service.Name
+    }
+    switch strings.ToLower(elbs.Kind) {
+    case "hostname":
+        s.log.WithField("loadbalancer-fqdns", elbs.FQDNs).Info("Using supplied hostname for Ingress status")
+        lbsw.lbStatus <- parseStatusFlag(elbs.FQDNs)
+    case "service":
+        // Register an informer to watch supplied service
         serviceHandler := &k8s.ServiceStatusLoadBalancerWatcher{
-            ServiceName: contourConfiguration.Envoy.Service.Name,
+            ServiceName: elbs.Name,
             LBStatus:    lbsw.lbStatus,
             Log:         s.log.WithField("context", "serviceStatusLoadBalancerWatcher"),
         }
 
         var handler cache.ResourceEventHandler = serviceHandler
-        if contourConfiguration.Envoy.Service.Namespace != "" {
-            handler = k8s.NewNamespaceFilter([]string{contourConfiguration.Envoy.Service.Namespace}, handler)
+        if elbs.Namespace != "" {
+            handler = k8s.NewNamespaceFilter([]string{elbs.Namespace}, handler)
         }
 
-        if err := s.informOnResource(&corev1.Service{}, handler); err != nil {
-            s.log.WithError(err).WithField("resource", "services").Fatal("failed to create informer")
+        if err := s.informOnResource(&core_v1.Service{}, handler); err != nil {
+            s.log.WithError(err).WithField("resource", "services").Fatal("failed to create services informer")
         }
-
+        s.log.Infof("Watching %s for Ingress status", elbs)
+    case "ingress":
+        // Register an informer to watch supplied ingress
         ingressHandler := &k8s.IngressStatusLoadBalancerWatcher{
-            ServiceName: contourConfiguration.Envoy.Service.Name,
+            IngressName: elbs.Name,
             LBStatus:    lbsw.lbStatus,
             Log:         s.log.WithField("context", "ingressStatusLoadBalancerWatcher"),
         }
 
-        var ingressEventHandler cache.ResourceEventHandler = ingressHandler
-        if contourConfiguration.Envoy.Ingress.Namespace != "" {
-            handler = k8s.NewNamespaceFilter([]string{contourConfiguration.Envoy.Ingress.Namespace}, handler)
+        var handler cache.ResourceEventHandler = ingressHandler
+        if elbs.Namespace != "" {
+            handler = k8s.NewNamespaceFilter([]string{elbs.Namespace}, handler)
         }
 
-        if err := informOnResource(&networking_v1.Ingress{}, ingressEventHandler, s.mgr.GetCache()); err != nil {
+        if err := s.informOnResource(&networking_v1.Ingress{}, handler); err != nil {
             s.log.WithError(err).WithField("resource", "ingresses").Fatal("failed to create ingresses informer")
         }
+        s.log.Infof("Watching %s for Ingress status", elbs)
+    default:
+        return fmt.Errorf("unsupported ingress kind: %s", elbs.Kind)
+    }
 
-    s.log.WithField("envoy-service-name", contourConfiguration.Envoy.Service.Name).
-        WithField("envoy-service-namespace", contourConfiguration.Envoy.Service.Namespace).
-        Info("Watching Service for Ingress status")
+    return nil
+}
 
-    s.log.WithField("envoy-ingress-name", contourConfiguration.Envoy.Ingress.Name).
-        WithField("envoy-ingress-namespace", contourConfiguration.Envoy.Ingress.Namespace).
-        Info("Watching Ingress for Ingress status")
-    }
+type envoyLoadBalancerStatus struct {
+    Kind  string
+    FQDNs string
+    config.NamespacedName
+}
 
-    xdsServer := &xdsServer{
-        log:             s.log,
-        registry:        s.registry,
-        config:          *contourConfiguration.XDSServer,
-        snapshotHandler: snapshotHandler,
-        resources:       resources,
-        initialDagBuilt: contourHandler.HasBuiltInitialDag,
+func (elbs *envoyLoadBalancerStatus) String() string {
+    if elbs.Kind == "hostname" {
+        return fmt.Sprintf("%s:%s", elbs.Kind, elbs.FQDNs)
     }
-    if err := s.mgr.Add(xdsServer); err != nil {
-        return err
+    return fmt.Sprintf("%s:%s/%s", elbs.Kind, elbs.Namespace, elbs.Name)
+}
+
+func parseEnvoyLoadBalancerStatus(s string) (*envoyLoadBalancerStatus, error) {
+    parts := strings.SplitN(s, ":", 2)
+    if len(parts) != 2 {
+        return nil, fmt.Errorf("invalid load-balancer-status: %s", s)
     }
 
-    notifier := &leadership.Notifier{
-        ToNotify: []leadership.NeedLeaderElectionNotification{contourHandler, observer},
+    if parts[1] == "" {
+        return nil, fmt.Errorf("invalid load-balancer-status: empty object reference")
     }
-    if err := s.mgr.Add(notifier); err != nil {
-        return err
+
+    elbs := envoyLoadBalancerStatus{}
+
+    elbs.Kind = strings.ToLower(parts[0])
+    switch elbs.Kind {
+    case "ingress", "service":
+        parts = strings.Split(parts[1], "/")
+        if len(parts) != 2 {
+            return nil, fmt.Errorf("invalid load-balancer-status: %s is not in the format of <namespace>/<name>", s)
+        }
+
+        if parts[0] == "" || parts[1] == "" {
+            return nil, fmt.Errorf("invalid load-balancer-status: <namespace> or <name> is empty")
+        }
+        elbs.Namespace = parts[0]
+        elbs.Name = parts[1]
+    case "hostname":
+        elbs.FQDNs = parts[1]
+    case "":
+        return nil, fmt.Errorf("invalid load-balancer-status: kind is empty")
+    default:
+        return nil, fmt.Errorf("invalid load-balancer-status: unsupported kind: %s", elbs.Kind)
     }
 
-    // GO!
-    return s.mgr.Start(signals.SetupSignalHandler())
+    return &elbs, nil
 }
 
 func (s *Server) getExtensionSvcConfig(name, namespace string) (xdscache_v3.ExtensionServiceConfig, error) {
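
To illustrate the grammar parseEnvoyLoadBalancerStatus accepts, here is a small self-contained sketch. parseRef is a hypothetical simplification written for this note (it folds the "kind is empty" case into the unsupported-kind error) and is not code from the commit.

package main

import (
    "fmt"
    "strings"
)

// parseRef loosely mirrors parseEnvoyLoadBalancerStatus above: it splits
// "<kind>:<rest>", expects "<namespace>/<name>" for the service and ingress
// kinds, and keeps the raw comma-separated address list for hostname.
func parseRef(s string) (kind, namespace, name, fqdns string, err error) {
    parts := strings.SplitN(s, ":", 2)
    if len(parts) != 2 || parts[1] == "" {
        return "", "", "", "", fmt.Errorf("invalid value: %q", s)
    }
    kind = strings.ToLower(parts[0])
    switch kind {
    case "service", "ingress":
        ref := strings.Split(parts[1], "/")
        if len(ref) != 2 || ref[0] == "" || ref[1] == "" {
            return "", "", "", "", fmt.Errorf("%q is not <namespace>/<name>", parts[1])
        }
        namespace, name = ref[0], ref[1]
    case "hostname":
        fqdns = parts[1]
    default:
        return "", "", "", "", fmt.Errorf("unsupported kind %q", kind)
    }
    return kind, namespace, name, fqdns, nil
}

func main() {
    // The first two values follow the formats documented on the API field;
    // the third uses the hostname kind accepted by the parser in this commit.
    // The resource names and addresses are invented for illustration.
    for _, v := range []string{
        "service:projectcontour/envoy",
        "ingress:projectcontour/envoy",
        "hostname:lb.example.com,203.0.113.10",
    } {
        kind, ns, name, fqdns, err := parseRef(v)
        fmt.Println(v, "=>", kind, ns, name, fqdns, err)
    }
}

As in setupIngressLoadBalancerStatusWriter above, an unset value falls back to kind "service" with the configured Envoy Service namespace and name, so deployments that never set the flag keep their previous behaviour.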

cmd/contour/serve_test.go

Lines changed: 106 additions & 0 deletions
@@ -24,6 +24,7 @@ import (
 
     contour_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1"
     "github.com/projectcontour/contour/internal/dag"
+    "github.com/projectcontour/contour/pkg/config"
 )
 
 func TestGetDAGBuilder(t *testing.T) {
@@ -256,3 +257,108 @@ func mustGetIngressProcessor(t *testing.T, builder *dag.Builder) *dag.IngressPro
     require.FailNow(t, "IngressProcessor not found in list of DAG builder's processors")
     return nil
 }
+
+func TestParseEnvoyLoadBalancerStatus(t *testing.T) {
+    tests := []struct {
+        name   string
+        status string
+        want   envoyLoadBalancerStatus
+    }{
+        {
+            name:   "Service",
+            status: "service:namespace-1/name-1",
+            want: envoyLoadBalancerStatus{
+                Kind: "service",
+                NamespacedName: config.NamespacedName{
+                    Name:      "name-1",
+                    Namespace: "namespace-1",
+                },
+            },
+        },
+        {
+            name:   "Ingress",
+            status: "ingress:namespace-1/name-1",
+            want: envoyLoadBalancerStatus{
+                Kind: "ingress",
+                NamespacedName: config.NamespacedName{
+                    Name:      "name-1",
+                    Namespace: "namespace-1",
+                },
+            },
+        },
+        {
+            name:   "hostname",
+            status: "hostname:example.com",
+            want: envoyLoadBalancerStatus{
+                Kind:  "hostname",
+                FQDNs: "example.com",
+            },
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            r, err := parseEnvoyLoadBalancerStatus(tt.status)
+            require.NoError(t, err)
+            assert.Equal(t, tt.want, *r)
+        })
+    }
+
+    tests2 := []struct {
+        name   string
+        status string
+        error  string
+    }{
+        {
+            name:   "Empty",
+            status: "",
+            error:  "invalid",
+        },
+        {
+            name:   "No kind",
+            status: ":n",
+            error:  "kind is empty",
+        },
+        {
+            name:   "Invalid kind",
+            status: "test:n",
+            error:  "unsupported kind",
+        },
+        {
+            name:   "No reference",
+            status: "service:",
+            error:  "empty object reference",
+        },
+        {
+            name:   "No colon",
+            status: "service",
+            error:  "invalid",
+        },
+        {
+            name:   "No slash",
+            status: "service:name-1",
+            error:  "not in the format",
+        },
+        {
+            name:   "starts with slash",
+            status: "service:/name-1",
+            error:  "is empty",
+        },
+        {
+            name:   "ends with slash",
+            status: "service:name-1/",
+            error:  "is empty",
+        },
+        {
+            name:   "too many slashes",
+            status: "service:name/x/y",
+            error:  "not in the format",
+        },
+    }
+    for _, tt := range tests2 {
+        t.Run(tt.name, func(t *testing.T) {
+            _, err := parseEnvoyLoadBalancerStatus(tt.status)
+            require.Error(t, err)
+            assert.Contains(t, err.Error(), tt.error)
+        })
+    }
+}

cmd/contour/servecontext.go

Lines changed: 1 addition & 4 deletions
@@ -571,10 +571,6 @@ func (ctx *serveContext) convertToContourConfigurationSpec() contour_v1alpha1.Co
             Name:      ctx.Config.EnvoyServiceName,
             Namespace: ctx.Config.EnvoyServiceNamespace,
         },
-        Ingress: &contour_v1alpha1.NamespacedName{
-            Name:      ctx.Config.EnvoyIngressName,
-            Namespace: ctx.Config.EnvoyIngressNamespace,
-        },
         HTTPListener: &contour_v1alpha1.EnvoyListener{
             Address: ctx.httpAddr,
             Port:    ctx.httpPort,
@@ -616,6 +612,7 @@ func (ctx *serveContext) convertToContourConfigurationSpec() contour_v1alpha1.Co
             EnvoyStripTrailingHostDot: &ctx.Config.Network.EnvoyStripTrailingHostDot,
         },
         OMEnforcedHealth: envoyOMEnforcedHealthListenerConfig,
+        LoadBalancer:     ctx.Config.LoadBalancerStatus,
     },
     Gateway: gatewayConfig,
     HTTPProxy: &contour_v1alpha1.HTTPProxyConfig{
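
Because the flag value flows unchanged from ctx.Config.LoadBalancerStatus into Envoy.LoadBalancer here, the status source is chosen at startup. The invocations below are hypothetical examples: the resource names are invented, and the hostname: spelling is the kind accepted by parseEnvoyLoadBalancerStatus in this commit.

contour serve --load-balancer-status=service:projectcontour/envoy
contour serve --load-balancer-status=ingress:projectcontour/envoy
contour serve --load-balancer-status=hostname:lb.example.com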
