Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
178 changes: 108 additions & 70 deletions pkg/handlers/overview_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,7 @@ import (
"github.com/zxh326/kite/pkg/model"
"github.com/zxh326/kite/pkg/utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/controller-runtime/pkg/client"
"golang.org/x/sync/errgroup"
)

type OverviewData struct {
Expand All @@ -24,111 +23,150 @@ type OverviewData struct {
Resource common.ResourceMetric `json:"resource"`
}

// nodeMetrics holds aggregated metrics computed from the node list.
// It is filled by exactly one goroutine in GetOverview and read only
// after errgroup.Wait returns, so no synchronization is required.
type nodeMetrics struct {
	total          int   // number of nodes returned by the list call
	ready          int   // nodes whose NodeReady condition is ConditionTrue
	cpuAllocatable int64 // millicores
	memAllocatable int64 // milli-bytes (matches original MilliValue() contract)
}

// podMetrics holds aggregated metrics computed from the pod list.
// It is filled by exactly one goroutine in GetOverview and read only
// after errgroup.Wait returns, so no synchronization is required.
type podMetrics struct {
	total        int   // number of pods returned by the list call
	running      int   // pods counted as "running" (ready, or phase Succeeded)
	cpuRequested int64 // millicores summed over all container requests
	memRequested int64 // milli-bytes (matches original MilliValue() contract)
	cpuLimited   int64 // millicores summed over all container limits
	memLimited   int64 // milli-bytes (matches original MilliValue() contract)
}

func GetOverview(c *gin.Context) {
ctx := c.Request.Context()

cs := c.MustGet("cluster").(*cluster.ClientSet)
user := c.MustGet("user").(model.User)
if len(user.Roles) == 0 {
c.JSON(http.StatusForbidden, gin.H{"error": "Access denied"})
return // Fix: was missing, caused 4 queries to run for unauthorized users
}

// Get nodes
nodes := &v1.NodeList{}
if err := cs.K8sClient.List(ctx, nodes, &client.ListOptions{}); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
// Solution : Fetch and compute all 4 resource types in parallel.
// Each goroutine owns its data — no shared state, no mutexes needed.
var nm nodeMetrics
var pm podMetrics
var nsCount, svcCount int

readyNodes := 0
var cpuAllocatable, memAllocatable resource.Quantity
var cpuRequested, memRequested resource.Quantity
var cpuLimited, memLimited resource.Quantity
for _, node := range nodes.Items {
cpuAllocatable.Add(*node.Status.Allocatable.Cpu())
memAllocatable.Add(*node.Status.Allocatable.Memory())
for _, condition := range node.Status.Conditions {
if condition.Type == v1.NodeReady && condition.Status == v1.ConditionTrue {
readyNodes++
break
g, gctx := errgroup.WithContext(ctx)

// Goroutine 1: List nodes + compute allocatable resources + ready count
g.Go(func() error {
var nodes v1.NodeList
if err := cs.K8sClient.List(gctx, &nodes); err != nil {
return err
}
nm.total = len(nodes.Items)
// Solution : Use int64 arithmetic instead of resource.Quantity.Add()
// (avoids big.Int operations — ~10-50x faster for the accumulation loop)
for i := range nodes.Items {
node := &nodes.Items[i]
nm.cpuAllocatable += node.Status.Allocatable.Cpu().MilliValue()
nm.memAllocatable += node.Status.Allocatable.Memory().MilliValue()
for _, cond := range node.Status.Conditions {
if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
nm.ready++
break
}
}
}
}

// Get pods
pods := &v1.PodList{}
if err := cs.K8sClient.List(ctx, pods, &client.ListOptions{}); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
return nil
})

runningPods := 0
for _, pod := range pods.Items {
for _, container := range pod.Spec.Containers {
cpuRequested.Add(*container.Resources.Requests.Cpu())
memRequested.Add(*container.Resources.Requests.Memory())
// Goroutine 2: List pods + compute resource requests/limits + running count
g.Go(func() error {
var pods v1.PodList
if err := cs.K8sClient.List(gctx, &pods); err != nil {
return err
}
pm.total = len(pods.Items)
// Solution : int64 accumulation instead of resource.Quantity.Add()
for i := range pods.Items {
pod := &pods.Items[i]
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
pm.cpuRequested += container.Resources.Requests.Cpu().MilliValue()
pm.memRequested += container.Resources.Requests.Memory().MilliValue()

if container.Resources.Limits != nil {
if cpuLimit := container.Resources.Limits.Cpu(); cpuLimit != nil {
cpuLimited.Add(*cpuLimit)
}
if memLimit := container.Resources.Limits.Memory(); memLimit != nil {
memLimited.Add(*memLimit)
if container.Resources.Limits != nil {
if cpu := container.Resources.Limits.Cpu(); cpu != nil {
pm.cpuLimited += cpu.MilliValue()
}
if mem := container.Resources.Limits.Memory(); mem != nil {
pm.memLimited += mem.MilliValue()
}
}
}
if utils.IsPodReady(pod) || pod.Status.Phase == v1.PodSucceeded {
pm.running++
}
}
if utils.IsPodReady(&pod) || pod.Status.Phase == v1.PodSucceeded {
runningPods++
return nil
})

// Goroutine 3: List namespaces (count only)
g.Go(func() error {
var namespaces v1.NamespaceList
if err := cs.K8sClient.List(gctx, &namespaces); err != nil {
return err
}
}
nsCount = len(namespaces.Items)
return nil
})

// Get namespaces
namespaces := &v1.NamespaceList{}
if err := cs.K8sClient.List(ctx, namespaces, &client.ListOptions{}); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
// Goroutine 4: List services (count only)
g.Go(func() error {
var services v1.ServiceList
if err := cs.K8sClient.List(gctx, &services); err != nil {
return err
}
svcCount = len(services.Items)
return nil
})

// Get services
services := &v1.ServiceList{}
if err := cs.K8sClient.List(ctx, services, &client.ListOptions{}); err != nil {
// Wait for all goroutines; if any fails the context is cancelled
if err := g.Wait(); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}

// Memory is reported in bytes from Value(); convert to milli for the API
// (consistent with the original behavior that used MilliValue() on Quantity)
overview := OverviewData{
TotalNodes: len(nodes.Items),
ReadyNodes: readyNodes,
TotalPods: len(pods.Items),
RunningPods: runningPods,
TotalNamespaces: len(namespaces.Items),
TotalServices: len(services.Items),
TotalNodes: nm.total,
ReadyNodes: nm.ready,
TotalPods: pm.total,
RunningPods: pm.running,
TotalNamespaces: nsCount,
TotalServices: svcCount,
PromEnabled: cs.PromClient != nil,
Resource: common.ResourceMetric{
CPU: common.Resource{
Allocatable: cpuAllocatable.MilliValue(),
Requested: cpuRequested.MilliValue(),
Limited: cpuLimited.MilliValue(),
Allocatable: nm.cpuAllocatable,
Requested: pm.cpuRequested,
Limited: pm.cpuLimited,
},
Mem: common.Resource{
Allocatable: memAllocatable.MilliValue(),
Requested: memRequested.MilliValue(),
Limited: memLimited.MilliValue(),
Allocatable: nm.memAllocatable,
Requested: pm.memRequested,
Limited: pm.memLimited,
},
},
}

c.JSON(http.StatusOK, overview)
}

// var (
// initialized bool
// )

func InitCheck(c *gin.Context) {
// if initialized {
// c.JSON(http.StatusOK, gin.H{"initialized": true})
// return
// }
step := 0
uc, _ := model.CountUsers()
if uc == 0 && !common.AnonymousUserEnabled {
Expand Down