@@ -123,10 +123,10 @@ func makePodWithPIDStats(name string, priority int32, processCount uint64) (*v1.
123123 return pod , podStats
124124}
125125
126- func makePodWithDiskStats (name string , priority int32 , requests v1.ResourceList , limits v1.ResourceList , rootFsUsed , logsUsed , perLocalVolumeUsed string ) (* v1.Pod , statsapi.PodStats ) {
126+ func makePodWithDiskStats (name string , priority int32 , requests v1.ResourceList , limits v1.ResourceList , rootFsUsed , logsUsed , perLocalVolumeUsed string , volumes []v1. Volume ) (* v1.Pod , statsapi.PodStats ) {
127127 pod := newPod (name , priority , []v1.Container {
128128 newContainer (name , requests , limits ),
129- }, nil )
129+ }, volumes )
130130 podStats := newPodDiskStats (pod , parseQuantity (rootFsUsed ), parseQuantity (logsUsed ), parseQuantity (perLocalVolumeUsed ))
131131 return pod , podStats
132132}
@@ -505,7 +505,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
505505 Quantity : quantityMustParse ("2Gi" ),
506506 },
507507 },
508- evictionMessage : "The node was low on resource: ephemeral-storage. Threshold quantity: 2Gi, available: 1536Mi. " ,
508+ evictionMessage : "The node was low on resource: ephemeral-storage. Threshold quantity: 2Gi, available: 1536Mi. Container above-requests was using 700Mi, request is 100Mi, has larger consumption of ephemeral-storage. " ,
509509 podToMakes : []podToMake {
510510 {name : "below-requests" , requests : newResourceList ("" , "" , "1Gi" ), limits : newResourceList ("" , "" , "1Gi" ), rootFsUsed : "900Mi" },
511511 {name : "above-requests" , requests : newResourceList ("" , "" , "100Mi" ), limits : newResourceList ("" , "" , "1Gi" ), rootFsUsed : "700Mi" },
@@ -516,7 +516,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
516516 nodeFsStats : "1Gi" ,
517517 imageFsStats : "10Gi" ,
518518 containerFsStats : "10Gi" ,
519- evictionMessage : "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. " ,
519+ evictionMessage : "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. " ,
520520 thresholdToMonitor : evictionapi.Threshold {
521521 Signal : evictionapi .SignalImageFsAvailable ,
522522 Operator : evictionapi .OpLessThan ,
@@ -537,7 +537,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
537537 nodeFsStats : "1Gi" ,
538538 imageFsStats : "100Gi" ,
539539 containerFsStats : "10Gi" ,
540- evictionMessage : "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. " ,
540+ evictionMessage : "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. " ,
541541 thresholdToMonitor : evictionapi.Threshold {
542542 Signal : evictionapi .SignalContainerFsAvailable ,
543543 Operator : evictionapi .OpLessThan ,
@@ -557,7 +557,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
557557 nodeFsStats : "10Gi" ,
558558 imageFsStats : "100Gi" ,
559559 containerFsStats : "10Gi" ,
560- evictionMessage : "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. " ,
560+ evictionMessage : "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. " ,
561561 thresholdToMonitor : evictionapi.Threshold {
562562 Signal : evictionapi .SignalNodeFsAvailable ,
563563 Operator : evictionapi .OpLessThan ,
@@ -588,7 +588,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
588588 pods := []* v1.Pod {}
589589 podStats := map [* v1.Pod ]statsapi.PodStats {}
590590 for _ , podToMake := range podsToMake {
591- pod , podStat := podMaker (podToMake .name , podToMake .priority , podToMake .requests , podToMake .limits , podToMake .rootFsUsed , podToMake .logsFsUsed , podToMake .perLocalVolumeUsed )
591+ pod , podStat := podMaker (podToMake .name , podToMake .priority , podToMake .requests , podToMake .limits , podToMake .rootFsUsed , podToMake .logsFsUsed , podToMake .perLocalVolumeUsed , nil )
592592 pods = append (pods , pod )
593593 podStats [pod ] = podStat
594594 }
@@ -835,8 +835,8 @@ func TestMemoryPressure(t *testing.T) {
835835 t .Errorf ("Manager chose to kill pod: %v, but should have chosen %v" , podKiller .pod .Name , podToEvict .Name )
836836 }
837837 observedGracePeriod = * podKiller .gracePeriodOverride
838- if observedGracePeriod != int64 (0 ) {
839- t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
838+ if observedGracePeriod != int64 (1 ) {
839+ t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 1 , observedGracePeriod )
840840 }
841841
842842 // the best-effort pod should not admit, burstable should
@@ -1106,8 +1106,8 @@ func TestPIDPressure(t *testing.T) {
11061106 t .Errorf ("Manager chose to kill pod but should have had a grace period override." )
11071107 }
11081108 observedGracePeriod = * podKiller .gracePeriodOverride
1109- if observedGracePeriod != int64 (0 ) {
1110- t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
1109+ if observedGracePeriod != int64 (1 ) {
1110+ t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 1 , observedGracePeriod )
11111111 }
11121112
11131113 // try to admit our pod (should fail)
@@ -1336,7 +1336,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
13361336 pods := []* v1.Pod {}
13371337 podStats := map [* v1.Pod ]statsapi.PodStats {}
13381338 for _ , podToMake := range podsToMake {
1339- pod , podStat := podMaker (podToMake .name , podToMake .priority , podToMake .requests , podToMake .limits , podToMake .rootFsUsed , podToMake .logsFsUsed , podToMake .perLocalVolumeUsed )
1339+ pod , podStat := podMaker (podToMake .name , podToMake .priority , podToMake .requests , podToMake .limits , podToMake .rootFsUsed , podToMake .logsFsUsed , podToMake .perLocalVolumeUsed , nil )
13401340 pods = append (pods , pod )
13411341 podStats [pod ] = podStat
13421342 }
@@ -1379,7 +1379,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
13791379 }
13801380
13811381 // create a best effort pod to test admission
1382- podToAdmit , _ := podMaker ("pod-to-admit" , defaultPriority , newResourceList ("" , "" , "" ), newResourceList ("" , "" , "" ), "0Gi" , "0Gi" , "0Gi" )
1382+ podToAdmit , _ := podMaker ("pod-to-admit" , defaultPriority , newResourceList ("" , "" , "" ), newResourceList ("" , "" , "" ), "0Gi" , "0Gi" , "0Gi" , nil )
13831383
13841384 // synchronize
13851385 _ , err := manager .synchronize (diskInfoProvider , activePodsFunc )
@@ -1494,8 +1494,8 @@ func TestDiskPressureNodeFs(t *testing.T) {
14941494 t .Fatalf ("Manager chose to kill pod: %v, but should have chosen %v" , podKiller .pod .Name , podToEvict .Name )
14951495 }
14961496 observedGracePeriod = * podKiller .gracePeriodOverride
1497- if observedGracePeriod != int64 (0 ) {
1498- t .Fatalf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
1497+ if observedGracePeriod != int64 (1 ) {
1498+ t .Fatalf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 1 , observedGracePeriod )
14991499 }
15001500
15011501 // try to admit our pod (should fail)
@@ -1644,8 +1644,8 @@ func TestMinReclaim(t *testing.T) {
16441644 t .Errorf ("Manager chose to kill pod: %v, but should have chosen %v" , podKiller .pod .Name , podToEvict .Name )
16451645 }
16461646 observedGracePeriod := * podKiller .gracePeriodOverride
1647- if observedGracePeriod != int64 (0 ) {
1648- t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
1647+ if observedGracePeriod != int64 (1 ) {
1648+ t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 1 , observedGracePeriod )
16491649 }
16501650
16511651 // reduce memory pressure, but not below the min-reclaim amount
@@ -1668,8 +1668,8 @@ func TestMinReclaim(t *testing.T) {
16681668 t .Errorf ("Manager chose to kill pod: %v, but should have chosen %v" , podKiller .pod .Name , podToEvict .Name )
16691669 }
16701670 observedGracePeriod = * podKiller .gracePeriodOverride
1671- if observedGracePeriod != int64 (0 ) {
1672- t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
1671+ if observedGracePeriod != int64 (1 ) {
1672+ t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 1 , observedGracePeriod )
16731673 }
16741674
16751675 // reduce memory pressure and ensure the min-reclaim amount
@@ -1858,7 +1858,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
18581858 pods := []* v1.Pod {}
18591859 podStats := map [* v1.Pod ]statsapi.PodStats {}
18601860 for _ , podToMake := range podsToMake {
1861- pod , podStat := podMaker (podToMake .name , podToMake .priority , podToMake .requests , podToMake .limits , podToMake .rootFsUsed , podToMake .logsFsUsed , podToMake .perLocalVolumeUsed )
1861+ pod , podStat := podMaker (podToMake .name , podToMake .priority , podToMake .requests , podToMake .limits , podToMake .rootFsUsed , podToMake .logsFsUsed , podToMake .perLocalVolumeUsed , nil )
18621862 pods = append (pods , pod )
18631863 podStats [pod ] = podStat
18641864 }
@@ -2060,8 +2060,8 @@ func TestNodeReclaimFuncs(t *testing.T) {
20602060 t .Fatalf ("Manager chose to kill pod: %v, but should have chosen %v" , podKiller .pod .Name , podToEvict .Name )
20612061 }
20622062 observedGracePeriod := * podKiller .gracePeriodOverride
2063- if observedGracePeriod != int64 (0 ) {
2064- t .Fatalf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
2063+ if observedGracePeriod != int64 (1 ) {
2064+ t .Fatalf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 1 , observedGracePeriod )
20652065 }
20662066
20672067 // reduce disk pressure
@@ -2458,8 +2458,8 @@ func TestInodePressureFsInodes(t *testing.T) {
24582458 t .Fatalf ("Manager chose to kill pod: %v, but should have chosen %v" , podKiller .pod .Name , podToEvict .Name )
24592459 }
24602460 observedGracePeriod = * podKiller .gracePeriodOverride
2461- if observedGracePeriod != int64 (0 ) {
2462- t .Fatalf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
2461+ if observedGracePeriod != int64 (1 ) {
2462+ t .Fatalf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 1 , observedGracePeriod )
24632463 }
24642464
24652465 // try to admit our pod (should fail)
@@ -2666,6 +2666,111 @@ func TestStaticCriticalPodsAreNotEvicted(t *testing.T) {
26662666 }
26672667}
26682668
2669+ func TestStorageLimitEvictions (t * testing.T ) {
2670+ volumeSizeLimit := resource .MustParse ("1Gi" )
2671+
2672+ testCases := map [string ]struct {
2673+ pod podToMake
2674+ volumes []v1.Volume
2675+ }{
2676+ "eviction due to rootfs above limit" : {
2677+ pod : podToMake {name : "rootfs-above-limits" , priority : defaultPriority , requests : newResourceList ("" , "" , "1Gi" ), limits : newResourceList ("" , "" , "1Gi" ), rootFsUsed : "2Gi" },
2678+ },
2679+ "eviction due to logsfs above limit" : {
2680+ pod : podToMake {name : "logsfs-above-limits" , priority : defaultPriority , requests : newResourceList ("" , "" , "1Gi" ), limits : newResourceList ("" , "" , "1Gi" ), logsFsUsed : "2Gi" },
2681+ },
2682+ "eviction due to local volume above limit" : {
2683+ pod : podToMake {name : "localvolume-above-limits" , priority : defaultPriority , requests : newResourceList ("" , "" , "" ), limits : newResourceList ("" , "" , "" ), perLocalVolumeUsed : "2Gi" },
2684+ volumes : []v1.Volume {{
2685+ Name : "emptyDirVolume" ,
2686+ VolumeSource : v1.VolumeSource {
2687+ EmptyDir : & v1.EmptyDirVolumeSource {
2688+ SizeLimit : & volumeSizeLimit ,
2689+ },
2690+ },
2691+ }},
2692+ },
2693+ }
2694+ for name , tc := range testCases {
2695+ t .Run (name , func (t * testing.T ) {
2696+ podMaker := makePodWithDiskStats
2697+ summaryStatsMaker := makeDiskStats
2698+ podsToMake := []podToMake {
2699+ tc .pod ,
2700+ }
2701+ pods := []* v1.Pod {}
2702+ podStats := map [* v1.Pod ]statsapi.PodStats {}
2703+ for _ , podToMake := range podsToMake {
2704+ pod , podStat := podMaker (podToMake .name , podToMake .priority , podToMake .requests , podToMake .limits , podToMake .rootFsUsed , podToMake .logsFsUsed , podToMake .perLocalVolumeUsed , tc .volumes )
2705+ pods = append (pods , pod )
2706+ podStats [pod ] = podStat
2707+ }
2708+
2709+ podToEvict := pods [0 ]
2710+ activePodsFunc := func () []* v1.Pod {
2711+ return pods
2712+ }
2713+
2714+ fakeClock := testingclock .NewFakeClock (time .Now ())
2715+ podKiller := & mockPodKiller {}
2716+ diskInfoProvider := & mockDiskInfoProvider {dedicatedImageFs : ptr .To (false )}
2717+ diskGC := & mockDiskGC {err : nil }
2718+ nodeRef := & v1.ObjectReference {
2719+ Kind : "Node" , Name : "test" , UID : types .UID ("test" ), Namespace : "" ,
2720+ }
2721+
2722+ config := Config {
2723+ MaxPodGracePeriodSeconds : 5 ,
2724+ PressureTransitionPeriod : time .Minute * 5 ,
2725+ Thresholds : []evictionapi.Threshold {
2726+ {
2727+ Signal : evictionapi .SignalNodeFsAvailable ,
2728+ Operator : evictionapi .OpLessThan ,
2729+ Value : evictionapi.ThresholdValue {
2730+ Quantity : quantityMustParse ("1Gi" ),
2731+ },
2732+ },
2733+ },
2734+ }
2735+
2736+ diskStat := diskStats {
2737+ rootFsAvailableBytes : "200Mi" ,
2738+ imageFsAvailableBytes : "200Mi" ,
2739+ podStats : podStats ,
2740+ }
2741+ summaryProvider := & fakeSummaryProvider {result : summaryStatsMaker (diskStat )}
2742+ manager := & managerImpl {
2743+ clock : fakeClock ,
2744+ killPodFunc : podKiller .killPodNow ,
2745+ imageGC : diskGC ,
2746+ containerGC : diskGC ,
2747+ config : config ,
2748+ recorder : & record.FakeRecorder {},
2749+ summaryProvider : summaryProvider ,
2750+ nodeRef : nodeRef ,
2751+ nodeConditionsLastObservedAt : nodeConditionsObservedAt {},
2752+ thresholdsFirstObservedAt : thresholdsObservedAt {},
2753+ localStorageCapacityIsolation : true ,
2754+ }
2755+
2756+ _ , err := manager .synchronize (diskInfoProvider , activePodsFunc )
2757+ if err != nil {
2758+ t .Fatalf ("Manager expects no error but got %v" , err )
2759+ }
2760+
2761+ if podKiller .pod == nil {
2762+ t .Fatalf ("Manager should have selected a pod for eviction" )
2763+ }
2764+ if podKiller .pod != podToEvict {
2765+ t .Errorf ("Manager should have killed pod: %v, but instead killed: %v" , podToEvict .Name , podKiller .pod .Name )
2766+ }
2767+ if * podKiller .gracePeriodOverride != 1 {
2768+ t .Errorf ("Manager should have evicted with gracePeriodOverride of 1, but used: %v" , * podKiller .gracePeriodOverride )
2769+ }
2770+ })
2771+ }
2772+ }
2773+
26692774// TestAllocatableMemoryPressure
26702775func TestAllocatableMemoryPressure (t * testing.T ) {
26712776 podMaker := makePodWithMemoryStats
@@ -2767,8 +2872,8 @@ func TestAllocatableMemoryPressure(t *testing.T) {
27672872 t .Errorf ("Manager chose to kill pod: %v, but should have chosen %v" , podKiller .pod .Name , podToEvict .Name )
27682873 }
27692874 observedGracePeriod := * podKiller .gracePeriodOverride
2770- if observedGracePeriod != int64 (0 ) {
2771- t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
2875+ if observedGracePeriod != int64 (1 ) {
2876+ t .Errorf ("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 1 , observedGracePeriod )
27722877 }
27732878 // reset state
27742879 podKiller .pod = nil