Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 19 additions & 1 deletion pkg/kubelet/cm/cpumanager/cpu_assignment.go
Original file line number Diff line number Diff line change
Expand Up @@ -556,13 +556,31 @@ func (a *cpuAccumulator) takeFullUncore() {
}

func (a *cpuAccumulator) takePartialUncore(uncoreID int) {
numCoresNeeded := a.numCPUsNeeded / a.topo.CPUsPerCore()
// determine the number of cores needed whether SMT/hyperthread is enabled or disabled
numCoresNeeded := (a.numCPUsNeeded + a.topo.CPUsPerCore() - 1) / a.topo.CPUsPerCore()

// determine the N number of free cores (physical cpus) within the UncoreCache, then
// determine the M number of free cpus (virtual cpus) that correspond with the free cores
freeCores := a.details.CoresNeededInUncoreCache(numCoresNeeded, uncoreID)
freeCPUs := a.details.CPUsInCores(freeCores.UnsortedList()...)

// when SMT/hyperthread is enabled and remaining cpu requirement is an odd integer value:
// sort the free CPUs that were determined based on the cores that have available cpus.
// if the number of free cpus is greater than the cpus needed, we can drop the last cpu
// since the odd integer request will only require one out of the two free cpus that
// correspond to the last core
if a.numCPUsNeeded%2 != 0 && a.topo.CPUsPerCore() > 1 {
// we sort freeCPUs to ensure we pack virtual cpu allocations, meaning we allocate
// whole core's worth of cpus as much as possible to reduce smt-misalignment
sortFreeCPUs := freeCPUs.List()
if len(sortFreeCPUs) > a.numCPUsNeeded {
// if we are in takePartialUncore, the accumulator is not satisfied after
// takeFullUncore, so freeCPUs.Size() can't be < 1
sortFreeCPUs = sortFreeCPUs[:freeCPUs.Size()-1]
}
freeCPUs = cpuset.New(sortFreeCPUs...)
}

// claim the cpus if the free cpus within the UncoreCache can satisfy the needed cpus
claimed := (a.numCPUsNeeded == freeCPUs.Size())
klog.V(4).InfoS("takePartialUncore: trying to claim partial uncore",
Expand Down
2 changes: 1 addition & 1 deletion pkg/kubelet/cm/cpumanager/cpu_assignment_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -685,7 +685,7 @@ func TestTakeByTopologyNUMAPacked(t *testing.T) {
cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
"",
cpuset.New(2),
cpuset.New(1),
},
{
"take first available UncoreCache from first socket",
Expand Down
163 changes: 163 additions & 0 deletions pkg/kubelet/cm/cpumanager/policy_static_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1704,6 +1704,169 @@ func TestStaticPolicyAddWithUncoreAlignment(t *testing.T) {
),
expUncoreCache: cpuset.New(0, 1), // best-effort across uncore cache 0 and 1
},
{
// odd integer cpu required on smt-disabled processor
description: "odd integer cpu required on smt-disabled",
topo: topoSmallSingleSocketSingleNumaPerSocketNoSMTUncore, // 8 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 2, 3), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoSmallSingleSocketSingleNumaPerSocketNoSMTUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"5000m", "5000m"}, // full uncore cache worth of cpus
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(8, 9, 10, 11, 12),
},
{
// odd integer cpu requirement on smt-enabled
description: "odd integer required on smt-enabled",
topo: topoSingleSocketSingleNumaPerSocketSMTSmallUncore, // 8 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 64, 65), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"3000m", "3000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(2, 3, 66),
},
{
// odd integer cpu required on smt-enabled and odd integer free cpus available on uncore
description: "odd integer required on odd integer partial uncore",
topo: topoSingleSocketSingleNumaPerSocketSMTSmallUncore, // 8 cpus per uncore
numReservedCPUs: 3,
reserved: cpuset.New(0, 1, 64), // note 3 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"3000m", "3000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(2, 65, 66),
},
{
// even integer requested on smt-enabled processor with odd integer available cpus on uncore
// even integer cpu containers will not be placed on uncore caches with odd integer free cpus
description: "even integer required on odd integer partial uncore",
topo: topoSingleSocketSingleNumaPerSocketSMTSmallUncore, // 8 cpus per uncore
numReservedCPUs: 3,
reserved:        cpuset.New(0, 1, 64), // note 3 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"4000m", "4000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(4, 5, 68, 69),
},
{
// large odd integer cpu required on smt-enabled
description: "large odd integer required on smt-enabled",
topo: topoSingleSocketSingleNumaPerSocketSMTSmallUncore, // 8 cpus per uncore
numReservedCPUs: 3,
reserved: cpuset.New(0, 1, 64), // note 3 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"11000m", "11000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(2, 65, 66, 4, 5, 6, 7, 68, 69, 70, 71), // full uncore 1 and partial uncore 0
},
{
// odd integer cpu required on hyperthread-enabled and monolithic uncore cache
description: "odd integer required on HT monolithic uncore",
topo: topoDualSocketSubNumaPerSocketHTMonolithicUncore,
numReservedCPUs: 3,
reserved: cpuset.New(0, 1, 120), // note 3 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"5000m", "5000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(2, 3, 121, 122, 123),
},
{
// even integer cpu required on hyperthread-enabled and monolithic uncore cache
description: "even integer required on HT monolithic uncore",
topo: topoDualSocketSubNumaPerSocketHTMonolithicUncore,
numReservedCPUs: 3,
reserved: cpuset.New(0, 1, 120), // note 3 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"4000m", "4000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(2, 3, 122, 123), // takeFullCores
},
}

for _, testCase := range testCases {
Expand Down