Changes from all commits
89 commits
15d6a51  perf/x86/intel: Factor out the initialization code for ADL e-core (Aug 29, 2023)
5d749e6  perf/x86/intel: Apply the common initialization code for ADL (Aug 29, 2023)
1e7dc12  perf/x86/intel: Clean up the hybrid CPU type handling code (Aug 29, 2023)
1c16f24  perf/x86/intel: Add common intel_pmu_init_hybrid() (Aug 29, 2023)
a3798d1  perf/x86/intel: Fix broken fixed event constraints extension (Sep 11, 2023)
87181be  tools headers UAPI: Sync include/uapi/linux/perf_event.h header with … (Oct 25, 2023)
c781572  perf/x86/intel: Correct incorrect 'or' operation for PMU capabilities (Nov 21, 2023)
52e0519  powercap: intel_rapl: Sort header files (zhang-rui, Apr 8, 2024)
cf5f8dd  powercap: intel_rapl: Introduce APIs for PMU support (zhang-rui, Apr 28, 2024)
4968941  powercap: intel_rapl_tpmi: Enable PMU support (zhang-rui, Apr 28, 2024)
fab4768  perf/x86/intel: Support the PEBS event mask (Jun 26, 2024)
7f166a0  perf/x86: Support counter mask (Jun 26, 2024)
4321c87  perf/x86: Add Lunar Lake and Arrow Lake support (Jun 26, 2024)
e4263d4  perf/x86/intel: Rename model-specific pebs_latency_data functions (Jun 26, 2024)
0af83c6  perf/x86/intel: Support new data source for Lunar Lake (Jun 26, 2024)
32378de  perf/x86: Add config_mask to represent EVENTSEL bitmask (Jun 26, 2024)
c578aa1  perf/x86/intel: Support PERFEVTSEL extension (Jun 26, 2024)
8022edd  perf/x86/intel: Support Perfmon MSRs aliasing (Jun 26, 2024)
b2bd7e9  perf/x86/intel/ds: Clarify adaptive PEBS processing (Nov 19, 2024)
a503248  perf/x86/intel/ds: Factor out functions for PEBS records processing (Nov 19, 2024)
ae83a65  perf/x86/intel/ds: Simplify the PEBS records processing for adaptive … (Nov 19, 2024)
f787c13  perf/x86/intel: Support RDPMC metrics clear mode (Dec 11, 2024)
02b88e2  perf/x86/intel/uncore: Add Clearwater Forest support (Dec 11, 2024)
0125f66  perf/x86/intel/ds: Add PEBS format 6 (Dec 16, 2024)
4be2e3a  perf/x86/intel/uncore: Clean up func_id (Jan 8, 2025)
39bc088  perf/x86/intel/uncore: Support more units on Granite Rapids (Jan 8, 2025)
7fe7d1a  perf/x86/intel: Support PEBS counters snapshotting (Jan 21, 2025)
e717f9a  perf vendor events: Add Clearwaterforest events (captain5050, Feb 11, 2025)
2731067  perf: Extend per event callchain limit to branch stack (Mar 10, 2025)
2eba511  perf/x86: Add dynamic constraint (Mar 27, 2025)
bd4718d  perf/x86/intel: Track the num of events needs late setup (Mar 27, 2025)
4d44536  perf: Extend the bit width of the arch-specific flag (Mar 27, 2025)
f597246  perf/x86/intel: Add CPUID enumeration for the auto counter reload (Mar 27, 2025)
7126c10  perf/x86/intel: Support auto counter reload (Mar 27, 2025)
d5fa424  perf/x86/intel: Don't clear perf metrics overflow bit unconditionally (Apr 15, 2025)
c63693c  perf/x86/intel: Add PMU support for Clearwater Forest (Apr 15, 2025)
875ff59  perf/x86/intel: Parse CPUID archPerfmonExt leaves for non-hybrid CPUs (Apr 15, 2025)
c82f0bc  perf/x86/intel: Only check the group flag for X86 leader (Apr 24, 2025)
2499d55  perf/x86/intel: Check the X86 leader for pebs_counter_event_group (Apr 24, 2025)
9bf3958  perf/x86/intel: Check the X86 leader for ACR group (Apr 24, 2025)
16077a3  perf/x86: Optimize the is_x86_event (Apr 24, 2025)
9f600c2  tools/include: Sync uapi/linux/perf.h with the kernel sources (namhyung, Aug 6, 2024)
38fc52e  tools headers: Update the uapi/linux/perf_event.h copy with the kerne… (namhyung, Apr 10, 2025)
20ef492  perf/uapi: Clean up <uapi/linux/perf_event.h> a bit (ingomolnar, May 22, 2025)
6b7dd50  perf/x86/intel/uncore: Support MSR portal for discovery tables (Jul 7, 2025)
00ae609  perf/x86/intel/uncore: Support customized MMIO map size (Jul 7, 2025)
41cd0c4  perf/x86/intel: Fix crash in icl_update_topdown_event() (Jul 24, 2025)
e4a7612  perf/x86/intel: Fix segfault with PEBS-via-PT with sample_freq (ahunter6, May 8, 2025)
1178697  perf/x86/intel/ds: Fix counter backwards of non-precise events counte… (Apr 24, 2025)
fb394b6  perf/x86/intel: Fix event constraints for LNC (Feb 19, 2025)
892ce0a  perf/x86/intel: Fix bitmask of OCR and FRONTEND events for LNC (Dec 16, 2024)
5ed5827  perf/x86/intel: Fix incorrect MSR index calculations in intel_pmu_con… (May 29, 2025)
e8fc242  tools/include: Sync x86 headers with the kernel sources (namhyung, Aug 6, 2024)
d0b461c  x86/msr: Standardize on u64 in <asm/msr-index.h> (ingomolnar, Apr 9, 2025)
b94badb  tools arch x86: Sync the msr-index.h copy with the kernel sources (acmel, Jun 12, 2025)
f342d34  perf/x86/intel: Fix IA32_PMC_x_CFG_B MSRs access error (Aug 20, 2025)
107aaf5  KVM: x86/pmu: Add common define to capture fixed counters offset (sean-jc, Nov 10, 2023)
a315112  KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu (Apr 30, 2024)
8f60dcc  KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros (Apr 30, 2024)
dec6610  perf/x86/intel: Add ICL_FIXED_0_ADAPTIVE bit into INTEL_FIXED_BITS_MASK (Aug 20, 2025)
8669393  perf/x86: Print PMU counters bitmap in x86_pmu_show_pmu_cap() (Aug 20, 2025)
011ff3c  perf mem: Fix printing PERF_MEM_LVLNUM_{L2_MHB|MSC} (tlfalcon, Sep 26, 2024)
1077fb2  perf/x86/intel: Decouple BTS initialization from PEBS initialization (Apr 15, 2025)
ba380fa  perf/x86/intel: Rename x86_pmu.pebs to x86_pmu.ds_pebs (Apr 15, 2025)
014d65b  perf/x86/intel: Introduce pairs of PEBS static calls (Apr 15, 2025)
6f051d8  perf/x86/intel: Use early_initcall() to hook bts_init() (Aug 20, 2025)
459b6f7  perf/x86: Add PERF_CAP_PEBS_TIMING_INFO flag (Aug 20, 2025)
608dfd2  perf/x86/intel/ds: Remove redundant assignments to sample.period (May 6, 2025)
1b463b3  perf/core: Check sample_type in perf_sample_save_callchain (yabinc, May 15, 2024)
a99a5a9  perf/core: Check sample_type in perf_sample_save_brstack (yabinc, May 15, 2024)
108cf76  perf/x86/intel: Fix KASAN global-out-of-bounds warning (Oct 28, 2025)
ef68108  perf/x86: Remove redundant is_x86_event() prototype (Oct 29, 2025)
738201d  perf/x86: Fix NULL event access and potential PEBS record loss (Oct 29, 2025)
3424e47  perf/x86/intel: Replace x86_pmu.drain_pebs calling with static call (Oct 29, 2025)
68e8c27  perf/x86/intel: Correct large PEBS flag check (Oct 29, 2025)
f0dc3c5  perf/x86/intel: Initialize architectural PEBS (Oct 29, 2025)
614cfbb  perf/x86/intel/ds: Factor out PEBS record processing code to functions (Oct 29, 2025)
95c8122  perf/x86/intel/ds: Factor out PEBS group processing code to functions (Oct 29, 2025)
3ac61c4  perf/x86/intel: Process arch-PEBS records or record fragments (Oct 29, 2025)
2f907e6  perf/x86/intel: Allocate arch-PEBS buffer and initialize PEBS_BASE MSR (Oct 29, 2025)
992a5a5  perf/x86/intel: Update dyn_constraint base on PEBS event precise level (Oct 29, 2025)
caca094  perf/x86/intel: Setup PEBS data configuration and enable legacy groups (Oct 29, 2025)
39baca8  perf/x86/intel: Add counter group support for arch-PEBS (Oct 29, 2025)
d42f315  perf/x86/intel: Add a check for dynamic constraints (May 12, 2025)
de5ea71  perf/x86/intel: Check PEBS dyn_constraints (Nov 7, 2025)
8c936a2  perf/x86/intel: Optimize PEBS extended config (Nov 7, 2025)
147220d  perf/x86/intel: Fix and clean up intel_pmu_drain_arch_pebs() type use (ingomolnar, Nov 12, 2025)
ded4acf  perf/x86/intel: Fix NULL event dereference crash in handle_pmi_common() (Dec 12, 2025)
ea6ad52  tools headers: Sync x86 headers with kernel sources (namhyung, Dec 22, 2025)
27 changes: 13 additions & 14 deletions arch/x86/events/amd/core.c
@@ -432,7 +432,7 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
 	 * be removed on one CPU at a time AND PMU is disabled
 	 * when we come here
 	 */
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		if (cmpxchg(nb->owners + i, event, NULL) == event)
 			break;
 	}
@@ -499,7 +499,7 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
 	 * because of successive calls to x86_schedule_events() from
 	 * hw_perf_group_sched_in() without hw_perf_enable()
 	 */
-	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+	for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
 		if (new == -1 || hwc->idx == idx)
 			/* assign free slot, prefer hwc->idx */
 			old = cmpxchg(nb->owners + idx, NULL, event);
@@ -542,7 +542,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
 	/*
 	 * initialize all possible NB constraints
 	 */
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		__set_bit(i, nb->event_constraints[i].idxmsk);
 		nb->event_constraints[i].weight = 1;
 	}
@@ -735,7 +735,7 @@ static void amd_pmu_check_overflow(void)
 	 * counters are always enabled when this function is called and
 	 * ARCH_PERFMON_EVENTSEL_INT is always set.
 	 */
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;

@@ -755,7 +755,7 @@ static void amd_pmu_enable_all(int added)

 	amd_brs_enable_all();

-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		/* only activate events which are marked as active */
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
@@ -948,7 +948,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 	/* Clear any reserved bits set by buggy microcode */
 	status &= amd_pmu_global_cntr_mask;

-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;

@@ -968,8 +968,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 		if (!x86_perf_event_set_period(event))
 			continue;

-		if (has_branch_stack(event))
-			perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
+		perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);

 		if (perf_event_overflow(event, &data, regs))
 			x86_pmu_stop(event, 0);
@@ -1288,7 +1287,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.addr_offset		= amd_pmu_addr_offset,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= AMD64_NUM_COUNTERS,
+	.cntr_mask64		= GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
 	.add			= amd_pmu_add_event,
 	.del			= amd_pmu_del_event,
 	.cntval_bits		= 48,
@@ -1387,7 +1386,7 @@ static int __init amd_core_pmu_init(void)
 	 */
 	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
 	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
-	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
+	x86_pmu.cntr_mask64	= GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);

 	/* Check for Performance Monitoring v2 support */
 	if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
@@ -1397,9 +1396,9 @@ static int __init amd_core_pmu_init(void)
 		x86_pmu.version = 2;

 		/* Find the number of available Core PMCs */
-		x86_pmu.num_counters = ebx.split.num_core_pmc;
+		x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);

-		amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
+		amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;

 		/* Update PMC handling functions */
 		x86_pmu.enable_all = amd_pmu_v2_enable_all;
@@ -1427,12 +1426,12 @@ static int __init amd_core_pmu_init(void)
 	 * even numbered counter that has a consecutive adjacent odd
 	 * numbered counter following it.
 	 */
-	for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+	for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
 		even_ctr_mask |= BIT_ULL(i);

 	pair_constraint = (struct event_constraint)
 		__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
-				   x86_pmu.num_counters / 2, 0,
+				   x86_pmu_max_num_counters(NULL) / 2, 0,
 				   PERF_X86_EVENT_PAIR);

 	x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
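The change repeated throughout core.c above comes from 7f166a0 ("perf/x86: Support counter mask"): the scalar x86_pmu.num_counters, which assumes counters are numbered contiguously from 0, is replaced by the cntr_mask/cntr_mask64 bitmap, so index loops become set-bit walks and counts are derived via x86_pmu_max_num_counters(). As a rough illustration only — not kernel code — here is a minimal userspace sketch of the idiom, where GENMASK_U64 and the bit-clearing loop are stand-ins for the kernel's GENMASK_ULL() and for_each_set_bit():

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's GENMASK_ULL(h, l): set bits l..h. */
#define GENMASK_U64(h, l) \
	((~0ULL >> (63 - (h))) & ~((1ULL << (l)) - 1ULL))

int main(void)
{
	/* Six counters -> bits 0..5, mirroring GENMASK_ULL(5, 0) above. */
	uint64_t cntr_mask = GENMASK_U64(5, 0);

	/* Old style: plain index loop, valid only for contiguous counters. */
	for (int i = 0; i < 6; i++)
		printf("old: counter %d\n", i);

	/*
	 * New style: walk the set bits, as for_each_set_bit() does. This
	 * stays correct even when the mask is sparse, e.g. on hybrid PMUs.
	 */
	for (uint64_t m = cntr_mask; m; m &= m - 1)
		printf("new: counter %d\n", __builtin_ctzll(m));

	return 0;
}

The bitmask form is what lets later patches in the series describe PMUs whose usable counters are not a contiguous 0..n-1 range.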
3 changes: 1 addition & 2 deletions arch/x86/events/amd/ibs.c
@@ -1138,8 +1138,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	 * recorded as part of interrupt regs. Thus we need to use rip from
 	 * interrupt regs while unwinding call stack.
 	 */
-	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-		perf_sample_save_callchain(&data, event, iregs);
+	perf_sample_save_callchain(&data, event, iregs);

 	throttle = perf_event_overflow(event, &data, &regs);
 out:
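The two-line deletion above, like the has_branch_stack() removal in amd_pmu_v2_handle_irq() earlier, follows from 1b463b3 and a99a5a9: the sample_type checks move into perf_sample_save_callchain() and perf_sample_save_brstack() themselves, so callers invoke the helpers unconditionally. A simplified sketch of that pattern, with illustrative stand-in types rather than the kernel's real definitions:

#include <stdint.h>
#include <stddef.h>

#define PERF_SAMPLE_CALLCHAIN (1ULL << 5)	/* illustrative flag value */

struct event { uint64_t sample_type; };		/* stand-in for perf_event */
struct sample_data { const void *callchain; };	/* stand-in for perf_sample_data */

/* After the change, the guard lives inside the helper... */
static void sample_save_callchain(struct sample_data *data,
				  const struct event *event,
				  const void *regs)
{
	/* No-op unless this event actually asked for a callchain. */
	if (!(event->sample_type & PERF_SAMPLE_CALLCHAIN))
		return;

	data->callchain = regs;	/* stand-in for the real unwind */
}

/* ...so each caller shrinks from a guarded call to a plain one. */
int main(void)
{
	struct event ev = { .sample_type = PERF_SAMPLE_CALLCHAIN };
	struct sample_data data = { 0 };

	sample_save_callchain(&data, &ev, "iregs");
	return data.callchain ? 0 : 1;
}

Centralizing the check removes duplicated guards from IRQ handlers across drivers and makes it impossible for a caller to save a callchain the event never requested.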