From 2d4de8376ff1d94a5070cfa9092c59bfdc4e693e Mon Sep 17 00:00:00 2001
From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Date: Thu, 10 Mar 2016 15:32:11 -0800
Subject: [PATCH] perf/x86/mbm: Implement RMID recycling

An RMID can be allocated or deallocated as part of RMID recycling.

When an RMID is allocated for an MBM event, the MBM counter needs to be
initialized, because the next time we read the counter we need the
previous value to account for the total bytes that went to the memory
controller.

Similarly, when an RMID is deallocated we need to update the ->count
variable.

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Tony Luck
Acked-by: Thomas Gleixner
Cc: Alexander Shishkin
Cc: Andy Lutomirski
Cc: Arnaldo Carvalho de Melo
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David Ahern
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Matt Fleming
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Vince Weaver
Cc: fenghua.yu@intel.com
Cc: h.peter.anvin@intel.com
Cc: ravi.v.shankar@intel.com
Cc: vikas.shivappa@intel.com
Link: http://lkml.kernel.org/r/1457652732-4499-6-git-send-email-vikas.shivappa@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/cqm.c | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 610bd8ab37e4..a98f472bf6b2 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -450,6 +450,7 @@ struct rmid_read {
 
 static void __intel_cqm_event_count(void *info);
 static void init_mbm_sample(u32 rmid, u32 evt_type);
+static void __intel_mbm_event_count(void *info);
 
 static bool is_mbm_event(int e)
 {
@@ -476,8 +477,14 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 			.rmid = old_rmid,
 		};
 
-		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
-				 &rr, 1);
+		if (is_mbm_event(group->attr.config)) {
+			rr.evt_type = group->attr.config;
+			on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count,
+					 &rr, 1);
+		} else {
+			on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
+					 &rr, 1);
+		}
 		local64_set(&group->count, atomic64_read(&rr.value));
 	}
 
@@ -489,6 +496,22 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 
 	raw_spin_unlock_irq(&cache_lock);
 
+	/*
+	 * If the allocation is for mbm, init the mbm stats.
+	 * Need to check if each event in the group is mbm event
+	 * because there could be multiple type of events in the same group.
+	 */
+	if (__rmid_valid(rmid)) {
+		event = group;
+		if (is_mbm_event(event->attr.config))
+			init_mbm_sample(rmid, event->attr.config);
+
+		list_for_each_entry(event, head, hw.cqm_group_entry) {
+			if (is_mbm_event(event->attr.config))
+				init_mbm_sample(rmid, event->attr.config);
+		}
+	}
+
 	return old_rmid;
 }
 
@@ -978,7 +1001,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
 		/* All tasks in a group share an RMID */
 		event->hw.cqm_rmid = rmid;
 		*group = iter;
-		if (is_mbm_event(event->attr.config))
+		if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
 			init_mbm_sample(rmid, event->attr.config);
 		return;
 	}
@@ -996,7 +1019,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
 	else
 		rmid = __get_rmid();
 
-	if (is_mbm_event(event->attr.config))
+	if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
 		init_mbm_sample(rmid, event->attr.config);
 
 	event->hw.cqm_rmid = rmid;
-- 
2.30.2
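
Background for the change above: MBM bandwidth counting is delta based.
The hardware counter behind an RMID keeps incrementing no matter which
event currently owns that RMID, so an event that receives a recycled
RMID must first snapshot the raw counter and from then on accumulate
only increments; on deallocation, the accumulated total is folded back
into the event's ->count, which is what the new __intel_mbm_event_count()
call in intel_cqm_xchg_rmid() handles. The user-space sketch below
illustrates the snapshot-and-delta bookkeeping; the names used here
(mbm_sample, sample_init, sample_update, rmid_read) are illustrative
stand-ins, not the kernel's implementation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Per-RMID bookkeeping: last raw counter value and bytes accumulated. */
struct mbm_sample {
	uint64_t prev_msr;	/* raw counter at the previous read */
	uint64_t total_bytes;	/* bytes attributed to the current owner */
};

/*
 * Stand-in for reading the hardware bandwidth counter of an RMID; the
 * kernel reads MSR_IA32_QM_CTR here. The raw value is passed in so the
 * sketch runs in user space.
 */
static uint64_t rmid_read(uint32_t rmid, uint64_t hw_value)
{
	(void)rmid;
	return hw_value;
}

/*
 * On (re)allocation: snapshot the counter so the next read does not
 * charge this owner for bytes counted while the RMID belonged to
 * someone else.
 */
static void sample_init(struct mbm_sample *s, uint32_t rmid, uint64_t hw)
{
	s->prev_msr = rmid_read(rmid, hw);
	s->total_bytes = 0;
}

/*
 * On each count read: accumulate only the delta since the last read.
 * (Real MBM counters are narrower than 64 bits and the kernel also
 * corrects for wraparound, which this sketch omits.)
 */
static uint64_t sample_update(struct mbm_sample *s, uint32_t rmid, uint64_t hw)
{
	uint64_t now = rmid_read(rmid, hw);

	s->total_bytes += now - s->prev_msr;
	s->prev_msr = now;
	return s->total_bytes;
}

int main(void)
{
	struct mbm_sample s;

	/* RMID 2 is recycled while its raw counter reads 1000. */
	sample_init(&s, 2, 1000);

	/* The counter later reads 1500: only 500 bytes are ours. */
	printf("%" PRIu64 " bytes\n", sample_update(&s, 2, 1500));
	return 0;
}

Without the sample_init() step at allocation time, the first
sample_update() would charge the new owner for traffic generated by the
RMID's previous owner. That is why the patch calls init_mbm_sample()
for each MBM event in the group whenever a valid RMID is installed, and
why the new __rmid_valid(rmid) checks skip the call when recycling has
not produced a usable RMID.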