author    Dave Martin <dave.martin@linaro.org>    2012-12-12 18:13:44 +0000
committer Andrey Konovalov <andrey.konovalov@linaro.org>    2013-05-25 13:21:21 +0400
commit    e7a123aeb0117cc41f5dfde53d3554308a1d592e
tree      0e772cebf90fff374802c79893a8fb5f95d34248 /arch/arm/kernel/perf_event_v7.c
parent    b52a4b4ba21d5d27ac49a2ff2f40f621b23db8f9
ARM: perf: Allow multiple CPU PMUs per CPU
In a system where Linux logical CPUs can migrate between different physical CPUs, multiple CPU PMUs can logically count events for each logical CPU, as logical CPUs migrate from one cluster to another.

This patch allows multiple PMUs to be registered against each CPU.

The pairing of a PMU and a CPU is represented by a struct arm_cpu_pmu, with existing per-CPU state used by perf moving into this structure. arm_cpu_pmus are per-cpu-allocated, and hang off the relevant arm_pmu structure.

This arrangement allows us to find all the CPU-PMU pairings for a given PMU, but not for a given CPU. To do the latter, a list of all registered CPU PMUs is maintained, and we iterate over that when we need to find all of a CPU's CPU PMUs. This is not elegant, but it shouldn't be a heavy cost, since the number of different CPU PMUs across the system is currently expected to be low (i.e., 2 or fewer). This could be improved later.

As a side-effect, the get_hw_events() method no longer has enough context to provide an answer, because there may be multiple candidate PMUs for a CPU. This patch adds the struct arm_pmu * for the relevant PMU to this interface to resolve this problem, resulting in trivial changes to various ARM PMU implementations.

Signed-off-by: Dave Martin <dave.martin@linaro.org>
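To make the description above concrete, here is a rough, illustrative sketch of how such a PMU/CPU pairing could be laid out. This is not the actual patch: the struct fields, the cpu_pmus member, and the cpu_pmu_get_cpu_events() helper name are assumptions; only the extra struct arm_pmu * argument to get_hw_events() is taken from the diff below.

/* Sketch only: assumes the usual ARM perf types (struct arm_pmu,
 * struct pmu_hw_events) from the ARM perf_event code. */
struct arm_cpu_pmu {
	struct arm_pmu		*arm_pmu;	/* the PMU this per-CPU instance belongs to */
	struct pmu_hw_events	hw_events;	/* per-CPU counter state, previously global per-CPU data */
	struct list_head	entry;		/* hypothetical: global list walked to find a CPU's PMUs */
};

/* With more than one candidate PMU per CPU, the callback must be told
 * which PMU is asking, hence the new struct arm_pmu * parameter: */
static struct pmu_hw_events *cpu_pmu_get_cpu_events(struct arm_pmu *pmu)
{
	/* hypothetical field: pmu->cpu_pmus would be the per-cpu allocation
	 * of struct arm_cpu_pmu hanging off this arm_pmu */
	return &this_cpu_ptr(pmu->cpu_pmus)->hw_events;
}

Call sites such as armv7pmu_enable_event() then simply pass the PMU they already hold, as the hunks below show: cpu_pmu->get_hw_events(cpu_pmu).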
Diffstat (limited to 'arch/arm/kernel/perf_event_v7.c')
 arch/arm/kernel/perf_event_v7.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index dd1ad0ec29e..d2b75fb71e3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1000,7 +1000,7 @@ static void armv7pmu_enable_event(struct perf_event *event)
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -1046,7 +1046,7 @@ static void armv7pmu_disable_event(struct perf_event *event)
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -1078,7 +1078,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
u32 pmnc;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(cpu_pmu);
struct pt_regs *regs;
int idx;
@@ -1138,7 +1138,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
@@ -1149,7 +1149,7 @@ static void armv7pmu_start(struct arm_pmu *cpu_pmu)
static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */