aboutsummaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorDave Martin <dave.martin@linaro.org>2012-12-12 18:13:44 +0000
committerAndrey Konovalov <andrey.konovalov@linaro.org>2013-05-25 13:22:44 +0400
commitda0a0ae272e5c117f43da3522a01a28219ca9a31 (patch)
tree460f7484f43657867e84d5a809e2016744876953 /arch
parentb115c4933bebff32c29da495cd0c8ade248548c3 (diff)
downloadvexpress-lsk-da0a0ae272e5c117f43da3522a01a28219ca9a31.tar.gz
ARM: perf: [WIP] Manipulate the right shadow register for PM*CLR
Where the ARM Architecture exposes PM*SET and PM*CLR, these really manipulate the same underlying register. This patch uses the PM*SET register for storing the logical state when the PMU is not active, and manipulates that state when the code attempts to access the corresponding PM*CLR register. PMOVSR is a special case: this is a reset-only register, so the logical copy of PMOVSR is always used. These changes result in a small number of unused fields in the armv7_pmu_logical_state structure. For now, this is considered to be harmless -- it may be tidied up later. Signed-off-by: Dave Martin <dave.martin@linaro.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/arm/kernel/perf_event_v7.c24
1 files changed, 12 insertions, 12 deletions
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index c860e1e2a86..1ed779ab52b 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -150,23 +150,23 @@ __def_v7_pmu_reg(PMOVSSET, RW, 0, c14, 3)
__v7_pmu_read_logical(cpupmu, name) \
)
-#define __v7_pmu_reg_set(cpupmu, name, mask) do { \
+#define __v7_pmu_reg_set(cpupmu, name, logical_name, mask) do { \
if ((cpupmu)->active) \
__v7_pmu_write_physical(name, mask); \
else { \
u32 __value; \
- __value =__v7_pmu_read_logical(cpupmu, name) | (mask); \
- __v7_pmu_write_logical(cpupmu, name, __value); \
+ __value =__v7_pmu_read_logical(cpupmu, logical_name) | (mask); \
+ __v7_pmu_write_logical(cpupmu, logical_name, __value); \
} \
} while(0)
-#define __v7_pmu_reg_clr(cpupmu, name, mask) do { \
+#define __v7_pmu_reg_clr(cpupmu, name, logical_name, mask) do { \
if ((cpupmu)->active) \
__v7_pmu_write_physical(name, mask); \
else { \
u32 __value; \
- __value = __v7_pmu_read_logical(cpupmu, name) & ~(mask); \
- __v7_pmu_write_logical(cpupmu, name, __value); \
+ __value = __v7_pmu_read_logical(cpupmu, logical_name) & ~(mask); \
+ __v7_pmu_write_logical(cpupmu, logical_name, __value); \
} \
} while(0)
@@ -1026,31 +1026,31 @@ static inline void armv7_pmnc_write_evtsel(struct arm_cpu_pmu *cpupmu, int idx,
static inline int armv7_pmnc_enable_counter(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- __v7_pmu_reg_set(cpupmu, PMCNTENSET, BIT(counter));
+ __v7_pmu_reg_set(cpupmu, PMCNTENSET, PMCNTENSET, BIT(counter));
return idx;
}
static inline int armv7_pmnc_disable_counter(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- __v7_pmu_reg_clr(cpupmu, PMCNTENCLR, BIT(counter));
+ __v7_pmu_reg_clr(cpupmu, PMCNTENCLR, PMCNTENSET, BIT(counter));
return idx;
}
static inline int armv7_pmnc_enable_intens(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- __v7_pmu_reg_set(cpupmu, PMINTENSET, BIT(counter));
+	__v7_pmu_reg_set(cpupmu, PMINTENSET, PMINTENSET, BIT(counter));
return idx;
}
static inline int armv7_pmnc_disable_intens(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- __v7_pmu_reg_clr(cpupmu, PMINTENCLR, BIT(counter));
+ __v7_pmu_reg_clr(cpupmu, PMINTENCLR, PMINTENSET, BIT(counter));
isb();
/* Clear the overflow flag in case an interrupt is pending. */
- __v7_pmu_reg_clr(cpupmu, PMOVSR, BIT(counter));
+ __v7_pmu_reg_clr(cpupmu, PMOVSR, PMOVSR, BIT(counter));
isb();
return idx;
@@ -1065,7 +1065,7 @@ static inline u32 armv7_pmnc_getreset_flags(struct arm_cpu_pmu *cpupmu)
/* Write to clear flags */
val &= ARMV7_FLAG_MASK;
- __v7_pmu_reg_clr(cpupmu, PMOVSR, val);
+ __v7_pmu_reg_clr(cpupmu, PMOVSR, PMOVSR, val);
return val;
}