* Hardware lock to serialize accesses to PMU registers. Needed for the
* read/modify/write sequences.
*/
-DEFINE_SPINLOCK(pmu_lock);
+static DEFINE_SPINLOCK(pmu_lock);
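For context, the enable/disable paths update the PMU control registers with a
read/modify/write sequence under this lock. A minimal sketch of the pattern,
using the armv6_pmcr_read()/armv6_pmcr_write() coprocessor accessors this file
defines for ARMv6; pmu_set_bits() itself is a hypothetical illustration:

static void pmu_set_bits(unsigned long mask)
{
	unsigned long flags, val;

	/* Serialize the read/modify/write against other CPUs and IRQs. */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();	/* read the control register */
	val |= mask;			/* set the requested bits */
	armv6_pmcr_write(val);		/* write the result back */
	spin_unlock_irqrestore(&pmu_lock, flags);
}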
/*
* ARMv6 supports a maximum of 3 events, starting from index 1. If we add
* another platform that supports more, we need to increase this to be the
* largest of all platforms.
*/
unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
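The per-CPU structure is looked up with the per-CPU accessors of this kernel
generation; a minimal sketch of the idiom (example_usage() is a hypothetical
illustration, not a function from this patch):

static void example_usage(void)
{
	/*
	 * Each CPU owns its own event bookkeeping, so no locking is
	 * needed for CPU-local access.
	 */
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (test_bit(0, cpuc->active_mask))
		;	/* counter 0 is active on this CPU */
}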
struct arm_pmu {
enum arm_perf_pmu_ids id;
* This code has been adapted from the ARM OProfile support.
*/
struct frame_tail {
- struct frame_tail *fp;
- unsigned long sp;
- unsigned long lr;
+ struct frame_tail __user *fp;
+ unsigned long sp;
+ unsigned long lr;
} __attribute__((packed));
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
-static struct frame_tail *
-user_backtrace(struct frame_tail *tail,
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct frame_tail buftail;
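The remainder of user_backtrace() (elided by the hunk above) is where the
__user annotation matters: the frame record must be copied out of user space
with the checked accessors rather than dereferenced directly. A sketch of the
body, assuming a callchain_store() helper that appends one address to the
entry:

	/* Refuse to touch the record unless it is user-accessible. */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	callchain_store(entry, buftail.lr);	/* record the return address */

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses); otherwise stop the walk.
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp - 1;	/* frame record of the caller */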
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
- struct frame_tail *tail;
+ struct frame_tail __user *tail;
- tail = (struct frame_tail *)regs->ARM_fp - 1;
+ tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while (tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, entry);
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
-void
+static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
int idx)
{
.max_period = (1LLU << 32) - 1,
};
-const struct arm_pmu *__init armv6pmu_init(void)
+static const struct arm_pmu *__init armv6pmu_init(void)
{
return &armv6pmu;
}
.max_period = (1LLU << 32) - 1,
};
-const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return &armv6mpcore_pmu;
}
#else
-const struct arm_pmu *__init armv6pmu_init(void)
+static const struct arm_pmu *__init armv6pmu_init(void)
{
return NULL;
}
-const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return NULL;
}
#endif
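The stubs that return NULL when the corresponding CONFIG option is disabled
let the probe code dispatch unconditionally on the CPUID and fail gracefully.
A sketch of the caller's shape, modeled on this file's init path; the
part-number masks are illustrative and the real dispatcher handles more parts:

static int __init init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	const struct arm_pmu *armpmu = NULL;

	switch (cpuid & 0xFFF0) {	/* primary part number */
	case 0xB360:			/* ARM1136 */
		armpmu = armv6pmu_init();
		break;
	case 0xC090:			/* Cortex-A9 */
		armpmu = armv7_a9_pmu_init();
		break;
	}

	/* A NULL return means the matching driver was configured out. */
	if (!armpmu)
		pr_info("no hardware support available\n");

	return 0;
}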
-void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
return nb_cnt + 1;
}
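The hunk above closes the helper that counts the usable counters: the number
of event counters comes from the PMCR.N field (bits [15:11] of the ARMv7
Performance Monitor Control Register), and the +1 accounts for the dedicated
cycle counter. A sketch of the elided read, assuming an armv7_pmnc_read()
accessor (the mrc p15, 0, <Rd>, c9, c12, 0 read) like the one this file uses:

#define ARMV7_PMNC_N_SHIFT	11	/* PMCR.N: number of event counters */
#define ARMV7_PMNC_N_MASK	0x1f

	/* Number of implemented event counters, from PMCR[15:11]. */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;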
-const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
armv7pmu.name = "ARMv7 Cortex-A8";
return &armv7pmu;
}
-const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
armv7pmu.name = "ARMv7 Cortex-A9";
return &armv7pmu;
}
#else
-const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
return NULL;
}
-const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
.max_period = (1LLU << 32) - 1,
};
-const struct arm_pmu *__init xscale1pmu_init(void)
+static const struct arm_pmu *__init xscale1pmu_init(void)
{
return &xscale1pmu;
}
.max_period = (1LLU << 32) - 1,
};
-const struct arm_pmu *__init xscale2pmu_init(void)
+static const struct arm_pmu *__init xscale2pmu_init(void)
{
return &xscale2pmu;
}
#else
-const struct arm_pmu *__init xscale1pmu_init(void)
+static const struct arm_pmu *__init xscale1pmu_init(void)
{
return NULL;
}
-const struct arm_pmu *__init xscale2pmu_init(void)
+static const struct arm_pmu *__init xscale2pmu_init(void)
{
return NULL;
}