arm_pmu: add armpmu_alloc_atomic()
author     Mark Rutland <mark.rutland@arm.com>
           Mon, 5 Feb 2018 16:41:58 +0000
committer  Will Deacon <will.deacon@arm.com>
           Tue, 20 Feb 2018 11:34:54 +0000
In ACPI systems, we don't know the makeup of CPUs until we hotplug them
on, and thus have to allocate the PMU data structures at hotplug time.
Since the hotplug path runs in atomic context and cannot sleep, we must
use GFP_ATOMIC allocations.

Let's add an armpmu_alloc_atomic() that we can use in this case.
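
As a rough sketch (illustrative only, not part of this patch; the
callback name is made up), a hotplug-time caller runs on the incoming
CPU with interrupts disabled, so it cannot use the sleeping
armpmu_alloc() and has to use the atomic variant instead:

	/*
	 * Hypothetical CPU hotplug "starting" callback: it runs on the
	 * incoming CPU with interrupts disabled, so allocations must not
	 * sleep. armpmu_alloc() uses GFP_KERNEL and may sleep; the atomic
	 * variant is required here.
	 */
	static int example_pmu_cpu_starting(unsigned int cpu)
	{
		struct arm_pmu *pmu = armpmu_alloc_atomic();

		if (!pmu)
			return -ENOMEM;

		/* ... hook the newly allocated PMU up to this CPU ... */
		return 0;
	}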

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
drivers/perf/arm_pmu.c
drivers/perf/arm_pmu_acpi.c
include/linux/perf/arm_pmu.h

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 373dfd7d8a1dac70f110288835ad7c157cd002bf..4f73c5e8d6239139b925038154632751a086d4ef 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -760,18 +760,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
                                            &cpu_pmu->node);
 }
 
-struct arm_pmu *armpmu_alloc(void)
+static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 {
        struct arm_pmu *pmu;
        int cpu;
 
-       pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+       pmu = kzalloc(sizeof(*pmu), flags);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                goto out;
        }
 
-       pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+       pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
        if (!pmu->hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                goto out_free_pmu;
@@ -817,6 +817,17 @@ out:
        return NULL;
 }
 
+struct arm_pmu *armpmu_alloc(void)
+{
+       return __armpmu_alloc(GFP_KERNEL);
+}
+
+struct arm_pmu *armpmu_alloc_atomic(void)
+{
+       return __armpmu_alloc(GFP_ATOMIC);
+}
+
+
 void armpmu_free(struct arm_pmu *pmu)
 {
        free_percpu(pmu->hw_events);
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 705f1a390e3123a2d305e0468cec8194d972ed39..30c5f2bbce59a3a040800b497ccb47f23db77c9b 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -127,7 +127,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
                return pmu;
        }
 
-       pmu = armpmu_alloc();
+       pmu = armpmu_alloc_atomic();
        if (!pmu) {
                pr_warn("Unable to allocate PMU for CPU%d\n",
                        smp_processor_id());
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 899bc7ef08812e0ff9a2fae60aec99437c46d7d9..1f8bb83ef42fcbcdc1d2845771161209beae4380 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -157,6 +157,7 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
+struct arm_pmu *armpmu_alloc_atomic(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
 int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);