perf: Factor out event accounting code to account_event()/__free_event()
author Frederic Weisbecker <fweisbec@gmail.com>
Tue, 23 Jul 2013 00:31:01 +0000 (02:31 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 30 Jul 2013 20:29:12 +0000 (22:29 +0200)
Gather all the event accounting code into a single place,
run only once all of its prerequisites have completed. This
simplifies the refcounting.
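
As an illustration only, here is a minimal user-space sketch of the
pattern being applied (the toy struct, the counters and event_alloc()
are invented for this sketch, and the three helpers are simplified
stand-ins rather than the kernel functions): all the accounting moves
into one helper that runs only on the fully set up path, while
__free_event() releases what the event owns without touching any
counter, so paths that fail before accounting can call it directly.

    /* Toy illustration, not kernel code: all names except the
     * account_event()/__free_event() pairing are invented here. */
    #include <stdio.h>
    #include <stdlib.h>

    static int nr_mmap_events;          /* stand-ins for the global counters */
    static int nr_comm_events;

    struct toy_event {
            int mmap;                   /* wants mmap accounting */
            int comm;                   /* wants comm accounting */
            void *buf;                  /* a resource acquired during setup */
    };

    /* All the accounting in one place, called only once every
     * prerequisite has succeeded. */
    static void account_event(struct toy_event *event)
    {
            if (event->mmap)
                    nr_mmap_events++;
            if (event->comm)
                    nr_comm_events++;
    }

    /* Release side: frees what the event owns but touches no counter,
     * so paths that fail before accounting can call it directly. */
    static void __free_event(struct toy_event *event)
    {
            free(event->buf);
            free(event);
    }

    /* Full teardown of an accounted event: undo the accounting, then
     * hand the rest over to __free_event(). */
    static void free_event(struct toy_event *event)
    {
            if (event->mmap)
                    nr_mmap_events--;
            if (event->comm)
                    nr_comm_events--;
            __free_event(event);
    }

    static struct toy_event *event_alloc(int mmap, int comm)
    {
            struct toy_event *event = calloc(1, sizeof(*event));

            if (!event)
                    return NULL;
            event->mmap = mmap;
            event->comm = comm;

            event->buf = malloc(64);    /* a prerequisite that may fail */
            if (!event->buf) {
                    __free_event(event);    /* nothing accounted yet */
                    return NULL;
            }

            account_event(event);       /* only on the fully set up path */
            return event;
    }

    int main(void)
    {
            struct toy_event *event = event_alloc(1, 0);

            if (!event)
                    return 1;
            printf("mmap events: %d\n", nr_mmap_events);
            free_event(event);
            return 0;
    }

This is the same split that lets the perf_cgroup_connect() error path
in the diff below drop straight to __free_event() instead of having to
unwind counters.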

Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-4-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/events/core.c

index 3b998626b7a0c3621b1769dc07d6b069c39ac19b..158fd5789e58b76580ca4cfb176adf58ac2fcf7a 100644 (file)
@@ -3128,6 +3128,21 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void __free_event(struct perf_event *event)
+{
+       if (!event->parent) {
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
+       }
+
+       if (event->destroy)
+               event->destroy(event);
+
+       if (event->ctx)
+               put_ctx(event->ctx);
+
+       call_rcu(&event->rcu_head, free_event_rcu);
+}
 static void free_event(struct perf_event *event)
 {
        irq_work_sync(&event->pending);
@@ -3141,8 +3156,6 @@ static void free_event(struct perf_event *event)
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
-               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                       put_callchain_buffers();
                if (is_cgroup_event(event)) {
                        atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
                        static_key_slow_dec_deferred(&perf_sched_events);
@@ -3180,13 +3193,8 @@ static void free_event(struct perf_event *event)
        if (is_cgroup_event(event))
                perf_detach_cgroup(event);
 
-       if (event->destroy)
-               event->destroy(event);
-
-       if (event->ctx)
-               put_ctx(event->ctx);
 
-       call_rcu(&event->rcu_head, free_event_rcu);
+       __free_event(event);
 }
 
 int perf_event_release_kernel(struct perf_event *event)
@@ -6443,6 +6451,29 @@ unlock:
        return pmu;
 }
 
+static void account_event(struct perf_event *event)
+{
+       if (event->attach_state & PERF_ATTACH_TASK)
+               static_key_slow_inc(&perf_sched_events.key);
+       if (event->attr.mmap || event->attr.mmap_data)
+               atomic_inc(&nr_mmap_events);
+       if (event->attr.comm)
+               atomic_inc(&nr_comm_events);
+       if (event->attr.task)
+               atomic_inc(&nr_task_events);
+       if (has_branch_stack(event)) {
+               static_key_slow_inc(&perf_sched_events.key);
+               if (!(event->attach_state & PERF_ATTACH_TASK))
+                       atomic_inc(&per_cpu(perf_branch_stack_events,
+                                           event->cpu));
+       }
+
+       if (is_cgroup_event(event)) {
+               atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+               static_key_slow_inc(&perf_sched_events.key);
+       }
+}
+
 /*
  * Allocate and initialize an event structure
  */
@@ -6556,21 +6587,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                        if (err)
                                goto err_pmu;
                }
-
-               if (event->attach_state & PERF_ATTACH_TASK)
-                       static_key_slow_inc(&perf_sched_events.key);
-               if (event->attr.mmap || event->attr.mmap_data)
-                       atomic_inc(&nr_mmap_events);
-               if (event->attr.comm)
-                       atomic_inc(&nr_comm_events);
-               if (event->attr.task)
-                       atomic_inc(&nr_task_events);
-               if (has_branch_stack(event)) {
-                       static_key_slow_inc(&perf_sched_events.key);
-                       if (!(event->attach_state & PERF_ATTACH_TASK))
-                               atomic_inc(&per_cpu(perf_branch_stack_events,
-                                                   event->cpu));
-               }
        }
 
        return event;
@@ -6865,17 +6881,14 @@ SYSCALL_DEFINE5(perf_event_open,
 
        if (flags & PERF_FLAG_PID_CGROUP) {
                err = perf_cgroup_connect(pid, event, &attr, group_leader);
-               if (err)
-                       goto err_alloc;
-               /*
-                * one more event:
-                * - that has cgroup constraint on event->cpu
-                * - that may need work on context switch
-                */
-               atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-               static_key_slow_inc(&perf_sched_events.key);
+               if (err) {
+                       __free_event(event);
+                       goto err_task;
+               }
        }
 
+       account_event(event);
+
        /*
         * Special case software events and allow them to be part of
         * any hardware group.
@@ -7071,6 +7084,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
                goto err;
        }
 
+       account_event(event);
+
        ctx = find_get_context(event->pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
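
For orientation only, the perf_event_open() hunk above condenses to the
ordering sketched below ('...' marks code omitted from the sketch):
perf_cgroup_connect() can still fail, but since it now runs before any
accounting has been done, the error path only needs __free_event(), and
account_event() is reached only once every prerequisite has succeeded.

        ...
        if (flags & PERF_FLAG_PID_CGROUP) {
                err = perf_cgroup_connect(pid, event, &attr, group_leader);
                if (err) {
                        __free_event(event);    /* nothing accounted yet */
                        goto err_task;
                }
        }

        account_event(event);                   /* all prerequisites done */
        ...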