ring-buffer: Add nesting for adding events within events
Author:     Steven Rostedt (VMware) <rostedt@goodmis.org>
AuthorDate: Wed, 7 Feb 2018 22:26:32 +0000 (17:26 -0500)
Commit:     Steven Rostedt (VMware) <rostedt@goodmis.org>
CommitDate: Sat, 10 Mar 2018 21:06:04 +0000 (16:06 -0500)
The ring-buffer code has recursion protection in case tracing ends up
tracing itself: if the ring buffer detects that it was called again at the
same context (normal, softirq, interrupt or NMI), it does not record the
event.

With histogram synthetic events, the synthetic event is recorded while
another event is being traced at the same context. The recursion
protection detects tracing at the same context and blocks the write.
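
To see the collision, here is a stand-alone user-space sketch of the bit
logic the patch below adds (the RB_CTX_* values mirror the enum in
ring_buffer.c; this is an illustration, not kernel code):

  #include <stdio.h>

  enum { RB_CTX_NMI, RB_CTX_IRQ, RB_CTX_SOFTIRQ, RB_CTX_NORMAL };
  #define NESTED_BITS 4

  int main(void)
  {
          unsigned long current_context = 0;
          unsigned long nest = 0;
          int bit = RB_CTX_IRQ;   /* say the outer event runs in IRQ context */

          /* Outer write: bit 1 is clear, so the event is recorded. */
          current_context |= 1UL << (bit + nest);

          /* A second write at the same context with nest == 0 collides: */
          printf("blocked without nesting: %d\n",
                 !!(current_context & (1UL << (bit + nest))));  /* prints 1 */

          /*
           * ring_buffer_nest_start() shifts the window up by NESTED_BITS,
           * so the nested write tests bit 5 instead of bit 1 and passes.
           */
          nest += NESTED_BITS;
          printf("allowed with nesting: %d\n",
                 !(current_context & (1UL << (bit + nest))));   /* prints 1 */
          return 0;
  }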

Add ring_buffer_nest_start() and ring_buffer_nest_end() to notify the
ring buffer that a trace is about to happen within another trace, that
this is intended, and that it must not trigger the recursion blocking.
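
For example, a caller that needs to record one event from within the
tracing of another would bracket the nested write like this (struct
my_entry and the buffer pointer are illustrative placeholders; the call
ordering is what the new API requires):

  struct ring_buffer_event *event;
  struct my_entry *entry;

  ring_buffer_nest_start(buffer);
  event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
  if (event) {
          entry = ring_buffer_event_data(event);
          /* ... fill in the nested event's payload ... */
          ring_buffer_unlock_commit(buffer, event);
  }
  ring_buffer_nest_end(buffer);

Note that ring_buffer_nest_end() is called even when the reserve fails,
so the start/end calls always pair up.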

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
include/linux/ring_buffer.h
kernel/trace/ring_buffer.c

diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 7cb84774c20d76c9b72856b4f9851c0cc775a52b..a0233edc0718e66728d54060c78a0bf52b366e3e 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -117,6 +117,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 int ring_buffer_write(struct ring_buffer *buffer,
                      unsigned long length, void *data);
 
+void ring_buffer_nest_start(struct ring_buffer *buffer);
+void ring_buffer_nest_end(struct ring_buffer *buffer);
+
 struct ring_buffer_event *
 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
                 unsigned long *lost_events);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 33073cdebb269606869b85a33c7c2cf130322fd8..a2fd3893cc02a7e42b3626cefeba608e21cb1c0a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -477,6 +477,7 @@ struct ring_buffer_per_cpu {
        struct buffer_page              *reader_page;
        unsigned long                   lost_events;
        unsigned long                   last_overrun;
+       unsigned long                   nest;
        local_t                         entries_bytes;
        local_t                         entries;
        local_t                         overrun;
@@ -2624,10 +2625,10 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
                bit = pc & NMI_MASK ? RB_CTX_NMI :
                        pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
 
-       if (unlikely(val & (1 << bit)))
+       if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
                return 1;
 
-       val |= (1 << bit);
+       val |= (1 << (bit + cpu_buffer->nest));
        cpu_buffer->current_context = val;
 
        return 0;
@@ -2636,7 +2637,57 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+       cpu_buffer->current_context &=
+               cpu_buffer->current_context - (1 << cpu_buffer->nest);
+}
+
+/* The recursive locking above uses 4 bits */
+#define NESTED_BITS 4
+
+/**
+ * ring_buffer_nest_start - Allow tracing while nested
+ * @buffer: The ring buffer to modify
+ *
+ * The ring buffer has a safety mechanism to prevent recursion.
+ * But there may be a case where a trace needs to be done while
+ * tracing something else. In this case, calling this function
+ * will allow the next ring_buffer_lock_reserve() to nest within
+ * a currently active one.
+ *
+ * Call this function before calling another ring_buffer_lock_reserve() and
+ * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
+ */
+void ring_buffer_nest_start(struct ring_buffer *buffer)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+       int cpu;
+
+       /* Enabled by ring_buffer_nest_end() */
+       preempt_disable_notrace();
+       cpu = raw_smp_processor_id();
+       cpu_buffer = buffer->buffers[cpu];
+       /* This is the shift value for the above recursive locking */
+       cpu_buffer->nest += NESTED_BITS;
+}
+
+/**
+ * ring_buffer_nest_end - Allow tracing while nested
+ * @buffer: The ring buffer to modify
+ *
+ * Must be called after ring_buffer_nest_start() and after the
+ * ring_buffer_unlock_commit().
+ */
+void ring_buffer_nest_end(struct ring_buffer *buffer)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+       int cpu;
+
+       /* Disabled by ring_buffer_nest_start() */
+       cpu = raw_smp_processor_id();
+       cpu_buffer = buffer->buffers[cpu];
+       /* This is the shift value for the above recursive locking */
+       cpu_buffer->nest -= NESTED_BITS;
+       preempt_enable_notrace();
 }
 
 /**