When a ring-buffer is memory mapped by user-space, no trace or
ring-buffer swap is possible. This means the snapshot feature is
mutually exclusive with the memory mapping. Having a refcount on
snapshot users will help to know if a mapping is possible or not.

Instead of relying on the global trace_types_lock, a new spinlock is
introduced to serialize accesses to trace_array->snapshot. This intends
to allow access to that variable in a context where the mmap lock is
already held.

Signed-off-by: Vincent Donnefort <vdonnef...@google.com>

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2a7c6fd934e9..4ebf4d0bd14c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1300,6 +1300,50 @@ static void free_snapshot(struct trace_array *tr)
        tr->allocated_snapshot = false;
 }
 
+static int tracing_arm_snapshot_locked(struct trace_array *tr)
+{
+       int ret;
+
+       lockdep_assert_held(&trace_types_lock);
+
+       spin_lock(&tr->snapshot_trigger_lock);
+       if (tr->snapshot == UINT_MAX) {
+               spin_unlock(&tr->snapshot_trigger_lock);
+               return -EBUSY;
+       }
+
+       tr->snapshot++;
+       spin_unlock(&tr->snapshot_trigger_lock);
+
+       ret = tracing_alloc_snapshot_instance(tr);
+       if (ret) {
+               spin_lock(&tr->snapshot_trigger_lock);
+               tr->snapshot--;
+               spin_unlock(&tr->snapshot_trigger_lock);
+       }
+
+       return ret;
+}
+
+int tracing_arm_snapshot(struct trace_array *tr)
+{
+       int ret;
+
+       mutex_lock(&trace_types_lock);
+       ret = tracing_arm_snapshot_locked(tr);
+       mutex_unlock(&trace_types_lock);
+
+       return ret;
+}
+
+void tracing_disarm_snapshot(struct trace_array *tr)
+{
+       spin_lock(&tr->snapshot_trigger_lock);
+       if (!WARN_ON(!tr->snapshot))
+               tr->snapshot--;
+       spin_unlock(&tr->snapshot_trigger_lock);
+}
+
 /**
  * tracing_alloc_snapshot - allocate snapshot buffer.
  *
@@ -1373,10 +1417,6 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
 
        mutex_lock(&trace_types_lock);
 
-       ret = tracing_alloc_snapshot_instance(tr);
-       if (ret)
-               goto fail_unlock;
-
        if (tr->current_trace->use_max_tr) {
                ret = -EBUSY;
                goto fail_unlock;
@@ -1395,6 +1435,10 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
                goto fail_unlock;
        }
 
+       ret = tracing_arm_snapshot_locked(tr);
+       if (ret)
+               goto fail_unlock;
+
        local_irq_disable();
        arch_spin_lock(&tr->max_lock);
        tr->cond_snapshot = cond_snapshot;
@@ -1439,6 +1483,8 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();
 
+       tracing_disarm_snapshot(tr);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
@@ -1481,6 +1527,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
 }
 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 #define free_snapshot(tr)      do { } while (0)
+#define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 void tracer_tracing_off(struct trace_array *tr)
@@ -6593,11 +6640,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
                 */
                synchronize_rcu();
                free_snapshot(tr);
+               tracing_disarm_snapshot(tr);
        }
 
-       if (t->use_max_tr && !tr->allocated_snapshot) {
-               ret = tracing_alloc_snapshot_instance(tr);
-               if (ret < 0)
+       if (t->use_max_tr) {
+               ret = tracing_arm_snapshot_locked(tr);
+               if (ret)
                        goto out;
        }
 #else
@@ -6606,8 +6654,13 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 
        if (t->init) {
                ret = tracer_init(t, tr);
-               if (ret)
+               if (ret) {
+#ifdef CONFIG_TRACER_MAX_TRACE
+                       if (t->use_max_tr)
+                               tracing_disarm_snapshot(tr);
+#endif
                        goto out;
+               }
        }
 
        tr->current_trace = t;
@@ -7709,10 +7762,11 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                if (tr->allocated_snapshot)
                        ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                        &tr->array_buffer, iter->cpu_file);
-               else
-                       ret = tracing_alloc_snapshot_instance(tr);
-               if (ret < 0)
+
+               ret = tracing_arm_snapshot_locked(tr);
+               if (ret)
                        break;
+
                /* Now, we're going to swap */
                if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
                        local_irq_disable();
@@ -7722,6 +7776,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                        smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
                                                 (void *)tr, 1);
                }
+               tracing_disarm_snapshot(tr);
                break;
        default:
                if (tr->allocated_snapshot) {
@@ -8846,8 +8901,13 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
 
        ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
 
-       if (glob[0] == '!')
-               return unregister_ftrace_function_probe_func(glob+1, tr, ops);
+       if (glob[0] == '!') {
+               ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
+               if (!ret)
+                       tracing_disarm_snapshot(tr);
+
+               return ret;
+       }
 
        if (!param)
                goto out_reg;
@@ -8866,12 +8926,13 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
                return ret;
 
  out_reg:
-       ret = tracing_alloc_snapshot_instance(tr);
+       ret = tracing_arm_snapshot(tr);
        if (ret < 0)
                goto out;
 
        ret = register_ftrace_function_probe(glob, tr, ops, count);
-
+       if (ret < 0)
+               tracing_disarm_snapshot(tr);
  out:
        return ret < 0 ? ret : 0;
 }
@@ -9678,7 +9739,9 @@ trace_array_create_systems(const char *name, const char *systems)
        raw_spin_lock_init(&tr->start_lock);
 
        tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-
+#ifdef CONFIG_TRACER_MAX_TRACE
+       spin_lock_init(&tr->snapshot_trigger_lock);
+#endif
        tr->current_trace = &nop_trace;
 
        INIT_LIST_HEAD(&tr->systems);
@@ -10648,7 +10711,9 @@ __init static int tracer_alloc_buffers(void)
        global_trace.current_trace = &nop_trace;
 
        global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-
+#ifdef CONFIG_TRACER_MAX_TRACE
+       spin_lock_init(&global_trace.snapshot_trigger_lock);
+#endif
        ftrace_init_global_array_ops(&global_trace);
 
        init_trace_flags_index(&global_trace);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 00f873910c5d..bd312e9afe25 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -334,8 +334,8 @@ struct trace_array {
         */
        struct array_buffer     max_buffer;
        bool                    allocated_snapshot;
-#endif
-#ifdef CONFIG_TRACER_MAX_TRACE
+       spinlock_t              snapshot_trigger_lock;
+       unsigned int            snapshot;
        unsigned long           max_latency;
 #ifdef CONFIG_FSNOTIFY
        struct dentry           *d_max_latency;
@@ -1973,12 +1973,16 @@ static inline void trace_event_eval_update(struct trace_eval_map **map, int len)
 #ifdef CONFIG_TRACER_SNAPSHOT
 void tracing_snapshot_instance(struct trace_array *tr);
 int tracing_alloc_snapshot_instance(struct trace_array *tr);
+int tracing_arm_snapshot(struct trace_array *tr);
+void tracing_disarm_snapshot(struct trace_array *tr);
 #else
 static inline void tracing_snapshot_instance(struct trace_array *tr) { }
 static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
 {
        return 0;
 }
+static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; }
+static inline void tracing_disarm_snapshot(struct trace_array *tr) { }
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index b33c3861fbbb..62e4f58b8671 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -597,20 +597,12 @@ static int register_trigger(char *glob,
        return ret;
 }
 
-/**
- * unregister_trigger - Generic event_command @unreg implementation
- * @glob: The raw string used to register the trigger
- * @test: Trigger-specific data used to find the trigger to remove
- * @file: The trace_event_file associated with the event
- *
- * Common implementation for event trigger unregistration.
- *
- * Usually used directly as the @unreg method in event command
- * implementations.
+/*
+ * Returns true if the trigger was found and unregistered, else false.
  */
-static void unregister_trigger(char *glob,
-                              struct event_trigger_data *test,
-                              struct trace_event_file *file)
+static bool try_unregister_trigger(char *glob,
+                                  struct event_trigger_data *test,
+                                  struct trace_event_file *file)
 {
        struct event_trigger_data *data = NULL, *iter;
 
@@ -626,8 +618,32 @@ static void unregister_trigger(char *glob,
                }
        }
 
-       if (data && data->ops->free)
-               data->ops->free(data);
+       if (data) {
+               if (data->ops->free)
+                       data->ops->free(data);
+
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * unregister_trigger - Generic event_command @unreg implementation
+ * @glob: The raw string used to register the trigger
+ * @test: Trigger-specific data used to find the trigger to remove
+ * @file: The trace_event_file associated with the event
+ *
+ * Common implementation for event trigger unregistration.
+ *
+ * Usually used directly as the @unreg method in event command
+ * implementations.
+ */
+static void unregister_trigger(char *glob,
+                              struct event_trigger_data *test,
+                              struct trace_event_file *file)
+{
+       try_unregister_trigger(glob, test, file);
 }
 
 /*
@@ -1470,7 +1486,7 @@ register_snapshot_trigger(char *glob,
                          struct event_trigger_data *data,
                          struct trace_event_file *file)
 {
-       int ret = tracing_alloc_snapshot_instance(file->tr);
+       int ret = tracing_arm_snapshot(file->tr);
 
        if (ret < 0)
                return ret;
@@ -1478,6 +1494,14 @@ register_snapshot_trigger(char *glob,
        return register_trigger(glob, data, file);
 }
 
+static void unregister_snapshot_trigger(char *glob,
+                                       struct event_trigger_data *data,
+                                       struct trace_event_file *file)
+{
+       if (try_unregister_trigger(glob, data, file))
+               tracing_disarm_snapshot(file->tr);
+}
+
 static int
 snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
 {
@@ -1510,7 +1534,7 @@ static struct event_command trigger_snapshot_cmd = {
        .trigger_type           = ETT_SNAPSHOT,
        .parse                  = event_trigger_parse,
        .reg                    = register_snapshot_trigger,
-       .unreg                  = unregister_trigger,
+       .unreg                  = unregister_snapshot_trigger,
        .get_trigger_ops        = snapshot_get_trigger_ops,
        .set_filter             = set_trigger_filter,
 };
-- 
2.43.0.687.g38aa6559b0-goog


Reply via email to