drm/i915: Introduce struct intel_wakeref
author	Chris Wilson <chris@chris-wilson.co.uk>
Wed, 24 Apr 2019 20:07:13 +0000 (21:07 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>
Wed, 24 Apr 2019 21:25:26 +0000 (22:25 +0100)
For controlling runtime pm of the GT and engines, we would like to have
a callback to do extra work the first time we wake up and the last time
we drop the wakeref. This first/last access needs serialisation, and so
we pair a mutex with the regular intel_wakeref_t tracker.
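
As a rough usage sketch of the pattern this introduces (illustrative only:
the "my_engine" structure and its callbacks are hypothetical, not part of
this patch):

struct my_engine {
	struct drm_i915_private *i915;
	struct intel_wakeref wakeref; /* intel_wakeref_init() during setup */
};

static int my_engine_wake(struct intel_wakeref *wf)
{
	/* runs once, under wf->mutex, when the count goes 0 -> 1 */
	return 0;
}

static int my_engine_park(struct intel_wakeref *wf)
{
	/* runs once, under wf->mutex, when the count drops back to 0 */
	return 0;
}

static int my_engine_do_work(struct my_engine *engine)
{
	int err;

	err = intel_wakeref_get(engine->i915, &engine->wakeref, my_engine_wake);
	if (err)
		return err;

	/* ... the device is guaranteed awake here ... */

	return intel_wakeref_put(engine->i915, &engine->wakeref, my_engine_park);
}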

v2: Drop the _once naming and report the errors.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190424200717.1686-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/Makefile.header-test
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_wakeref.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_wakeref.h [new file with mode: 0644]

index 40130cf5c003115dde28c44e37f6f870fc68de5c..233bad5e361f7cbb1342bc11f02b5d64f7e6d62a 100644 (file)
@@ -50,6 +50,7 @@ i915-y += i915_drv.o \
          intel_device_info.o \
          intel_pm.o \
          intel_runtime_pm.o \
+         intel_wakeref.o \
          intel_uncore.o
 
 # core library code
index 96a5d90629ecb6e64db45bf970cc1b4f22d27c0a..e6b3e7588860b2e23cbe13b24973f52144d0b764 100644 (file)
@@ -31,7 +31,8 @@ header_test := \
        intel_psr.h \
        intel_sdvo.h \
        intel_sprite.h \
-       intel_tv.h
+       intel_tv.h \
+       intel_wakeref.h
 
 quiet_cmd_header_test = HDRTEST $@
       cmd_header_test = echo "\#include \"$(<F)\"" > $@
index d37832ffb4718124e961979c05f316650ac2f5a5..437e394d9fa626f0214c1660b154e078e716813d 100644 (file)
@@ -74,6 +74,7 @@
 #include "intel_opregion.h"
 #include "intel_uc.h"
 #include "intel_uncore.h"
+#include "intel_wakeref.h"
 #include "intel_wopcm.h"
 
 #include "i915_gem.h"
@@ -134,8 +135,6 @@ bool i915_error_injected(void);
        __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
                      fmt, ##__VA_ARGS__)
 
-typedef depot_stack_handle_t intel_wakeref_t;
-
 enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
new file mode 100644 (file)
index 0000000..1f94bc4
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_drv.h"
+#include "intel_wakeref.h"
+
+int __intel_wakeref_get_first(struct drm_i915_private *i915,
+                             struct intel_wakeref *wf,
+                             int (*fn)(struct intel_wakeref *wf))
+{
+       /*
+        * Treat get/put as different subclasses, as we may need to run
+        * the put callback from under the shrinker and do not want to
+        * cross-contaminate that callback with any extra work performed
+        * upon acquiring the wakeref.
+        */
+       mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
+       if (!atomic_read(&wf->count)) {
+               int err;
+
+               wf->wakeref = intel_runtime_pm_get(i915);
+
+               err = fn(wf);
+               if (unlikely(err)) {
+                       intel_runtime_pm_put(i915, wf->wakeref);
+                       mutex_unlock(&wf->mutex);
+                       return err;
+               }
+
+               smp_mb__before_atomic(); /* release wf->count */
+       }
+       atomic_inc(&wf->count);
+       mutex_unlock(&wf->mutex);
+
+       return 0;
+}
+
+int __intel_wakeref_put_last(struct drm_i915_private *i915,
+                            struct intel_wakeref *wf,
+                            int (*fn)(struct intel_wakeref *wf))
+{
+       int err;
+
+       err = fn(wf);
+       if (likely(!err))
+               intel_runtime_pm_put(i915, wf->wakeref);
+       else
+               atomic_inc(&wf->count);
+       mutex_unlock(&wf->mutex);
+
+       return err;
+}
+
+void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
+{
+       __mutex_init(&wf->mutex, "wakeref", key);
+       atomic_set(&wf->count, 0);
+}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
new file mode 100644 (file)
index 0000000..a979d63
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_WAKEREF_H
+#define INTEL_WAKEREF_H
+
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/stackdepot.h>
+
+struct drm_i915_private;
+
+typedef depot_stack_handle_t intel_wakeref_t;
+
+struct intel_wakeref {
+       atomic_t count;
+       struct mutex mutex;
+       intel_wakeref_t wakeref;
+};
+
+void __intel_wakeref_init(struct intel_wakeref *wf,
+                         struct lock_class_key *key);
+#define intel_wakeref_init(wf) do {                                    \
+       static struct lock_class_key __key;                             \
+                                                                       \
+       __intel_wakeref_init((wf), &__key);                             \
+} while (0)
+
+int __intel_wakeref_get_first(struct drm_i915_private *i915,
+                             struct intel_wakeref *wf,
+                             int (*fn)(struct intel_wakeref *wf));
+int __intel_wakeref_put_last(struct drm_i915_private *i915,
+                            struct intel_wakeref *wf,
+                            int (*fn)(struct intel_wakeref *wf));
+
+/**
+ * intel_wakeref_get: Acquire the wakeref
+ * @i915: the drm_i915_private device
+ * @wf: the wakeref
+ * @fn: callback for acquiring the wakeref, called only on first acquire.
+ *
+ * Acquire a hold on the wakeref. The first user to do so will acquire
+ * the runtime pm wakeref and then call @fn underneath the wakeref
+ * mutex.
+ *
+ * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
+ * will be released and the acquisition unwound, and an error reported.
+ *
+ * Returns: 0 if the wakeref was acquired successfully, or a negative error
+ * code otherwise.
+ */
+static inline int
+intel_wakeref_get(struct drm_i915_private *i915,
+                 struct intel_wakeref *wf,
+                 int (*fn)(struct intel_wakeref *wf))
+{
+       if (unlikely(!atomic_inc_not_zero(&wf->count)))
+               return __intel_wakeref_get_first(i915, wf, fn);
+
+       return 0;
+}
+
+/**
+ * intel_wakeref_put: Release the wakeref
+ * @i915: the drm_i915_private device
+ * @wf: the wakeref
+ * @fn: callback for releasing the wakeref, called only on final release.
+ *
+ * Release our hold on the wakeref. When there are no more users,
+ * the runtime pm wakeref will be released after the @fn callback is called
+ * underneath the wakeref mutex.
+ *
+ * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
+ * is retained and an error reported.
+ *
+ * Returns: 0 if the wakeref was released successfully, or a negative error
+ * code otherwise.
+ */
+static inline int
+intel_wakeref_put(struct drm_i915_private *i915,
+                 struct intel_wakeref *wf,
+                 int (*fn)(struct intel_wakeref *wf))
+{
+       if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
+               return __intel_wakeref_put_last(i915, wf, fn);
+
+       return 0;
+}
+
+/**
+ * intel_wakeref_lock: Lock the wakeref (mutex)
+ * @wf: the wakeref
+ *
+ * Locks the wakeref to prevent it being acquired or released. New users
+ * can still adjust the counter, but the wakeref itself (and callback)
+ * cannot be acquired or released.
+ */
+static inline void
+intel_wakeref_lock(struct intel_wakeref *wf)
+       __acquires(wf->mutex)
+{
+       mutex_lock(&wf->mutex);
+}
+
+/**
+ * intel_wakeref_unlock: Unlock the wakeref
+ * @wf: the wakeref
+ *
+ * Releases a previously acquired intel_wakeref_lock().
+ */
+static inline void
+intel_wakeref_unlock(struct intel_wakeref *wf)
+       __releases(wf->mutex)
+{
+       mutex_unlock(&wf->mutex);
+}
+
+/**
+ * intel_wakeref_active: Query whether the wakeref is currently held
+ * @wf: the wakeref
+ *
+ * Returns: true if the wakeref is currently held.
+ */
+static inline bool
+intel_wakeref_active(struct intel_wakeref *wf)
+{
+       return atomic_read(&wf->count);
+}
+
+#endif /* INTEL_WAKEREF_H */
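
For completeness, a hypothetical caller of the locking helpers declared
above, sampling whether the wakeref is currently held without racing
against the first-get/last-put callbacks (the function name is
illustrative, not part of this patch):

static bool my_engine_is_parked(struct intel_wakeref *wf)
{
	bool active;

	intel_wakeref_lock(wf);
	active = intel_wakeref_active(wf);
	intel_wakeref_unlock(wf);

	return !active;
}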