Skip to content

Commit 4cd64e9

Browse files
ickle-intel authored and nirmoy committed
drm/i915: Lift runtime-pm acquire callbacks out of intel_wakeref.mutex
When runtime pm is first woken, it will synchronously call the registered callbacks for the device. These callbacks may pull in their own forest of locks, which we do not want to conflate with the intel_wakeref.mutex. A second minor benefit to reducing the coverage of the mutex, is that it will reduce contention for frequent sleeps and wakes (such as when being used for soft-rc6). v2: remove usage of fetch_and_zero() and other improvements(Jani) Signed-off-by: Chris Wilson <chris.p.wilson@intel.com> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20230926083742.14740-2-nirmoy.das@intel.com
1 parent 0951dce commit 4cd64e9

1 file changed

Lines changed: 27 additions & 25 deletions

File tree

drivers/gpu/drm/i915/intel_wakeref.c

Lines changed: 27 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -10,63 +10,65 @@
1010
#include "intel_wakeref.h"
1111
#include "i915_drv.h"
1212

13-
static void rpm_get(struct intel_wakeref *wf)
14-
{
15-
wf->wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
16-
}
17-
18-
static void rpm_put(struct intel_wakeref *wf)
19-
{
20-
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
21-
22-
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
23-
INTEL_WAKEREF_BUG_ON(!wakeref);
24-
}
25-
2613
int __intel_wakeref_get_first(struct intel_wakeref *wf)
2714
{
15+
intel_wakeref_t wakeref;
16+
int ret = 0;
17+
18+
wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
2819
/*
2920
* Treat get/put as different subclasses, as we may need to run
3021
* the put callback from under the shrinker and do not want to
3122
* cross-contanimate that callback with any extra work performed
3223
* upon acquiring the wakeref.
3324
*/
3425
mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
35-
if (!atomic_read(&wf->count)) {
36-
int err;
37-
38-
rpm_get(wf);
3926

40-
err = wf->ops->get(wf);
41-
if (unlikely(err)) {
42-
rpm_put(wf);
43-
mutex_unlock(&wf->mutex);
44-
return err;
27+
if (!atomic_read(&wf->count)) {
28+
INTEL_WAKEREF_BUG_ON(wf->wakeref);
29+
wf->wakeref = wakeref;
30+
wakeref = 0;
31+
32+
ret = wf->ops->get(wf);
33+
if (ret) {
34+
wakeref = xchg(&wf->wakeref, 0);
35+
wake_up_var(&wf->wakeref);
36+
goto unlock;
4537
}
4638

4739
smp_mb__before_atomic(); /* release wf->count */
4840
}
41+
4942
atomic_inc(&wf->count);
43+
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
44+
45+
unlock:
5046
mutex_unlock(&wf->mutex);
47+
if (unlikely(wakeref))
48+
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
5149

52-
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
53-
return 0;
50+
return ret;
5451
}
5552

5653
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
5754
{
55+
intel_wakeref_t wakeref = 0;
56+
5857
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
5958
if (unlikely(!atomic_dec_and_test(&wf->count)))
6059
goto unlock;
6160

6261
/* ops->put() must reschedule its own release on error/deferral */
6362
if (likely(!wf->ops->put(wf))) {
64-
rpm_put(wf);
63+
INTEL_WAKEREF_BUG_ON(!wf->wakeref);
64+
wakeref = xchg(&wf->wakeref, 0);
6565
wake_up_var(&wf->wakeref);
6666
}
6767

6868
unlock:
6969
mutex_unlock(&wf->mutex);
70+
if (wakeref)
71+
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
7072
}
7173

7274
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)

0 commit comments

Comments
 (0)