Commit 9a6b201b authored by Joonas Lahtinen

Merge remote-tracking branch 'tip/locking/wwmutex' into drm-intel-gt-next

Needed by Maarten's series "drm/i915: Short-term pinning and async
eviction".

Link: https://lists.freedesktop.org/archives/intel-gfx/2021-September/277870.html
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parents d0c56031 12235da8
@@ -248,7 +248,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
 	if (ctx->trylock_only) {
 		lockdep_assert_held(&ctx->ww_ctx);
-		if (!ww_mutex_trylock(&lock->mutex))
+		if (!ww_mutex_trylock(&lock->mutex, NULL))
 			return -EBUSY;
 		else
 			return 0;
@@ -145,7 +145,7 @@ static inline int regulator_lock_nested(struct regulator_dev *rdev,
 	mutex_lock(&regulator_nesting_mutex);
-	if (ww_ctx || !ww_mutex_trylock(&rdev->mutex)) {
+	if (!ww_mutex_trylock(&rdev->mutex, ww_ctx)) {
 		if (rdev->mutex_owner == current)
 			rdev->ref_cnt++;
 		else
@@ -173,7 +173,7 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
  */
 static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
 {
-	return ww_mutex_trylock(&obj->lock);
+	return ww_mutex_trylock(&obj->lock, NULL);
 }
 
 /**
@@ -28,12 +28,10 @@
 #ifndef CONFIG_PREEMPT_RT
 #define WW_MUTEX_BASE			mutex
 #define ww_mutex_base_init(l,n,k)	__mutex_init(l,n,k)
-#define ww_mutex_base_trylock(l)	mutex_trylock(l)
 #define ww_mutex_base_is_locked(b)	mutex_is_locked((b))
 #else
 #define WW_MUTEX_BASE			rt_mutex
 #define ww_mutex_base_init(l,n,k)	__rt_mutex_init(l,n,k)
-#define ww_mutex_base_trylock(l)	rt_mutex_trylock(l)
 #define ww_mutex_base_is_locked(b)	rt_mutex_base_is_locked(&(b)->rtmutex)
 #endif
@@ -339,17 +337,8 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
 
 extern void ww_mutex_unlock(struct ww_mutex *lock);
 
-/**
- * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
- * @lock: mutex to lock
- *
- * Trylocks a mutex without acquire context, so no deadlock detection is
- * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
- */
-static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
-{
-	return ww_mutex_base_trylock(&lock->base);
-}
+extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
+					 struct ww_acquire_ctx *ctx);
 
 /***
  * ww_mutex_destroy - mark a w/w mutex unusable
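For context, a minimal before/after sketch of a caller under the new declaration; obj, ctx and do_something_locked() are hypothetical placeholders, not names from this diff:

	/* NULL context: behaves like the old ww_mutex_trylock(), no deadlock detection. */
	if (ww_mutex_trylock(&obj->lock, NULL))
		do_something_locked(obj);

	/*
	 * With an acquire context: a successful trylock is accounted to @ctx,
	 * so lockdep can order it against other locks taken under that context.
	 */
	if (ww_mutex_trylock(&obj->lock, &ctx))
		do_something_locked(obj);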
@@ -94,6 +94,9 @@ static inline unsigned long __owner_flags(unsigned long owner)
 	return owner & MUTEX_FLAGS;
 }
 
+/*
+ * Returns: __mutex_owner(lock) on failure or NULL on success.
+ */
 static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 {
 	unsigned long owner, curr = (unsigned long)current;
@@ -736,6 +739,44 @@ __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
 }
 
+/**
+ * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
+ * @ww: mutex to lock
+ * @ww_ctx: optional w/w acquire context
+ *
+ * Trylocks a mutex with the optional acquire context; no deadlock detection is
+ * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
+ *
+ * Unlike ww_mutex_lock, no deadlock handling is performed. However, if a @ww_ctx is
+ * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
+{
+	if (!ww_ctx)
+		return mutex_trylock(&ww->base);
+
+	MUTEX_WARN_ON(ww->base.magic != &ww->base);
+
+	/*
+	 * Reset the wounded flag after a kill. No other process can
+	 * race and wound us here, since they can't have a valid owner
+	 * pointer if we don't have any locks held.
+	 */
+	if (ww_ctx->acquired == 0)
+		ww_ctx->wounded = 0;
+
+	if (__mutex_trylock(&ww->base)) {
+		ww_mutex_set_context_fastpath(ww, ww_ctx);
+		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ww_mutex_trylock);
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
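A minimal usage sketch of the context-aware trylock, in the style of the eviction loops this merge is meant to enable; struct my_obj, its link field and my_obj_evict() are hypothetical, not part of this commit:

	static void try_evict_list(struct list_head *lru, struct ww_acquire_ctx *ctx)
	{
		struct my_obj *obj;

		list_for_each_entry(obj, lru, link) {
			/* Never blocks: contended objects are skipped, not waited on. */
			if (!ww_mutex_trylock(&obj->lock, ctx))
				continue;

			my_obj_evict(obj);	/* obj->lock is held and tracked by @ctx */
			ww_mutex_unlock(&obj->lock);
		}
	}

Passing @ctx rather than NULL matters once the context already holds other ww_mutexes: the trylocked lock joins the same acquire context, so lockdep sees one consistent locking picture.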
@@ -16,6 +16,15 @@
 static DEFINE_WD_CLASS(ww_class);
 struct workqueue_struct *wq;
 
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+#define ww_acquire_init_noinject(a, b) do { \
+		ww_acquire_init((a), (b)); \
+		(a)->deadlock_inject_countdown = ~0U; \
+	} while (0)
+#else
+#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
+#endif
+
 struct test_mutex {
 	struct work_struct work;
 	struct ww_mutex mutex;
@@ -36,7 +45,7 @@ static void test_mutex_work(struct work_struct *work)
 	wait_for_completion(&mtx->go);
 
 	if (mtx->flags & TEST_MTX_TRY) {
-		while (!ww_mutex_trylock(&mtx->mutex))
+		while (!ww_mutex_trylock(&mtx->mutex, NULL))
 			cond_resched();
 	} else {
 		ww_mutex_lock(&mtx->mutex, NULL);
@@ -109,19 +118,38 @@ static int test_mutex(void)
 	return 0;
 }
 
-static int test_aa(void)
+static int test_aa(bool trylock)
 {
 	struct ww_mutex mutex;
 	struct ww_acquire_ctx ctx;
 	int ret;
+	const char *from = trylock ? "trylock" : "lock";
 
 	ww_mutex_init(&mutex, &ww_class);
 	ww_acquire_init(&ctx, &ww_class);
 
-	ww_mutex_lock(&mutex, &ctx);
+	if (!trylock) {
+		ret = ww_mutex_lock(&mutex, &ctx);
+		if (ret) {
+			pr_err("%s: initial lock failed!\n", __func__);
+			goto out;
+		}
+	} else {
+		if (!ww_mutex_trylock(&mutex, &ctx)) {
+			pr_err("%s: initial trylock failed!\n", __func__);
+			goto out;
+		}
+	}
 
-	if (ww_mutex_trylock(&mutex)) {
-		pr_err("%s: trylocked itself!\n", __func__);
+	if (ww_mutex_trylock(&mutex, NULL)) {
+		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
+		ww_mutex_unlock(&mutex);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (ww_mutex_trylock(&mutex, &ctx)) {
+		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
 		ww_mutex_unlock(&mutex);
 		ret = -EINVAL;
 		goto out;
@@ -129,17 +157,17 @@ static int test_aa(void)
 
 	ret = ww_mutex_lock(&mutex, &ctx);
 	if (ret != -EALREADY) {
-		pr_err("%s: missed deadlock for recursing, ret=%d\n",
-		       __func__, ret);
+		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
+		       __func__, ret, from);
 		if (!ret)
 			ww_mutex_unlock(&mutex);
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ww_mutex_unlock(&mutex);
 	ret = 0;
 out:
-	ww_mutex_unlock(&mutex);
 	ww_acquire_fini(&ctx);
 	return ret;
 }
@@ -150,7 +178,7 @@ struct test_abba {
 	struct ww_mutex b_mutex;
 	struct completion a_ready;
 	struct completion b_ready;
-	bool resolve;
+	bool resolve, trylock;
 	int result;
 };
@@ -160,8 +188,13 @@ static void test_abba_work(struct work_struct *work)
 	struct ww_acquire_ctx ctx;
 	int err;
 
-	ww_acquire_init(&ctx, &ww_class);
-	ww_mutex_lock(&abba->b_mutex, &ctx);
+	ww_acquire_init_noinject(&ctx, &ww_class);
+	if (!abba->trylock)
+		ww_mutex_lock(&abba->b_mutex, &ctx);
+	else
+		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));
+
+	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);
 
 	complete(&abba->b_ready);
 	wait_for_completion(&abba->a_ready);
@@ -181,7 +214,7 @@ static void test_abba_work(struct work_struct *work)
 	abba->result = err;
 }
 
-static int test_abba(bool resolve)
+static int test_abba(bool trylock, bool resolve)
 {
 	struct test_abba abba;
 	struct ww_acquire_ctx ctx;
@@ -192,12 +225,18 @@ static int test_abba(bool resolve)
 	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
 	init_completion(&abba.a_ready);
 	init_completion(&abba.b_ready);
+	abba.trylock = trylock;
 	abba.resolve = resolve;
 
 	schedule_work(&abba.work);
 
-	ww_acquire_init(&ctx, &ww_class);
-	ww_mutex_lock(&abba.a_mutex, &ctx);
+	ww_acquire_init_noinject(&ctx, &ww_class);
+	if (!trylock)
+		ww_mutex_lock(&abba.a_mutex, &ctx);
+	else
+		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));
+
+	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);
 
 	complete(&abba.a_ready);
 	wait_for_completion(&abba.b_ready);
@@ -249,7 +288,7 @@ static void test_cycle_work(struct work_struct *work)
 	struct ww_acquire_ctx ctx;
 	int err, erra = 0;
 
-	ww_acquire_init(&ctx, &ww_class);
+	ww_acquire_init_noinject(&ctx, &ww_class);
 	ww_mutex_lock(&cycle->a_mutex, &ctx);
 	complete(cycle->a_signal);
@@ -581,7 +620,9 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
 static int __init test_ww_mutex_init(void)
 {
 	int ncpus = num_online_cpus();
-	int ret;
+	int ret, i;
+
+	printk(KERN_INFO "Beginning ww mutex selftests\n");
 
 	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
 	if (!wq)
@@ -591,17 +632,19 @@ static int __init test_ww_mutex_init(void)
 	if (ret)
 		return ret;
 
-	ret = test_aa();
+	ret = test_aa(false);
 	if (ret)
 		return ret;
 
-	ret = test_abba(false);
+	ret = test_aa(true);
 	if (ret)
 		return ret;
 
-	ret = test_abba(true);
-	if (ret)
-		return ret;
+	for (i = 0; i < 4; i++) {
+		ret = test_abba(i & 1, i & 2);
+		if (ret)
+			return ret;
+	}
 
 	ret = test_cycle(ncpus);
 	if (ret)
@@ -619,6 +662,7 @@ static int __init test_ww_mutex_init(void)
 	if (ret)
 		return ret;
 
+	printk(KERN_INFO "All ww mutex selftests passed\n");
 	return 0;
 }
@@ -9,6 +9,31 @@
 #define WW_RT
 #include "rtmutex.c"
 
+int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+	struct rt_mutex *rtm = &lock->base;
+
+	if (!ww_ctx)
+		return rt_mutex_trylock(rtm);
+
+	/*
+	 * Reset the wounded flag after a kill. No other process can
+	 * race and wound us here, since they can't have a valid owner
+	 * pointer if we don't have any locks held.
+	 */
+	if (ww_ctx->acquired == 0)
+		ww_ctx->wounded = 0;
+
+	if (__rt_mutex_trylock(&rtm->rtmutex)) {
+		ww_mutex_set_context_fastpath(lock, ww_ctx);
+		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ww_mutex_trylock);
+
 static int __sched
 __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
 		   unsigned int state, unsigned long ip)
@@ -258,7 +258,7 @@ static void init_shared_classes(void)
 #define WWAF(x)		ww_acquire_fini(x)
 
 #define WWL(x, c)	ww_mutex_lock(x, c)
-#define WWT(x)		ww_mutex_trylock(x)
+#define WWT(x)		ww_mutex_trylock(x, NULL)
 #define WWL1(x)		ww_mutex_lock(x, NULL)
 #define WWU(x)		ww_mutex_unlock(x)