Commit 0a13cd1a authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, kref: Implement kref_put_lock()

Because home-rolling your own is _awesome_, stop doing it. Provide
kref_put_lock(), just like kref_put_mutex() but for a spinlock.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 23b19ec3
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 struct kref {
 	atomic_t refcount;
@@ -86,12 +87,21 @@ static inline int kref_put_mutex(struct kref *kref,
 				 struct mutex *lock)
 {
 	WARN_ON(release == NULL);
-	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
-		mutex_lock(lock);
-		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
-			mutex_unlock(lock);
-			return 0;
-		}
+
+	if (atomic_dec_and_mutex_lock(&kref->refcount, lock)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int kref_put_lock(struct kref *kref,
+				void (*release)(struct kref *kref),
+				spinlock_t *lock)
+{
+	WARN_ON(release == NULL);
+
+	if (atomic_dec_and_lock(&kref->refcount, lock)) {
 		release(kref);
 		return 1;
 	}
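
For reference, the helper that the refactored kref_put_mutex() now delegates to performs essentially the same sequence as the removed open-coded version. A rough sketch of atomic_dec_and_mutex_lock() for illustration only (the real definition lives in the kernel's mutex code, not in this commit):

int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* Fast path: decrement, unless the count would hit zero. */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;

	/* Slow path: we might hit zero, so take the lock and re-check. */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* The decrement did not reach zero after all. */
		mutex_unlock(lock);
		return 0;
	}

	/* Count hit zero; return with the lock held. */
	return 1;
}

kref_put_lock() follows the same pattern via atomic_dec_and_lock(), acquiring the spinlock only on the final put and invoking release() with the lock held.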
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -126,13 +126,18 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister);
 static struct hlist_head	auth_domain_table[DN_HASHMAX];
 static DEFINE_SPINLOCK(auth_domain_lock);
 
+static void auth_domain_release(struct kref *kref)
+{
+	struct auth_domain *dom = container_of(kref, struct auth_domain, ref);
+
+	hlist_del(&dom->hash);
+	dom->flavour->domain_release(dom);
+	spin_unlock(&auth_domain_lock);
+}
+
 void auth_domain_put(struct auth_domain *dom)
 {
-	if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) {
-		hlist_del(&dom->hash);
-		dom->flavour->domain_release(dom);
-		spin_unlock(&auth_domain_lock);
-	}
+	kref_put_lock(&dom->ref, auth_domain_release, &auth_domain_lock);
 }
 EXPORT_SYMBOL_GPL(auth_domain_put);
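
For illustration only (not part of this commit), a minimal caller of the new primitive might look like the sketch below, mirroring the auth_domain conversion above; struct foo, foo_release() and foo_lock are hypothetical names. As with auth_domain_release(), the release callback runs with the spinlock held and is responsible for dropping it:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

struct foo {
	struct kref ref;
	struct list_head node;
};

/* Called by kref_put_lock() with foo_lock already held. */
static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, ref);

	list_del(&f->node);	/* unlink while still locked */
	spin_unlock(&foo_lock);	/* the callback must drop the lock */
	kfree(f);
}

static void foo_put(struct foo *f)
{
	/*
	 * Drops a reference; only if the count hits zero is foo_lock
	 * taken and foo_release() invoked with it held.
	 */
	kref_put_lock(&f->ref, foo_release, &foo_lock);
}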