Commit 0a7e0d0e authored by Bjorn Andersson, committed by David S. Miller

net: qrtr: Migrate node lookup tree to spinlock

Move operations on the qrtr_nodes radix tree under a separate spinlock
and make the qrtr_nodes tree GFP_ATOMIC, to allow operation from atomic
context in a subsequent patch.
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5fdeb0d3
net/qrtr/qrtr.c
@@ -8,6 +8,7 @@
 #include <linux/qrtr.h>
 #include <linux/termios.h>	/* For TIOCINQ/OUTQ */
 #include <linux/numa.h>
+#include <linux/spinlock.h>
 #include <linux/wait.h>
 
 #include <net/sock.h>
@@ -98,10 +99,11 @@ static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
 static unsigned int qrtr_local_nid = NUMA_NO_NODE;
 
 /* for node ids */
-static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
+static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
+static DEFINE_SPINLOCK(qrtr_nodes_lock);
 /* broadcast list */
 static LIST_HEAD(qrtr_all_nodes);
-/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
+/* lock for qrtr_all_nodes and node reference */
 static DEFINE_MUTEX(qrtr_node_lock);
 
 /* local port allocation management */
@@ -165,10 +167,13 @@ static void __qrtr_node_release(struct kref *kref)
 {
 	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
 	struct radix_tree_iter iter;
+	unsigned long flags;
 	void __rcu **slot;
 
+	spin_lock_irqsave(&qrtr_nodes_lock, flags);
 	if (node->nid != QRTR_EP_NID_AUTO)
 		radix_tree_delete(&qrtr_nodes, node->nid);
+	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
 
 	list_del(&node->item);
 	mutex_unlock(&qrtr_node_lock);
@@ -376,11 +381,12 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
 {
 	struct qrtr_node *node;
+	unsigned long flags;
 
-	mutex_lock(&qrtr_node_lock);
+	spin_lock_irqsave(&qrtr_nodes_lock, flags);
 	node = radix_tree_lookup(&qrtr_nodes, nid);
 	node = qrtr_node_acquire(node);
-	mutex_unlock(&qrtr_node_lock);
+	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
 
 	return node;
 }
@@ -392,13 +398,15 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
  */
 static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
 {
+	unsigned long flags;
+
 	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
 		return;
 
-	mutex_lock(&qrtr_node_lock);
+	spin_lock_irqsave(&qrtr_nodes_lock, flags);
 	radix_tree_insert(&qrtr_nodes, nid, node);
 	node->nid = nid;
-	mutex_unlock(&qrtr_node_lock);
+	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
 }
 
 /**
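
For readers unfamiliar with the pattern: a mutex may sleep, so it cannot be taken from atomic context, while a spinlock acquired with spin_lock_irqsave() can. And because radix_tree_insert() may allocate internal tree nodes, the tree must be declared with GFP_ATOMIC so those allocations never sleep while the spinlock is held. Below is a minimal, self-contained sketch of the same scheme; the names example_tree, example_lock, example_lookup and example_insert are hypothetical and not part of this patch.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/* Tree allocations use GFP_ATOMIC: they may happen under a spinlock. */
static RADIX_TREE(example_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(example_lock);

/* Safe from process, softirq and hardirq context alike. */
static void *example_lookup(unsigned long id)
{
	unsigned long flags;
	void *entry;

	spin_lock_irqsave(&example_lock, flags);
	entry = radix_tree_lookup(&example_tree, id);
	spin_unlock_irqrestore(&example_lock, flags);

	return entry;
}

/* Insertion may allocate, hence GFP_ATOMIC in the tree declaration. */
static int example_insert(unsigned long id, void *entry)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&example_lock, flags);
	ret = radix_tree_insert(&example_tree, id, entry);
	spin_unlock_irqrestore(&example_lock, flags);

	return ret;
}

Note that in the patch itself the lookup additionally takes a reference (qrtr_node_acquire()) before dropping the lock, so the returned node cannot be released out from under the caller.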