Commit dde20266 authored by Ying Xue's avatar Ying Xue Committed by David S. Miller

tipc: use node list lock to protect tipc_num_links variable

Without a proper implicit or explicit read memory barrier, it is
unsafe to read an atomic variable with atomic_read() from a thread
other than the one modifying it with atomic_inc() or atomic_dec().
As a result, tipc_node_get_links() may read a stale value of
tipc_num_links with atomic_read(). Converting the tipc_num_links
variable from an atomic to a plain unsigned integer, and protecting
it with the node list lock, avoids the issue.
Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2220646a
...@@ -47,10 +47,9 @@ static void node_established_contact(struct tipc_node *n_ptr); ...@@ -47,10 +47,9 @@ static void node_established_contact(struct tipc_node *n_ptr);
static struct hlist_head node_htable[NODE_HTABLE_SIZE];
LIST_HEAD(tipc_node_list);
static u32 tipc_num_nodes;
static u32 tipc_num_links;	/* number of unicast links; protected by node_list_lock */
static DEFINE_SPINLOCK(node_list_lock);
/* /*
* A trivial power-of-two bitmask technique is used for speed, since this * A trivial power-of-two bitmask technique is used for speed, since this
* operation is done for every incoming TIPC packet. The number of hash table * operation is done for every incoming TIPC packet. The number of hash table
...@@ -241,7 +240,9 @@ int tipc_node_is_up(struct tipc_node *n_ptr) ...@@ -241,7 +240,9 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{ {
n_ptr->links[l_ptr->b_ptr->identity] = l_ptr; n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
atomic_inc(&tipc_num_links); spin_lock_bh(&node_list_lock);
tipc_num_links++;
spin_unlock_bh(&node_list_lock);
n_ptr->link_cnt++; n_ptr->link_cnt++;
} }
...@@ -253,7 +254,9 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) ...@@ -253,7 +254,9 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
if (l_ptr != n_ptr->links[i]) if (l_ptr != n_ptr->links[i])
continue; continue;
n_ptr->links[i] = NULL; n_ptr->links[i] = NULL;
atomic_dec(&tipc_num_links); spin_lock_bh(&node_list_lock);
tipc_num_links--;
spin_unlock_bh(&node_list_lock);
n_ptr->link_cnt--; n_ptr->link_cnt--;
} }
} }
...@@ -393,18 +396,17 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) ...@@ -393,18 +396,17 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
spin_lock_bh(&node_list_lock); spin_lock_bh(&node_list_lock);
/* Get space for all unicast links + broadcast link */ /* Get space for all unicast links + broadcast link */
payload_size = TLV_SPACE(sizeof(link_info)) * payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
(atomic_read(&tipc_num_links) + 1);
if (payload_size > 32768u) { if (payload_size > 32768u) {
spin_unlock_bh(&node_list_lock); spin_unlock_bh(&node_list_lock);
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (too many links)"); " (too many links)");
} }
spin_unlock_bh(&node_list_lock);
buf = tipc_cfg_reply_alloc(payload_size); buf = tipc_cfg_reply_alloc(payload_size);
if (!buf) { if (!buf)
spin_unlock_bh(&node_list_lock);
return NULL; return NULL;
}
/* Add TLV for broadcast link */ /* Add TLV for broadcast link */
link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
...@@ -432,6 +434,5 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) ...@@ -432,6 +434,5 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
tipc_node_unlock(n_ptr); tipc_node_unlock(n_ptr);
} }
rcu_read_unlock(); rcu_read_unlock();
spin_unlock_bh(&node_list_lock);
return buf; return buf;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment