Commit 90305829 authored by Jukka Rissanen, committed by Marcel Holtmann

Bluetooth: 6lowpan: Converting rwlocks to use RCU

The rwlocks are converted to use RCU. This improves performance, as the
IRQ-disabling locks are no longer needed.
Signed-off-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
parent da213f8e
...@@ -53,7 +53,7 @@ struct skb_cb { ...@@ -53,7 +53,7 @@ struct skb_cb {
* The list contains struct lowpan_dev elements. * The list contains struct lowpan_dev elements.
*/ */
static LIST_HEAD(bt_6lowpan_devices); static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock); static DEFINE_SPINLOCK(devices_lock);
/* If psm is set to 0 (default value), then 6lowpan is disabled. /* If psm is set to 0 (default value), then 6lowpan is disabled.
* Other values are used to indicate a Protocol Service Multiplexer * Other values are used to indicate a Protocol Service Multiplexer
...@@ -67,6 +67,7 @@ static struct l2cap_chan *listen_chan; ...@@ -67,6 +67,7 @@ static struct l2cap_chan *listen_chan;
struct lowpan_peer { struct lowpan_peer {
struct list_head list; struct list_head list;
struct rcu_head rcu;
struct l2cap_chan *chan; struct l2cap_chan *chan;
/* peer addresses in various formats */ /* peer addresses in various formats */
...@@ -86,6 +87,13 @@ struct lowpan_dev { ...@@ -86,6 +87,13 @@ struct lowpan_dev {
struct delayed_work notify_peers; struct delayed_work notify_peers;
}; };
static inline void peer_free(struct rcu_head *head)
{
struct lowpan_peer *e = container_of(head, struct lowpan_peer, rcu);
kfree(e);
}
static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{ {
return netdev_priv(netdev); return netdev_priv(netdev);
...@@ -93,13 +101,14 @@ static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) ...@@ -93,13 +101,14 @@ static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer) static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{ {
list_add(&peer->list, &dev->peers); list_add_rcu(&peer->list, &dev->peers);
atomic_inc(&dev->peer_count); atomic_inc(&dev->peer_count);
} }
static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{ {
list_del(&peer->list); list_del_rcu(&peer->list);
call_rcu(&peer->rcu, peer_free);
module_put(THIS_MODULE); module_put(THIS_MODULE);
...@@ -114,31 +123,37 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) ...@@ -114,31 +123,37 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev, static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
bdaddr_t *ba, __u8 type) bdaddr_t *ba, __u8 type)
{ {
struct lowpan_peer *peer, *tmp; struct lowpan_peer *peer;
BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count), BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
ba, type); ba, type);
list_for_each_entry_safe(peer, tmp, &dev->peers, list) { rcu_read_lock();
list_for_each_entry_rcu(peer, &dev->peers, list) {
BT_DBG("dst addr %pMR dst type %d", BT_DBG("dst addr %pMR dst type %d",
&peer->chan->dst, peer->chan->dst_type); &peer->chan->dst, peer->chan->dst_type);
if (bacmp(&peer->chan->dst, ba)) if (bacmp(&peer->chan->dst, ba))
continue; continue;
if (type == peer->chan->dst_type) if (type == peer->chan->dst_type) {
rcu_read_unlock();
return peer; return peer;
}
} }
rcu_read_unlock();
return NULL; return NULL;
} }
static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev, static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
struct l2cap_chan *chan) struct l2cap_chan *chan)
{ {
struct lowpan_peer *peer, *tmp; struct lowpan_peer *peer;
list_for_each_entry_safe(peer, tmp, &dev->peers, list) { list_for_each_entry_rcu(peer, &dev->peers, list) {
if (peer->chan == chan) if (peer->chan == chan)
return peer; return peer;
} }
...@@ -146,12 +161,12 @@ static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev, ...@@ -146,12 +161,12 @@ static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev,
return NULL; return NULL;
} }
static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev, static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
struct l2cap_conn *conn) struct l2cap_conn *conn)
{ {
struct lowpan_peer *peer, *tmp; struct lowpan_peer *peer;
list_for_each_entry_safe(peer, tmp, &dev->peers, list) { list_for_each_entry_rcu(peer, &dev->peers, list) {
if (peer->chan->conn == conn) if (peer->chan->conn == conn)
return peer; return peer;
} }
...@@ -163,7 +178,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, ...@@ -163,7 +178,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
struct in6_addr *daddr, struct in6_addr *daddr,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct lowpan_peer *peer, *tmp; struct lowpan_peer *peer;
struct in6_addr *nexthop; struct in6_addr *nexthop;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
int count = atomic_read(&dev->peer_count); int count = atomic_read(&dev->peer_count);
...@@ -174,9 +189,13 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, ...@@ -174,9 +189,13 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
* send the packet. If only one peer exists, then we can send the * send the packet. If only one peer exists, then we can send the
* packet right away. * packet right away.
*/ */
if (count == 1) if (count == 1) {
return list_first_entry(&dev->peers, struct lowpan_peer, rcu_read_lock();
list); peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer,
list);
rcu_read_unlock();
return peer;
}
if (!rt) { if (!rt) {
nexthop = &lowpan_cb(skb)->gw; nexthop = &lowpan_cb(skb)->gw;
...@@ -195,53 +214,57 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, ...@@ -195,53 +214,57 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
BT_DBG("gw %pI6c", nexthop); BT_DBG("gw %pI6c", nexthop);
list_for_each_entry_safe(peer, tmp, &dev->peers, list) { rcu_read_lock();
list_for_each_entry_rcu(peer, &dev->peers, list) {
BT_DBG("dst addr %pMR dst type %d ip %pI6c", BT_DBG("dst addr %pMR dst type %d ip %pI6c",
&peer->chan->dst, peer->chan->dst_type, &peer->chan->dst, peer->chan->dst_type,
&peer->peer_addr); &peer->peer_addr);
if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
rcu_read_unlock();
return peer; return peer;
}
} }
rcu_read_unlock();
return NULL; return NULL;
} }
static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{ {
struct lowpan_dev *entry, *tmp; struct lowpan_dev *entry;
struct lowpan_peer *peer = NULL; struct lowpan_peer *peer = NULL;
unsigned long flags;
read_lock_irqsave(&devices_lock, flags); rcu_read_lock();
list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
peer = peer_lookup_conn(entry, conn); peer = __peer_lookup_conn(entry, conn);
if (peer) if (peer)
break; break;
} }
read_unlock_irqrestore(&devices_lock, flags); rcu_read_unlock();
return peer; return peer;
} }
static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn) static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{ {
struct lowpan_dev *entry, *tmp; struct lowpan_dev *entry;
struct lowpan_dev *dev = NULL; struct lowpan_dev *dev = NULL;
unsigned long flags;
read_lock_irqsave(&devices_lock, flags); rcu_read_lock();
list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
if (conn->hcon->hdev == entry->hdev) { if (conn->hcon->hdev == entry->hdev) {
dev = entry; dev = entry;
break; break;
} }
} }
read_unlock_irqrestore(&devices_lock, flags); rcu_read_unlock();
return dev; return dev;
} }
...@@ -264,13 +287,12 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, ...@@ -264,13 +287,12 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
u8 iphc0, iphc1; u8 iphc0, iphc1;
struct lowpan_dev *dev; struct lowpan_dev *dev;
struct lowpan_peer *peer; struct lowpan_peer *peer;
unsigned long flags;
dev = lowpan_dev(netdev); dev = lowpan_dev(netdev);
read_lock_irqsave(&devices_lock, flags); rcu_read_lock();
peer = peer_lookup_chan(dev, chan); peer = __peer_lookup_chan(dev, chan);
read_unlock_irqrestore(&devices_lock, flags); rcu_read_unlock();
if (!peer) if (!peer)
goto drop; goto drop;
...@@ -452,7 +474,6 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, ...@@ -452,7 +474,6 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
if (ipv6_addr_is_multicast(&ipv6_daddr)) { if (ipv6_addr_is_multicast(&ipv6_daddr)) {
lowpan_cb(skb)->chan = NULL; lowpan_cb(skb)->chan = NULL;
} else { } else {
unsigned long flags;
u8 addr_type; u8 addr_type;
/* Get destination BT device from skb. /* Get destination BT device from skb.
...@@ -463,19 +484,14 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, ...@@ -463,19 +484,14 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
BT_DBG("dest addr %pMR type %d IP %pI6c", &addr, BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
addr_type, &ipv6_daddr); addr_type, &ipv6_daddr);
read_lock_irqsave(&devices_lock, flags);
peer = peer_lookup_ba(dev, &addr, addr_type); peer = peer_lookup_ba(dev, &addr, addr_type);
read_unlock_irqrestore(&devices_lock, flags);
if (!peer) { if (!peer) {
/* The packet might be sent to 6lowpan interface /* The packet might be sent to 6lowpan interface
* because of routing (either via default route * because of routing (either via default route
* or user set route) so get peer according to * or user set route) so get peer according to
* the destination address. * the destination address.
*/ */
read_lock_irqsave(&devices_lock, flags);
peer = peer_lookup_dst(dev, &ipv6_daddr, skb); peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
read_unlock_irqrestore(&devices_lock, flags);
if (!peer) { if (!peer) {
BT_DBG("no such peer %pMR found", &addr); BT_DBG("no such peer %pMR found", &addr);
return -ENOENT; return -ENOENT;
...@@ -558,14 +574,13 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, ...@@ -558,14 +574,13 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{ {
struct sk_buff *local_skb; struct sk_buff *local_skb;
struct lowpan_dev *entry, *tmp; struct lowpan_dev *entry;
unsigned long flags;
int err = 0; int err = 0;
read_lock_irqsave(&devices_lock, flags); rcu_read_lock();
list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
struct lowpan_peer *pentry, *ptmp; struct lowpan_peer *pentry;
struct lowpan_dev *dev; struct lowpan_dev *dev;
if (entry->netdev != netdev) if (entry->netdev != netdev)
...@@ -573,7 +588,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) ...@@ -573,7 +588,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
dev = lowpan_dev(entry->netdev); dev = lowpan_dev(entry->netdev);
list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) { list_for_each_entry_rcu(pentry, &dev->peers, list) {
int ret; int ret;
local_skb = skb_clone(skb, GFP_ATOMIC); local_skb = skb_clone(skb, GFP_ATOMIC);
...@@ -590,7 +605,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) ...@@ -590,7 +605,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
} }
} }
read_unlock_irqrestore(&devices_lock, flags); rcu_read_unlock();
return err; return err;
} }
...@@ -792,7 +807,6 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, ...@@ -792,7 +807,6 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
struct lowpan_dev *dev) struct lowpan_dev *dev)
{ {
struct lowpan_peer *peer; struct lowpan_peer *peer;
unsigned long flags;
peer = kzalloc(sizeof(*peer), GFP_ATOMIC); peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer) if (!peer)
...@@ -815,10 +829,10 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, ...@@ -815,10 +829,10 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
*/ */
set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8); set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8);
write_lock_irqsave(&devices_lock, flags); spin_lock(&devices_lock);
INIT_LIST_HEAD(&peer->list); INIT_LIST_HEAD(&peer->list);
peer_add(dev, peer); peer_add(dev, peer);
write_unlock_irqrestore(&devices_lock, flags); spin_unlock(&devices_lock);
/* Notifying peers about us needs to be done without locks held */ /* Notifying peers about us needs to be done without locks held */
INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
...@@ -831,7 +845,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) ...@@ -831,7 +845,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
{ {
struct net_device *netdev; struct net_device *netdev;
int err = 0; int err = 0;
unsigned long flags;
netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE, netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
NET_NAME_UNKNOWN, netdev_setup); NET_NAME_UNKNOWN, netdev_setup);
...@@ -861,10 +874,10 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) ...@@ -861,10 +874,10 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
(*dev)->hdev = chan->conn->hcon->hdev; (*dev)->hdev = chan->conn->hcon->hdev;
INIT_LIST_HEAD(&(*dev)->peers); INIT_LIST_HEAD(&(*dev)->peers);
write_lock_irqsave(&devices_lock, flags); spin_lock(&devices_lock);
INIT_LIST_HEAD(&(*dev)->list); INIT_LIST_HEAD(&(*dev)->list);
list_add(&(*dev)->list, &bt_6lowpan_devices); list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
write_unlock_irqrestore(&devices_lock, flags); spin_unlock(&devices_lock);
return 0; return 0;
...@@ -918,11 +931,10 @@ static void delete_netdev(struct work_struct *work) ...@@ -918,11 +931,10 @@ static void delete_netdev(struct work_struct *work)
static void chan_close_cb(struct l2cap_chan *chan) static void chan_close_cb(struct l2cap_chan *chan)
{ {
struct lowpan_dev *entry, *tmp; struct lowpan_dev *entry;
struct lowpan_dev *dev = NULL; struct lowpan_dev *dev = NULL;
struct lowpan_peer *peer; struct lowpan_peer *peer;
int err = -ENOENT; int err = -ENOENT;
unsigned long flags;
bool last = false, removed = true; bool last = false, removed = true;
BT_DBG("chan %p conn %p", chan, chan->conn); BT_DBG("chan %p conn %p", chan, chan->conn);
...@@ -937,11 +949,11 @@ static void chan_close_cb(struct l2cap_chan *chan) ...@@ -937,11 +949,11 @@ static void chan_close_cb(struct l2cap_chan *chan)
removed = false; removed = false;
} }
write_lock_irqsave(&devices_lock, flags); spin_lock(&devices_lock);
list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
dev = lowpan_dev(entry->netdev); dev = lowpan_dev(entry->netdev);
peer = peer_lookup_chan(dev, chan); peer = __peer_lookup_chan(dev, chan);
if (peer) { if (peer) {
last = peer_del(dev, peer); last = peer_del(dev, peer);
err = 0; err = 0;
...@@ -952,13 +964,12 @@ static void chan_close_cb(struct l2cap_chan *chan) ...@@ -952,13 +964,12 @@ static void chan_close_cb(struct l2cap_chan *chan)
atomic_read(&chan->kref.refcount)); atomic_read(&chan->kref.refcount));
l2cap_chan_put(chan); l2cap_chan_put(chan);
kfree(peer);
break; break;
} }
} }
if (!err && last && dev && !atomic_read(&dev->peer_count)) { if (!err && last && dev && !atomic_read(&dev->peer_count)) {
write_unlock_irqrestore(&devices_lock, flags); spin_unlock(&devices_lock);
cancel_delayed_work_sync(&dev->notify_peers); cancel_delayed_work_sync(&dev->notify_peers);
...@@ -969,7 +980,7 @@ static void chan_close_cb(struct l2cap_chan *chan) ...@@ -969,7 +980,7 @@ static void chan_close_cb(struct l2cap_chan *chan)
schedule_work(&entry->delete_netdev); schedule_work(&entry->delete_netdev);
} }
} else { } else {
write_unlock_irqrestore(&devices_lock, flags); spin_unlock(&devices_lock);
} }
return; return;
...@@ -1161,10 +1172,9 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, ...@@ -1161,10 +1172,9 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
static void disconnect_all_peers(void) static void disconnect_all_peers(void)
{ {
struct lowpan_dev *entry, *tmp_dev; struct lowpan_dev *entry;
struct lowpan_peer *peer, *tmp_peer, *new_peer; struct lowpan_peer *peer, *tmp_peer, *new_peer;
struct list_head peers; struct list_head peers;
unsigned long flags;
INIT_LIST_HEAD(&peers); INIT_LIST_HEAD(&peers);
...@@ -1173,10 +1183,10 @@ static void disconnect_all_peers(void) ...@@ -1173,10 +1183,10 @@ static void disconnect_all_peers(void)
* with the same list at the same time. * with the same list at the same time.
*/ */
read_lock_irqsave(&devices_lock, flags); rcu_read_lock();
list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) { list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) { list_for_each_entry_rcu(peer, &entry->peers, list) {
new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC); new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
if (!new_peer) if (!new_peer)
break; break;
...@@ -1188,26 +1198,36 @@ static void disconnect_all_peers(void) ...@@ -1188,26 +1198,36 @@ static void disconnect_all_peers(void)
} }
} }
read_unlock_irqrestore(&devices_lock, flags); rcu_read_unlock();
spin_lock(&devices_lock);
list_for_each_entry_safe(peer, tmp_peer, &peers, list) { list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
l2cap_chan_close(peer->chan, ENOENT); l2cap_chan_close(peer->chan, ENOENT);
kfree(peer);
list_del_rcu(&peer->list);
call_rcu(&peer->rcu, peer_free);
module_put(THIS_MODULE);
} }
spin_unlock(&devices_lock);
} }
static int lowpan_psm_set(void *data, u64 val) struct set_psm {
{ struct work_struct work;
u16 psm; u16 psm;
};
psm = val; static void do_psm_set(struct work_struct *work)
if (psm == 0 || psm_6lowpan != psm) {
struct set_psm *set_psm = container_of(work, struct set_psm, work);
if (set_psm->psm == 0 || psm_6lowpan != set_psm->psm)
/* Disconnect existing connections if 6lowpan is /* Disconnect existing connections if 6lowpan is
* disabled (psm = 0), or if psm changes. * disabled (psm = 0), or if psm changes.
*/ */
disconnect_all_peers(); disconnect_all_peers();
psm_6lowpan = psm; psm_6lowpan = set_psm->psm;
if (listen_chan) { if (listen_chan) {
l2cap_chan_close(listen_chan, 0); l2cap_chan_close(listen_chan, 0);
...@@ -1216,6 +1236,22 @@ static int lowpan_psm_set(void *data, u64 val) ...@@ -1216,6 +1236,22 @@ static int lowpan_psm_set(void *data, u64 val)
listen_chan = bt_6lowpan_listen(); listen_chan = bt_6lowpan_listen();
kfree(set_psm);
}
static int lowpan_psm_set(void *data, u64 val)
{
struct set_psm *set_psm;
set_psm = kzalloc(sizeof(*set_psm), GFP_KERNEL);
if (!set_psm)
return -ENOMEM;
set_psm->psm = val;
INIT_WORK(&set_psm->work, do_psm_set);
schedule_work(&set_psm->work);
return 0; return 0;
} }
...@@ -1297,19 +1333,18 @@ static ssize_t lowpan_control_write(struct file *fp, ...@@ -1297,19 +1333,18 @@ static ssize_t lowpan_control_write(struct file *fp,
static int lowpan_control_show(struct seq_file *f, void *ptr) static int lowpan_control_show(struct seq_file *f, void *ptr)
{ {
struct lowpan_dev *entry, *tmp_dev; struct lowpan_dev *entry;
struct lowpan_peer *peer, *tmp_peer; struct lowpan_peer *peer;
unsigned long flags;
read_lock_irqsave(&devices_lock, flags); spin_lock(&devices_lock);
list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) { list_for_each_entry(entry, &bt_6lowpan_devices, list) {
list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) list_for_each_entry(peer, &entry->peers, list)
seq_printf(f, "%pMR (type %u)\n", seq_printf(f, "%pMR (type %u)\n",
&peer->chan->dst, peer->chan->dst_type); &peer->chan->dst, peer->chan->dst_type);
} }
read_unlock_irqrestore(&devices_lock, flags); spin_unlock(&devices_lock);
return 0; return 0;
} }
...@@ -1329,9 +1364,8 @@ static const struct file_operations lowpan_control_fops = { ...@@ -1329,9 +1364,8 @@ static const struct file_operations lowpan_control_fops = {
static void disconnect_devices(void) static void disconnect_devices(void)
{ {
struct lowpan_dev *entry, *tmp, *new_dev; struct lowpan_dev *entry, *new_dev;
struct list_head devices; struct list_head devices;
unsigned long flags;
INIT_LIST_HEAD(&devices); INIT_LIST_HEAD(&devices);
...@@ -1340,9 +1374,9 @@ static void disconnect_devices(void) ...@@ -1340,9 +1374,9 @@ static void disconnect_devices(void)
* devices list. * devices list.
*/ */
read_lock_irqsave(&devices_lock, flags); rcu_read_lock();
list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC); new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
if (!new_dev) if (!new_dev)
break; break;
...@@ -1350,12 +1384,12 @@ static void disconnect_devices(void) ...@@ -1350,12 +1384,12 @@ static void disconnect_devices(void)
new_dev->netdev = entry->netdev; new_dev->netdev = entry->netdev;
INIT_LIST_HEAD(&new_dev->list); INIT_LIST_HEAD(&new_dev->list);
list_add(&new_dev->list, &devices); list_add_rcu(&new_dev->list, &devices);
} }
read_unlock_irqrestore(&devices_lock, flags); rcu_read_unlock();
list_for_each_entry_safe(entry, tmp, &devices, list) { list_for_each_entry(entry, &devices, list) {
ifdown(entry->netdev); ifdown(entry->netdev);
BT_DBG("Unregistering netdev %s %p", BT_DBG("Unregistering netdev %s %p",
entry->netdev->name, entry->netdev); entry->netdev->name, entry->netdev);
...@@ -1368,17 +1402,15 @@ static int device_event(struct notifier_block *unused, ...@@ -1368,17 +1402,15 @@ static int device_event(struct notifier_block *unused,
unsigned long event, void *ptr) unsigned long event, void *ptr)
{ {
struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct lowpan_dev *entry, *tmp; struct lowpan_dev *entry;
unsigned long flags;
if (netdev->type != ARPHRD_6LOWPAN) if (netdev->type != ARPHRD_6LOWPAN)
return NOTIFY_DONE; return NOTIFY_DONE;
switch (event) { switch (event) {
case NETDEV_UNREGISTER: case NETDEV_UNREGISTER:
write_lock_irqsave(&devices_lock, flags); spin_lock(&devices_lock);
list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list_for_each_entry(entry, &bt_6lowpan_devices, list) {
list) {
if (entry->netdev == netdev) { if (entry->netdev == netdev) {
BT_DBG("Unregistered netdev %s %p", BT_DBG("Unregistered netdev %s %p",
netdev->name, netdev); netdev->name, netdev);
...@@ -1387,7 +1419,7 @@ static int device_event(struct notifier_block *unused, ...@@ -1387,7 +1419,7 @@ static int device_event(struct notifier_block *unused,
break; break;
} }
} }
write_unlock_irqrestore(&devices_lock, flags); spin_unlock(&devices_lock);
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment