Commit 2aa824e5 authored by Jean Tourrilhes, committed by Stephen Hemminger

[IRDA]: Fix a potential deadlock in the sir-dev state machine.

Also, make sir-dev locking compatible with irport.
From Martin Diehl.
parent 03c2aba9
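Most of the diff below swaps spin_lock_bh()/spin_unlock_bh() on dev->tx_lock for the irqsave/irqrestore variants, which is what makes the locking compatible with irport's interrupt-driven paths. As a hedged illustration of the underlying rule (this sketch is not from the commit; demo_dev, demo_irq_handler and demo_start_tx are hypothetical names): if a lock can also be taken from a hard-IRQ handler, the non-IRQ side must disable local interrupts while holding it, otherwise the handler can preempt the holder on the same CPU and spin forever on a lock that CPU already owns.

/*
 * Illustrative sketch only, not part of this commit: why a lock shared
 * with a hard-IRQ path needs spin_lock_irqsave() rather than
 * spin_lock_bh().  All demo_* names are made up.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct demo_dev {
	spinlock_t	tx_lock;
	int		tx_pending;
};

static void demo_init(struct demo_dev *d)
{
	spin_lock_init(&d->tx_lock);
	d->tx_pending = 0;
}

/* hard-IRQ context, e.g. a UART interrupt reporting tx completion */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	struct demo_dev *d = dev_id;

	spin_lock(&d->tx_lock);		/* would spin forever if this CPU already held it */
	d->tx_pending = 0;
	spin_unlock(&d->tx_lock);
	return IRQ_HANDLED;
}

/* process or BH context */
static void demo_start_tx(struct demo_dev *d)
{
	unsigned long flags;

	/*
	 * spin_lock_bh() would only mask softirqs; the hard IRQ above could
	 * still fire on this CPU while we hold tx_lock and deadlock on it.
	 * Disabling local interrupts for the critical section closes that
	 * window.
	 */
	spin_lock_irqsave(&d->tx_lock, flags);
	d->tx_pending = 1;
	spin_unlock_irqrestore(&d->tx_lock, flags);
}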
@@ -62,24 +62,25 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
 int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
 {
+	unsigned long flags;
 	int ret;
 
 	if (unlikely(len > dev->tx_buff.truesize))
 		return -ENOSPC;
 
-	spin_lock_bh(&dev->tx_lock);		/* serialize with other tx operations */
+	spin_lock_irqsave(&dev->tx_lock, flags);	/* serialize with other tx operations */
 
 	while (dev->tx_buff.len > 0) {		/* wait until tx idle */
-		spin_unlock_bh(&dev->tx_lock);
+		spin_unlock_irqrestore(&dev->tx_lock, flags);
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout(MSECS_TO_JIFFIES(10));
-		spin_lock_bh(&dev->tx_lock);
+		spin_lock_irqsave(&dev->tx_lock, flags);
 	}
 
 	dev->tx_buff.data = dev->tx_buff.head;
 	memcpy(dev->tx_buff.data, buf, len);
 
 	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
 
-	spin_unlock_bh(&dev->tx_lock);
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
 	return ret;
 }
@@ -114,11 +115,12 @@ int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
 void sirdev_write_complete(struct sir_dev *dev)
 {
+	unsigned long flags;
 	struct sk_buff *skb;
 	int actual = 0;
 	int err;
 
-	spin_lock_bh(&dev->tx_lock);
+	spin_lock_irqsave(&dev->tx_lock, flags);
 
 	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
 		   __FUNCTION__, dev->tx_buff.len);
@@ -143,7 +145,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 			dev->tx_buff.len = 0;
 		}
 		if (dev->tx_buff.len > 0) {
-			spin_unlock_bh(&dev->tx_lock);
+			spin_unlock_irqrestore(&dev->tx_lock, flags);
 			return;
 		}
 	}
@@ -190,7 +192,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 		netif_wake_queue(dev->netdev);
 	}
 
-	spin_unlock_bh(&dev->tx_lock);
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
 }
 
 /* called from client driver - likely with bh-context - to give us
@@ -258,6 +260,7 @@ static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
 static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct sir_dev *dev = ndev->priv;
+	unsigned long flags;
 	int actual = 0;
 	int err;
 	s32 speed;
@@ -307,7 +310,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	/* serialize with write completion */
-	spin_lock_bh(&dev->tx_lock);
+	spin_lock_irqsave(&dev->tx_lock, flags);
 
 	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
 	dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);
@@ -337,7 +340,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 		dev->stats.tx_dropped++;
 		netif_wake_queue(ndev);
 	}
 
-	spin_unlock_bh(&dev->tx_lock);
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
 	return 0;
 }
...
@@ -436,14 +436,13 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
 	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
 
-	if (in_interrupt()) {
-		if (down_trylock(&fsm->sem)) {
+	if (down_trylock(&fsm->sem)) {
+		if (in_interrupt() || in_atomic() || irqs_disabled()) {
 			IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
 			return -EWOULDBLOCK;
-		}
+		} else
+			down(&fsm->sem);
 	}
-	else
-		down(&fsm->sem);
 
 	if (fsm->state == SIRDEV_STATE_DEAD) {
 		/* race with sirdev_close should never happen */
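The last hunk reorders the guard in sirdev_schedule_request(): the semaphore is now tried first, and the caller only falls back to a blocking down() when the trylock fails and sleeping is actually permitted; in any atomic context it returns -EWOULDBLOCK instead. The old code blocked unconditionally whenever in_interrupt() was false, even if the caller still could not sleep (interrupts disabled or otherwise atomic), which is the potential deadlock the commit title refers to. A minimal sketch of that "try, then block only if sleeping is legal" pattern, with hypothetical names demo_fsm/demo_request (not from the patch):

/*
 * Illustrative sketch only: opportunistic semaphore acquisition that
 * refuses to sleep in atomic context.  demo_* names are made up.
 */
#include <linux/semaphore.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/errno.h>

struct demo_fsm {
	struct semaphore sem;
};

static int demo_request(struct demo_fsm *fsm)
{
	if (down_trylock(&fsm->sem)) {
		/* Contended: we may only wait here if sleeping is allowed. */
		if (in_interrupt() || in_atomic() || irqs_disabled())
			return -EWOULDBLOCK;
		down(&fsm->sem);
	}

	/* ... queue the state-machine request under fsm->sem ... */

	up(&fsm->sem);
	return 0;
}

In the real function the semaphore is held across the scheduled request rather than released immediately; the sketch adds up() only to stay self-contained.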