Commit a073fdef authored by Ivo van Doorn, committed by John W. Linville

rt2x00: Optimize TX_STA_FIFO register reading

Add recycling functionality to rt2x00usb_register_read_async.
When the callback function returns true, resubmit the urb to
read the register again.

This optimizes the rt2800usb driver when multiple TX status reports
are pending in the register, because the rt2x00_async_read_data and
urb structures no longer need to be allocated for each read.
Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com>
Acked-by: Gertjan van Wingerde <gwingerde@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 16763478
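
The recycling this commit introduces boils down to one contract: the completion handler asks the callback whether to go around again, then either resubmits the same request or frees it exactly once. The following user-space sketch models that contract outside the kernel; it is not the driver code, and every name in it (fake_urb, submit_read, complete_read, want_more) is hypothetical.

/*
 * Minimal user-space sketch of the "callback decides whether to recycle"
 * pattern. In the real driver the resubmit step is usb_submit_urb() and
 * the allocation is the rt2x00_async_read_data + urb pair.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_urb {
	unsigned int reg_value;                    /* value "read" from the register */
	bool (*callback)(unsigned int reg_value);  /* returns true to read again */
};

/* Pretend to resubmit the request; 0 means success, like usb_submit_urb(). */
static int submit_read(struct fake_urb *urb)
{
	urb->reg_value++;          /* simulate the next register value */
	return 0;
}

/* Completion handler: reuse the same allocation while the callback asks for more. */
static void complete_read(struct fake_urb *urb)
{
	if (urb->callback(urb->reg_value)) {
		/* Callback wants another read: recycle instead of reallocating. */
		if (submit_read(urb) < 0)
			free(urb);
		else
			complete_read(urb);   /* simulate the next completion */
	} else {
		/* Done: clean up exactly once. */
		free(urb);
	}
}

/* Example callback: keep reading until the "register" reaches 3. */
static bool want_more(unsigned int reg_value)
{
	printf("read value %u\n", reg_value);
	return reg_value < 3;
}

int main(void)
{
	struct fake_urb *urb = calloc(1, sizeof(*urb));

	if (!urb)
		return 1;
	urb->callback = want_more;
	complete_read(urb);        /* first "completion" with value 0 */
	return 0;
}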
@@ -114,12 +114,12 @@ static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev)
 	return false;
 }
 
-static void rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
+static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
 						 int urb_status, u32 tx_status)
 {
 	if (urb_status) {
 		WARNING(rt2x00dev, "rt2x00usb_register_read_async failed: %d\n", urb_status);
-		return;
+		return false;
 	}
 
 	/* try to read all TX_STA_FIFO entries before scheduling txdone_work */
@@ -129,13 +129,14 @@ static void rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
 				"drop tx status report.\n");
 			queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 		} else
-			rt2x00usb_register_read_async(rt2x00dev, TX_STA_FIFO,
-				rt2800usb_tx_sta_fifo_read_completed);
+			return true;
 	} else if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) {
 		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 	} else if (rt2800usb_txstatus_pending(rt2x00dev)) {
 		mod_timer(&rt2x00dev->txstatus_timer, jiffies + msecs_to_jiffies(2));
 	}
+
+	return false;
 }
 
 static void rt2800usb_tx_dma_done(struct queue_entry *entry)
@@ -170,19 +170,22 @@ struct rt2x00_async_read_data {
 	__le32 reg;
 	struct usb_ctrlrequest cr;
 	struct rt2x00_dev *rt2x00dev;
-	void (*callback)(struct rt2x00_dev *,int,u32);
+	bool (*callback)(struct rt2x00_dev *, int, u32);
 };
 
 static void rt2x00usb_register_read_async_cb(struct urb *urb)
 {
 	struct rt2x00_async_read_data *rd = urb->context;
-	rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg));
-	kfree(urb->context);
+	if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
+		if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+			kfree(rd);
+	} else
+		kfree(rd);
 }
 
 void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
 				   const unsigned int offset,
-				   void (*callback)(struct rt2x00_dev*,int,u32))
+				   bool (*callback)(struct rt2x00_dev*, int, u32))
 {
 	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
 	struct urb *urb;
@@ -349,10 +349,12 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
  * be called from atomic context. The callback will be called
  * when the URB completes. Otherwise the function is similar
  * to rt2x00usb_register_read().
+ * When the callback function returns false, the memory will be cleaned up,
+ * when it returns true, the urb will be fired again.
  */
 void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
 				   const unsigned int offset,
-				   void (*callback)(struct rt2x00_dev*,int,u32));
+				   bool (*callback)(struct rt2x00_dev*, int, u32));
 
 /*
  * Radio handlers