Commit b375a049 authored by Alan Stern, committed by Greg Kroah-Hartman

[PATCH] USB: URB_ASYNC_UNLINK flag removed from the kernel

29 July 2005, Cambridge, MA:

This afternoon Alan Stern submitted a patch to remove the URB_ASYNC_UNLINK
flag from the Linux kernel.  Mr. Stern explained, "This flag is a relic
from an earlier, less-well-designed system.  For over a year it hasn't
been used for anything other than printing warning messages."

An anonymous spokesman for the Linux kernel development community
commented, "This is exactly the sort of thing we see happening all the
time.  As the kernel evolves, support for old techniques and old code can
be jettisoned and replaced by newer, better approaches.  Proprietary
operating systems do not have the freedom or flexibility to change so
quickly."

Mr. Stern, a staff member at Harvard University's Rowland Institute who
works on Linux only as a hobby, noted that the patch (labelled as548) did
not update two files, keyspan.c and option.c, in the USB drivers' "serial"
subdirectory.  "Those files need more extensive changes," he remarked.
"They examine the status field of several URBs at times when they're not
supposed to.  That will need to be fixed before the URB_ASYNC_UNLINK flag
is removed."

Greg Kroah-Hartman, the kernel maintainer responsible for overseeing all
of Linux's USB drivers, did not respond to our inquiries or return our
calls.  His only comment was "Applied, thanks."
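
For readers unfamiliar with the interface, here is a minimal sketch of the
driver-side conversion this patch performs.  The urb variable is hypothetical;
the flag and the unlink/kill calls are the real kernel API involved:

	/* Before: a driver had to mark the URB before cancelling it
	 * asynchronously, typically from a timeout handler in irq context. */
	urb->transfer_flags |= URB_ASYNC_UNLINK;
	usb_unlink_urb(urb);	/* returns -EINPROGRESS; completion sees -ECONNRESET */

	/* After this patch: usb_unlink_urb() is always asynchronous and the
	 * flag is gone.  Pick one of the two calls depending on context: */
	usb_unlink_urb(urb);	/* asynchronous cancel, safe in irq context */
	usb_kill_urb(urb);	/* synchronous cancel, may sleep */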
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent a4e62832
@@ -1010,7 +1010,7 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	sc->last_pipe = sc->send_bulk_pipe;
 	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
 	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 	/* Fill what we shouldn't be filling, because usb-storage did so. */
 	sc->work_urb.actual_length = 0;
@@ -1395,7 +1395,7 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
 	    page_address(sg->page) + sg->offset, sg->length,
 	    ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 	sc->work_urb.actual_length = 0;
 	sc->work_urb.error_count = 0;
 	sc->work_urb.status = 0;
@@ -1442,7 +1442,7 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	sc->last_pipe = sc->recv_bulk_pipe;
 	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
 	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 	sc->work_urb.actual_length = 0;
 	sc->work_urb.error_count = 0;
 	sc->work_urb.status = 0;
@@ -1563,7 +1563,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
 	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 	sc->work_urb.actual_length = 0;
 	sc->work_urb.error_count = 0;
 	sc->work_urb.status = 0;
...
@@ -267,7 +267,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
 		      frame, IRDA_USB_SPEED_MTU,
 		      speed_bulk_callback, self);
 	urb->transfer_buffer_length = USB_IRDA_HEADER;
-	urb->transfer_flags = URB_ASYNC_UNLINK;
+	urb->transfer_flags = 0;
 	/* Irq disabled -> GFP_ATOMIC */
 	if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
@@ -401,15 +401,12 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
 		      skb->data, IRDA_SKB_MAX_MTU,
 		      write_bulk_callback, skb);
 	urb->transfer_buffer_length = skb->len;
-	/* Note : unlink *must* be Asynchronous because of the code in
-	 * irda_usb_net_timeout() -> call in irq - Jean II */
-	urb->transfer_flags = URB_ASYNC_UNLINK;
 	/* This flag (URB_ZERO_PACKET) indicates that what we send is not
 	 * a continuous stream of data but separate packets.
 	 * In this case, the USB layer will insert an empty USB frame (TD)
 	 * after each of our packets that is exact multiple of the frame size.
 	 * This is how the dongle will detect the end of packet - Jean II */
-	urb->transfer_flags |= URB_ZERO_PACKET;
+	urb->transfer_flags = URB_ZERO_PACKET;
 	/* Generate min turn time. FIXME: can we do better than this? */
 	/* Trying to a turnaround time at this level is trying to measure
@@ -630,8 +627,6 @@ static void irda_usb_net_timeout(struct net_device *netdev)
 	 * in completion handler, because urb->status will
 	 * be -ENOENT. We will fix that at the next watchdog,
 	 * leaving more time to USB to recover...
-	 * Also, we are in interrupt, so we need to have
-	 * URB_ASYNC_UNLINK to work properly...
 	 * Jean II */
 	done = 1;
 	break;
@@ -1008,9 +1003,7 @@ static int irda_usb_net_close(struct net_device *netdev)
 		}
 	}
 	/* Cancel Tx and speed URB - need to be synchronous to avoid races */
-	self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
 	usb_kill_urb(self->tx_urb);
-	self->speed_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
 	usb_kill_urb(self->speed_urb);
 	/* Stop and remove instance of IrLAP */
@@ -1521,9 +1514,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
 		usb_kill_urb(self->rx_urb[i]);
 	/* Cancel Tx and speed URB.
 	 * Toggle flags to make sure it's synchronous. */
-	self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
 	usb_kill_urb(self->tx_urb);
-	self->speed_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
 	usb_kill_urb(self->speed_urb);
 }
...
@@ -715,13 +715,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
 			usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
 			instance->rcv_buf, PAGE_SIZE,
 			cxacru_blocking_completion, &instance->rcv_done, 1);
-	instance->rcv_urb->transfer_flags |= URB_ASYNC_UNLINK;
 	usb_fill_int_urb(instance->snd_urb,
 			usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
 			instance->snd_buf, PAGE_SIZE,
 			cxacru_blocking_completion, &instance->snd_done, 4);
-	instance->snd_urb->transfer_flags |= URB_ASYNC_UNLINK;
 	init_MUTEX(&instance->cm_serialize);
...
@@ -48,7 +48,6 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
 	init_completion(&done);
 	urb->context = &done;
-	urb->transfer_flags |= URB_ASYNC_UNLINK;
 	urb->actual_length = 0;
 	status = usb_submit_urb(urb, GFP_NOIO);
@@ -357,8 +356,7 @@ int usb_sg_init (
 	if (!io->urbs)
 		goto nomem;

-	urb_flags = URB_ASYNC_UNLINK | URB_NO_TRANSFER_DMA_MAP
-		| URB_NO_INTERRUPT;
+	urb_flags = URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT;
 	if (usb_pipein (pipe))
 		urb_flags |= URB_SHORT_NOT_OK;
...
@@ -309,9 +309,8 @@ int usb_submit_urb(struct urb *urb, unsigned mem_flags)
 	unsigned int allowed;

 	/* enforce simple/standard policy */
-	allowed = URB_ASYNC_UNLINK;	// affects later unlinks
-	allowed |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
-	allowed |= URB_NO_INTERRUPT;
+	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
+			URB_NO_INTERRUPT);
 	switch (temp) {
 	case PIPE_BULK:
 		if (is_out)
@@ -400,14 +399,8 @@ int usb_submit_urb(struct urb *urb, unsigned mem_flags)
 * canceled (rather than any other code) and will quickly be removed
 * from host controller data structures.
 *
- * In the past, clearing the URB_ASYNC_UNLINK transfer flag for the
- * URB indicated that the request was synchronous.  This usage is now
- * deprecated; if the flag is clear the call will be forwarded to
- * usb_kill_urb() and the return value will be 0.  In the future, drivers
- * should call usb_kill_urb() directly for synchronous unlinking.
- *
- * When the URB_ASYNC_UNLINK transfer flag for the URB is set, this
- * request is asynchronous.  Success is indicated by returning -EINPROGRESS,
+ * This request is always asynchronous.
+ * Success is indicated by returning -EINPROGRESS,
 * at which time the URB will normally have been unlinked but not yet
 * given back to the device driver.  When it is called, the completion
 * function will see urb->status == -ECONNRESET.  Failure is indicated
@@ -453,17 +446,6 @@ int usb_unlink_urb(struct urb *urb)
 {
 	if (!urb)
 		return -EINVAL;
-	if (!(urb->transfer_flags & URB_ASYNC_UNLINK)) {
-#ifdef CONFIG_DEBUG_KERNEL
-		if (printk_ratelimit()) {
-			printk(KERN_NOTICE "usb_unlink_urb() is deprecated for "
-				"synchronous unlinks.  Use usb_kill_urb() instead.\n");
-			WARN_ON(1);
-		}
-#endif
-		usb_kill_urb(urb);
-		return 0;
-	}
 	if (!(urb->dev && urb->dev->bus && urb->dev->bus->op))
 		return -ENODEV;
 	return urb->dev->bus->op->unlink_urb(urb, -ECONNRESET);
...
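
The updated kerneldoc above spells out the contract: usb_unlink_urb() is now
always asynchronous, and the completion handler observes the cancellation
through urb->status.  Purely as an illustration (the handler name is
hypothetical; the status codes are the documented ones), a completion routine
can recognize a cancelled URB like this:

	static void example_complete(struct urb *urb, struct pt_regs *regs)
	{
		/* -ECONNRESET: unlinked asynchronously via usb_unlink_urb()
		 * -ENOENT:     killed synchronously via usb_kill_urb() */
		if (urb->status == -ECONNRESET || urb->status == -ENOENT)
			return;	/* cancelled; do no further work */

		/* normal completion or real error handling goes here */
	}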
@@ -1688,7 +1688,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 			usb_fill_int_urb(hid->urbin, dev, pipe, hid->inbuf, 0,
 					 hid_irq_in, hid, interval);
 			hid->urbin->transfer_dma = hid->inbuf_dma;
-			hid->urbin->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_ASYNC_UNLINK);
+			hid->urbin->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 		} else {
 			if (hid->urbout)
 				continue;
@@ -1698,7 +1698,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 			usb_fill_int_urb(hid->urbout, dev, pipe, hid->outbuf, 0,
 					 hid_irq_out, hid, interval);
 			hid->urbout->transfer_dma = hid->outbuf_dma;
-			hid->urbout->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_ASYNC_UNLINK);
+			hid->urbout->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 		}
 	}
@@ -1750,7 +1750,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 			 hid->ctrlbuf, 1, hid_ctrl, hid);
 	hid->urbctrl->setup_dma = hid->cr_dma;
 	hid->urbctrl->transfer_dma = hid->ctrlbuf_dma;
-	hid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | URB_ASYNC_UNLINK);
+	hid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);

 	return hid;
...
@@ -426,7 +426,7 @@ static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb)
 /* cancel an urb which is submitted to the chain
    the result is 0 if the urb is cancelled, or -EINPROGRESS if
-   URB_ASYNC_UNLINK is set and the function is successfully started.
+   the function is successfully started.
 */
 static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb)
 {
@@ -515,7 +515,6 @@ static void auerchain_unlink_all (pauerchain_t acp)
 	acep = acp->active;
 	if (acep) {
 		urbp = acep->urbp;
-		urbp->transfer_flags &= ~URB_ASYNC_UNLINK;
 		dbg ("unlink active urb");
 		usb_kill_urb (urbp);
 	}
...
@@ -229,7 +229,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
 	usb_fill_bulk_urb(urb, sisusb->sisusb_dev, pipe, data, len,
 		sisusb_bulk_completeout, &sisusb->urbout_context[index]);
-	urb->transfer_flags |= (tflags | URB_ASYNC_UNLINK);
+	urb->transfer_flags |= tflags;
 	urb->actual_length = 0;
 	if ((urb->transfer_dma = transfer_dma))
@@ -295,7 +295,7 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
 	usb_fill_bulk_urb(urb, sisusb->sisusb_dev, pipe, data, len,
 		sisusb_bulk_completein, sisusb);
-	urb->transfer_flags |= (tflags | URB_ASYNC_UNLINK);
+	urb->transfer_flags |= tflags;
 	urb->actual_length = 0;
 	if ((urb->transfer_dma = transfer_dma))
...
@@ -986,7 +986,6 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
 		u->context = &context;
 		u->complete = ctrl_complete;
-		u->transfer_flags |= URB_ASYNC_UNLINK;
 	}

 	/* queue the urbs */
@@ -1052,7 +1051,6 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
 	urb = simple_alloc_urb (testdev_to_usbdev (dev), pipe, size);
 	if (!urb)
 		return -ENOMEM;
-	urb->transfer_flags |= URB_ASYNC_UNLINK;
 	urb->context = &completion;
 	urb->complete = unlink1_callback;
...
@@ -383,7 +383,6 @@ static void catc_tx_done(struct urb *urb, struct pt_regs *regs)
 	if (urb->status == -ECONNRESET) {
 		dbg("Tx Reset.");
-		urb->transfer_flags &= ~URB_ASYNC_UNLINK;
 		urb->status = 0;
 		catc->netdev->trans_start = jiffies;
 		catc->stats.tx_errors++;
@@ -445,7 +444,6 @@ static void catc_tx_timeout(struct net_device *netdev)
 	struct catc *catc = netdev_priv(netdev);

 	warn("Transmit timed out.");
-	catc->tx_urb->transfer_flags |= URB_ASYNC_UNLINK;
 	usb_unlink_urb(catc->tx_urb);
 }
...
@@ -787,7 +787,6 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net)
 			  kaweth_usb_transmit_complete,
 			  kaweth);
 	kaweth->end = 0;
-	kaweth->tx_urb->transfer_flags |= URB_ASYNC_UNLINK;
 	if((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC)))
 	{
...
@@ -825,7 +825,6 @@ static void pegasus_tx_timeout(struct net_device *net)
 	pegasus_t *pegasus = netdev_priv(net);
 	if (netif_msg_timer(pegasus))
 		printk(KERN_WARNING "%s: tx timeout\n", net->name);
-	pegasus->tx_urb->transfer_flags |= URB_ASYNC_UNLINK;
 	usb_unlink_urb(pegasus->tx_urb);
 	pegasus->stats.tx_errors++;
 }
...
@@ -653,7 +653,6 @@ static void rtl8150_tx_timeout(struct net_device *netdev)
 {
 	rtl8150_t *dev = netdev_priv(netdev);
 	warn("%s: Tx timeout.", netdev->name);
-	dev->tx_urb->transfer_flags |= URB_ASYNC_UNLINK;
 	usb_unlink_urb(dev->tx_urb);
 	dev->stats.tx_errors++;
 }
...
@@ -2987,7 +2987,6 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, unsigned flags)
 	usb_fill_bulk_urb (urb, dev->udev, dev->in,
 		skb->data, size, rx_complete, skb);
-	urb->transfer_flags |= URB_ASYNC_UNLINK;

 	spin_lock_irqsave (&dev->rxq.lock, lockflags);
@@ -3561,7 +3560,6 @@ static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
 			skb->data, skb->len, tx_complete, skb);
-	urb->transfer_flags |= URB_ASYNC_UNLINK;

 	/* don't assume the hardware handles USB_ZERO_PACKET
 	 * NOTE:  strictly conforming cdc-ether devices should expect
...
@@ -847,7 +847,6 @@ static void zd1201_tx_timeout(struct net_device *dev)
 		return;
 	dev_warn(&zd->usb->dev, "%s: TX timeout, shooting down urb\n",
 		dev->name);
-	zd->tx_urb->transfer_flags |= URB_ASYNC_UNLINK;
 	usb_unlink_urb(zd->tx_urb);
 	zd->stats.tx_errors++;
 	/* Restart the timeout to quiet the watchdog: */
...
@@ -96,8 +96,8 @@
 * or before the URB_ACTIVE bit was set.  If so, it's essential to cancel
 * the URB if it hasn't been cancelled already (i.e., if the URB_ACTIVE bit
 * is still set).  Either way, the function must then wait for the URB to
- * finish.  Note that because the URB_ASYNC_UNLINK flag is set, the URB can
- * still be in progress even after a call to usb_unlink_urb() returns.
+ * finish.  Note that the URB can still be in progress even after a call to
+ * usb_unlink_urb() returns.
 *
 * The idea is that (1) once the ABORTING or DISCONNECTING bit is set,
 * either the stop_transport() function or the submitting function
@@ -158,8 +158,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
 	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
 	 * easier than always having the caller tell us whether the
 	 * transfer buffer has already been mapped. */
-	us->current_urb->transfer_flags =
-		URB_ASYNC_UNLINK | URB_NO_SETUP_DMA_MAP;
+	us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
 	if (us->current_urb->transfer_buffer == us->iobuf)
 		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 	us->current_urb->transfer_dma = us->iobuf_dma;
...
@@ -616,7 +616,6 @@ extern int usb_disabled(void);
 #define URB_ISO_ASAP		0x0002	/* iso-only, urb->start_frame ignored */
 #define URB_NO_TRANSFER_DMA_MAP	0x0004	/* urb->transfer_dma valid on submit */
 #define URB_NO_SETUP_DMA_MAP	0x0008	/* urb->setup_dma valid on submit */
-#define URB_ASYNC_UNLINK	0x0010	/* usb_unlink_urb() returns asap */
 #define URB_NO_FSBR		0x0020	/* UHCI-specific */
 #define URB_ZERO_PACKET		0x0040	/* Finish bulk OUTs with short packet */
 #define URB_NO_INTERRUPT	0x0080	/* HINT: no non-error interrupt needed */
@@ -724,13 +723,7 @@ typedef void (*usb_complete_t)(struct urb *, struct pt_regs *);
 * Initialization:
 *
 * All URBs submitted must initialize the dev, pipe, transfer_flags (may be
- * zero), and complete fields.
- * The URB_ASYNC_UNLINK transfer flag affects later invocations of
- * the usb_unlink_urb() routine.  Note: Failure to set URB_ASYNC_UNLINK
- * with usb_unlink_urb() is deprecated.  For synchronous unlinks use
- * usb_kill_urb() instead.
- *
- * All URBs must also initialize
+ * zero), and complete fields.  All URBs must also initialize
 * transfer_buffer and transfer_buffer_length.  They may provide the
 * URB_SHORT_NOT_OK transfer flag, indicating that short reads are
 * to be treated as errors; that flag is invalid for write requests.
...
@@ -735,10 +735,9 @@ static int deactivate_urbs(snd_usb_substream_t *subs, int force, int can_sleep)
 		if (test_bit(i, &subs->active_mask)) {
 			if (! test_and_set_bit(i, &subs->unlink_mask)) {
 				struct urb *u = subs->dataurb[i].urb;
-				if (async) {
-					u->transfer_flags |= URB_ASYNC_UNLINK;
+				if (async)
 					usb_unlink_urb(u);
-				} else
+				else
 					usb_kill_urb(u);
 			}
 		}
@@ -748,10 +747,9 @@ static int deactivate_urbs(snd_usb_substream_t *subs, int force, int can_sleep)
 		if (test_bit(i+16, &subs->active_mask)) {
 			if (! test_and_set_bit(i+16, &subs->unlink_mask)) {
 				struct urb *u = subs->syncurb[i].urb;
-				if (async) {
-					u->transfer_flags |= URB_ASYNC_UNLINK;
+				if (async)
 					usb_unlink_urb(u);
-				} else
+				else
 					usb_kill_urb(u);
 			}
 		}
...