Commit 8dfec614 authored by Andiry Xu, committed by Sarah Sharp

xHCI: dynamic ring expansion

If the room_on_ring() check fails, try to expand the ring and check again.

When expanding a ring, use a cached ring or allocate new segments, link the
original ring to the new ring or segments, and update the original ring's
segment count and last segment pointer.
Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Tested-by: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
parent 186a7ef1
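
For orientation, here is a small stand-alone model of the flow the commit message describes: if room_on_ring() fails, grow the ring by whole segments and check again. TRBS_PER_SEGMENT matches the mainline driver's value of 64, but the struct and helper names (ring_model, segs_for_trbs, expand) are made up for this sketch, so treat it as an illustration of the sizing rule rather than the driver code.

#include <stdio.h>

#define TRBS_PER_SEGMENT 64	/* one TRB per segment is the link TRB */

struct ring_model {
	unsigned int num_segs;
	unsigned int num_trbs_free;
};

/* Each segment adds TRBS_PER_SEGMENT - 1 usable TRBs; round up. */
static unsigned int segs_for_trbs(unsigned int num_trbs)
{
	return (num_trbs + TRBS_PER_SEGMENT - 2) / (TRBS_PER_SEGMENT - 1);
}

static void expand(struct ring_model *ring, unsigned int num_trbs)
{
	unsigned int needed = segs_for_trbs(num_trbs);
	/* Add at least as many segments as the ring already has,
	 * so the ring at least doubles in size. */
	unsigned int add = needed > ring->num_segs ? needed : ring->num_segs;

	ring->num_segs += add;
	ring->num_trbs_free += add * (TRBS_PER_SEGMENT - 1);
}

int main(void)
{
	struct ring_model ring = { .num_segs = 1, .num_trbs_free = 10 };
	unsigned int request = 100;	/* e.g. a large isochronous URB */

	while (ring.num_trbs_free < request)
		expand(&ring, request - ring.num_trbs_free);

	printf("segments: %u, free TRBs: %u\n",
	       ring.num_segs, ring.num_trbs_free);
	return 0;
}

Run as-is it prints "segments: 3, free TRBs: 136": asking for 100 TRBs with only 10 free needs ceil(90/63) = 2 extra segments, and since 2 is more than the current single segment, 2 segments are added.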
@@ -119,6 +119,34 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
}
}
/*
* Link the ring to the new segments.
* Set Toggle Cycle for the new ring if needed.
*/
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *first, struct xhci_segment *last,
unsigned int num_segs)
{
struct xhci_segment *next;
if (!ring || !first || !last)
return;
next = ring->enq_seg->next;
xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
xhci_link_segments(xhci, last, next, ring->type);
ring->num_segs += num_segs;
ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
&= ~cpu_to_le32(LINK_TOGGLE);
last->trbs[TRBS_PER_SEGMENT-1].link.control
|= cpu_to_le32(LINK_TOGGLE);
ring->last_seg = last;
}
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
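
Before the next hunk, a note on xhci_link_rings() above: it splices the new segment chain in directly after the current enqueue segment, and if that segment was also the last one, the Link-TRB toggle bit moves to the new last segment. A tiny linked-list model of just that step, using made-up types (seg_model, splice_after, link_toggle); the real code manipulates struct xhci_segment and the link TRB control word:

#include <stdbool.h>

struct seg_model {
	struct seg_model *next;
	bool link_toggle;	/* stands in for LINK_TOGGLE in the last link TRB */
};

/*
 * Splice the new chain first..last in right after the enqueue segment,
 * and, if the enqueue segment was the last one, move the toggle flag to
 * the new last segment (mirrors the non-event-ring branch above).
 */
void splice_after(struct seg_model *enq, struct seg_model **last_seg,
		  struct seg_model *first, struct seg_model *last)
{
	last->next = enq->next;
	enq->next = first;

	if (enq == *last_seg) {
		enq->link_toggle = false;
		last->link_toggle = true;
		*last_seg = last;
	}
}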
@@ -287,6 +315,39 @@ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
INIT_LIST_HEAD(&ring->td_list);
}
/*
* Expand an existing ring.
* Look for a cached ring or allocate a new ring which has same segment numbers
* and link the two rings.
*/
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags)
{
struct xhci_segment *first;
struct xhci_segment *last;
unsigned int num_segs;
unsigned int num_segs_needed;
int ret;
num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
(TRBS_PER_SEGMENT - 1);
/* Allocate number of segments we needed, or double the ring size */
num_segs = ring->num_segs > num_segs_needed ?
ring->num_segs : num_segs_needed;
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
num_segs, ring->cycle_state, ring->type, flags);
if (ret)
return -ENOMEM;
xhci_link_rings(xhci, ring, first, last, num_segs);
xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
ring->num_segs);
return 0;
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -1361,18 +1422,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
type = usb_endpoint_type(&ep->desc);
/* Set up the endpoint ring */
/*
* Isochronous endpoint ring needs bigger size because one isoc URB
* carries multiple packets and it will insert multiple tds to the
* ring.
* This should be replaced with dynamic ring resizing in the future.
*/
if (usb_endpoint_xfer_isoc(&ep->desc))
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 8, 1, type, mem_flags);
else
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 1, 1, type, mem_flags);
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 1, 1, type, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -2490,6 +2490,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
unsigned int num_trbs_needed;
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
case EP_STATE_DISABLED:
@@ -2517,11 +2519,32 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
*/
return -EINVAL;
}
if (!room_on_ring(xhci, ep_ring, num_trbs)) {
/* FIXME allocate more room */
xhci_err(xhci, "ERROR no room on ep ring\n");
return -ENOMEM;
}
while (1) {
if (room_on_ring(xhci, ep_ring, num_trbs))
break;
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Do not support expand command ring\n");
return -ENOMEM;
}
if (ep_ring->enq_seg == ep_ring->deq_seg &&
ep_ring->dequeue > ep_ring->enqueue) {
xhci_err(xhci, "Can not expand the ring while dequeue "
"pointer has not passed the link TRB\n");
return -ENOMEM;
}
xhci_dbg(xhci, "ERROR no room on ep ring, "
"try ring expansion\n");
num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
mem_flags)) {
xhci_err(xhci, "Ring expansion failed\n");
return -ENOMEM;
}
};
if (enqueue_is_link_trb(ep_ring)) {
struct xhci_ring *ring = ep_ring;
@@ -1622,6 +1622,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags);
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);