
xhci: Remove duplicate xhci urb giveback functions

We want one function that gives back an urb in interrupt context and
that can be used both in the normal case and when killing off urbs.

Add small helpers that check whether a td is the last td in its urb,
last_td_in_urb(), and that increase the td count of an urb, inc_td_cnt().

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Authored by Mathias Nyman on 2016-11-11 15:13:24 +02:00, committed by Greg Kroah-Hartman
parent 0c03d89d0c
commit 2a72126de1
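
Before the diff: the whole patch reduces every giveback site to one pattern, "bump the urb's td count, then give the urb back only when its last td has completed." Below is a stand-alone sketch of that pattern using stub structs in place of the kernel's struct urb / urb_priv / xhci_td (in the real driver, hcpriv is a void pointer and the giveback path also drops and retakes xhci->lock); it compiles as ordinary user-space C.

#include <stdbool.h>
#include <stdio.h>

/* Stub types standing in for the kernel structures. */
struct urb_priv { int td_cnt; int length; };
struct urb { struct urb_priv *hcpriv; };
struct xhci_td { struct urb *urb; };

/* Same bodies as the helpers added in the first hunk below. */
static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->td_cnt == urb_priv->length;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->td_cnt++;
}

int main(void)
{
	struct urb_priv priv = { .td_cnt = 0, .length = 3 };
	struct urb urb = { .hcpriv = &priv };
	struct xhci_td td = { .urb = &urb };

	/* The pattern every converted call site follows: count each td,
	 * and give the urb back only once all of its tds are done. */
	for (int i = 0; i < priv.length; i++) {
		inc_td_cnt(&urb);
		if (last_td_in_urb(&td))
			printf("giveback urb after td %d\n", priv.td_cnt);
	}
	return 0;
}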

drivers/usb/host/xhci-ring.c

@@ -115,6 +115,20 @@ static bool link_trb_toggles_cycle(union xhci_trb *trb)
 	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
 
+static bool last_td_in_urb(struct xhci_td *td)
+{
+	struct urb_priv *urb_priv = td->urb->hcpriv;
+
+	return urb_priv->td_cnt == urb_priv->length;
+}
+
+static void inc_td_cnt(struct urb *urb)
+{
+	struct urb_priv *urb_priv = urb->hcpriv;
+
+	urb_priv->td_cnt++;
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -555,64 +569,28 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
 	ep->stop_cmds_pending--;
 }
 
-/* Must be called with xhci->lock held in interrupt context */
-static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
-		struct xhci_td *cur_td, int status)
-{
-	struct usb_hcd *hcd;
-	struct urb *urb;
-	struct urb_priv *urb_priv;
-
-	urb = cur_td->urb;
-	urb_priv = urb->hcpriv;
-	urb_priv->td_cnt++;
-	hcd = bus_to_hcd(urb->dev->bus);
-
-	/* Only giveback urb when this is the last td in urb */
-	if (urb_priv->td_cnt == urb_priv->length) {
-		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
-			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
-			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
-				if (xhci->quirks & XHCI_AMD_PLL_FIX)
-					usb_amd_quirk_pll_enable();
-			}
-		}
-		usb_hcd_unlink_urb_from_ep(hcd, urb);
-
-		spin_unlock(&xhci->lock);
-		usb_hcd_giveback_urb(hcd, urb, status);
-		xhci_urb_free_priv(urb_priv);
-		spin_lock(&xhci->lock);
-	}
-}
-
 /*
- * giveback urb, must be called with xhci->lock held.
- * releases and re-aquires xhci->lock
+ * Must be called with xhci->lock held in interrupt context,
+ * releases and re-acquires xhci->lock
  */
-static void xhci_giveback_urb_locked(struct xhci_hcd *xhci, struct xhci_td *td,
-				     int status)
+static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+				     struct xhci_td *cur_td, int status)
 {
-	struct urb *urb = td->urb;
+	struct urb *urb = cur_td->urb;
 	struct urb_priv *urb_priv = urb->hcpriv;
+	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
 
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+			if (xhci->quirks & XHCI_AMD_PLL_FIX)
+				usb_amd_quirk_pll_enable();
+		}
+	}
 	xhci_urb_free_priv(urb_priv);
-
-	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
-	if ((urb->actual_length != urb->transfer_buffer_length &&
-	     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
-	    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
-		xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
-			 urb, urb->actual_length,
-			 urb->transfer_buffer_length, status);
-
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
 	spin_unlock(&xhci->lock);
-	/* EHCI, UHCI, and OHCI always unconditionally set the
-	 * urb->status of an isochronous endpoint to 0.
-	 */
-	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
-		status = 0;
-	usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
+	usb_hcd_giveback_urb(hcd, urb, status);
 	spin_lock(&xhci->lock);
 }
@@ -763,7 +741,9 @@ remove_finished_td:
 		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
 		if (ep_ring && cur_td->bounce_seg)
 			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
-		xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+		inc_td_cnt(cur_td->urb);
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, 0);
 
 		/* Stop processing the cancelled list if the watchdog timer is
 		 * running.
@@ -788,7 +768,10 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
 		if (cur_td->bounce_seg)
 			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
-		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+
+		inc_td_cnt(cur_td->urb);
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 	}
 }
@@ -825,7 +808,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
 		cur_td = list_first_entry(&ep->cancelled_td_list,
 				struct xhci_td, cancelled_td_list);
 		list_del_init(&cur_td->cancelled_td_list);
-		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+
+		inc_td_cnt(cur_td->urb);
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 	}
 }
@@ -1899,30 +1885,31 @@ td_cleanup:
 	 * unsigned). Play it safe and say we didn't transfer anything.
 	 */
 	if (urb->actual_length > urb->transfer_buffer_length) {
-		xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
-			  urb->transfer_buffer_length,
-			  urb->actual_length);
+		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
+			  urb->transfer_buffer_length, urb->actual_length);
 		urb->actual_length = 0;
 		*status = 0;
 	}
 	list_del_init(&td->td_list);
 	/* Was this TD slated to be cancelled but completed anyway? */
 	if (!list_empty(&td->cancelled_td_list))
 		list_del_init(&td->cancelled_td_list);
 
-	urb_priv->td_cnt++;
+	inc_td_cnt(urb);
 	/* Giveback the urb when all the tds are completed */
-	if (urb_priv->td_cnt == urb_priv->length) {
-		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
-			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
-			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
-				if (xhci->quirks & XHCI_AMD_PLL_FIX)
-					usb_amd_quirk_pll_enable();
-			}
-		}
-		xhci_giveback_urb_locked(xhci, td, *status);
+	if (last_td_in_urb(td)) {
+		if ((urb->actual_length != urb->transfer_buffer_length &&
+		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
+		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
+				 urb, urb->actual_length,
+				 urb->transfer_buffer_length, *status);
+
+		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
+		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+			*status = 0;
+
+		xhci_giveback_urb_in_irq(xhci, td, *status);
 	}
 
 	return 0;
 }