xhci: Move allocating of command for new_dequeue_state to queue_set_tr_deq()

There are multiple reasons for this:

1) This fixes a missing check for xhci_alloc_command failing in
   xhci_handle_cmd_stop_ep()
2) This adds a warning when we cannot set the new dequeue state because of
   xhci_alloc_command failing
3) It puts the allocation of the command after the sanity checks in
   queue_set_tr_deq(), avoiding leaking the command if those fail
4) Since queue_set_tr_deq() now owns the command, it can free it if
   queue_command() fails (see the sketch after this list)
5) It reduces code duplication
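
The ownership rule in points 3) and 4) follows a common pattern: run the sanity
checks first, allocate the command only once it will actually be used, and free
it on the spot if queueing fails. Below is a minimal stand-alone sketch of that
pattern; every name in it (struct command, alloc_command, submit, queue_example)
is an illustrative stand-in, not an xhci symbol.

#include <stdio.h>
#include <stdlib.h>

struct command { int payload; };

static struct command *alloc_command(void)
{
	return calloc(1, sizeof(struct command));
}

static void free_command(struct command *cmd)
{
	free(cmd);
}

/* Stand-in for handing the command to the hardware ring; may fail. */
static int submit(struct command *cmd)
{
	return cmd->payload != 0 ? 0 : -1;
}

static int queue_example(int payload)
{
	struct command *cmd;
	int ret;

	if (payload < 0)	/* sanity checks run before any allocation, */
		return -1;	/* so a failed check cannot leak a command  */

	cmd = alloc_command();	/* allocate only after the checks pass */
	if (!cmd) {
		fprintf(stderr, "cannot queue command: out of memory\n");
		return 0;
	}

	cmd->payload = payload;

	ret = submit(cmd);
	if (ret < 0) {
		free_command(cmd);	/* this function owns the command, so it */
		return ret;		/* also cleans up when submission fails  */
	}
	return 0;
}

int main(void)
{
	return queue_example(42) == 0 ? 0 : 1;
}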

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Hans de Goede, 2014-08-20 16:41:52 +03:00; committed by Greg Kroah-Hartman
parent fac1f48584
commit 1e3452e3f0
3 changed files with 23 additions and 20 deletions


@@ -572,14 +572,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	}
 }
 
-static int queue_set_tr_deq(struct xhci_hcd *xhci,
-		struct xhci_command *cmd, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, unsigned int stream_id,
 		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-		struct xhci_command *cmd,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state)
@@ -594,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 			deq_state->new_deq_ptr,
 			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
 			deq_state->new_cycle_state);
-	queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
+	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
 			deq_state->new_deq_seg,
 			deq_state->new_deq_ptr,
 			(u32) deq_state->new_cycle_state);
@@ -743,12 +741,8 @@ remove_finished_td:
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		struct xhci_command *command;
-		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
-		xhci_queue_new_dequeue_state(xhci, command,
-				slot_id, ep_index,
-				ep->stopped_td->urb->stream_id,
-				&deq_state);
+		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
+				ep->stopped_td->urb->stream_id, &deq_state);
 		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Otherwise ring the doorbell(s) to restart queued transfers */
@@ -3929,8 +3923,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
 /* Set Transfer Ring Dequeue Pointer command.
  * This should not be used for endpoints that have streams enabled.
  */
-static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
-		int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, unsigned int stream_id,
 		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state)
@@ -3942,6 +3935,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
 	u32 trb_sct = 0;
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 	struct xhci_virt_ep *ep;
+	struct xhci_command *cmd;
+	int ret;
 
 	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
 	if (addr == 0) {
@@ -3956,14 +3951,28 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
 		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
 		return 0;
 	}
+
+	/* This function gets called from contexts where it cannot sleep */
+	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+	if (!cmd) {
+		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
+		return 0;
+	}
+
 	ep->queued_deq_seg = deq_seg;
 	ep->queued_deq_ptr = deq_ptr;
 	if (stream_id)
 		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
-	return queue_command(xhci, cmd,
+	ret = queue_command(xhci, cmd,
 			lower_32_bits(addr) | trb_sct | cycle_state,
 			upper_32_bits(addr), trb_stream_id,
 			trb_slot_id | trb_ep_index | type, false);
+	if (ret < 0) {
+		xhci_free_command(xhci, cmd);
+		return ret;
+	}
+
+	return 0;
 }
 
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,


@@ -2887,14 +2887,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	 * issue a configure endpoint command later.
 	 */
 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
-		struct xhci_command *command;
-		/* Can't sleep if we're called from cleanup_halted_endpoint() */
-		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
-		if (!command)
-			return;
 		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 				"Queueing new dequeue state");
-		xhci_queue_new_dequeue_state(xhci, command, udev->slot_id,
+		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
 				ep_index, ep->stopped_stream, &deq_state);
 	} else {
 		/* Better hope no one uses the input context between now and the


@@ -1839,7 +1839,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int stream_id, struct xhci_td *cur_td,
 		struct xhci_dequeue_state *state);
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-		struct xhci_command *cmd,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state);