2017-11-03 18:28:30 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2008-11-18 06:14:51 +08:00
|
|
|
/*
|
2012-05-11 22:25:54 +08:00
|
|
|
* udc.c - ChipIdea UDC driver
|
2008-11-18 06:14:51 +08:00
|
|
|
*
|
|
|
|
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* Author: David Lopo
|
|
|
|
*/
|
|
|
|
|
2009-04-16 04:28:36 +08:00
|
|
|
#include <linux/delay.h>
|
2008-11-18 06:14:51 +08:00
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/dmapool.h>
|
2012-06-26 20:10:32 +08:00
|
|
|
#include <linux/err.h>
|
2013-03-30 08:46:18 +08:00
|
|
|
#include <linux/irqreturn.h>
|
2008-11-18 06:14:51 +08:00
|
|
|
#include <linux/kernel.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2010-12-07 20:24:04 +08:00
|
|
|
#include <linux/pm_runtime.h>
|
2018-09-04 23:18:55 +08:00
|
|
|
#include <linux/pinctrl/consumer.h>
|
2008-11-18 06:14:51 +08:00
|
|
|
#include <linux/usb/ch9.h>
|
|
|
|
#include <linux/usb/gadget.h>
|
2014-04-23 15:56:47 +08:00
|
|
|
#include <linux/usb/otg-fsm.h>
|
2012-05-11 22:25:46 +08:00
|
|
|
#include <linux/usb/chipidea.h>
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-05-11 22:25:46 +08:00
|
|
|
#include "ci.h"
|
|
|
|
#include "udc.h"
|
|
|
|
#include "bits.h"
|
2013-08-14 17:44:07 +08:00
|
|
|
#include "otg.h"
|
2014-04-23 15:56:50 +08:00
|
|
|
#include "otg_fsm.h"
|
2011-10-11 00:38:06 +08:00
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/* control endpoint description */
|
|
|
|
/* Descriptor for the controller's fixed control OUT endpoint (ep0out). */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
|
|
|
|
|
|
|
|
/* Descriptor for the controller's fixed control IN endpoint (ep0in). */
static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_ep_bit: calculates the bit number
|
|
|
|
* @num: endpoint number
|
|
|
|
* @dir: endpoint direction
|
|
|
|
*
|
|
|
|
* This function returns bit number
|
|
|
|
*/
|
|
|
|
static inline int hw_ep_bit(int num, int dir)
|
|
|
|
{
|
2016-08-12 01:19:13 +08:00
|
|
|
return num + ((dir == TX) ? 16 : 0);
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
static inline int ep_to_bit(struct ci_hdrc *ci, int n)
|
2011-10-11 00:38:10 +08:00
|
|
|
{
|
2012-07-07 22:56:40 +08:00
|
|
|
int fill = 16 - ci->hw_ep_max / 2;
|
2011-10-11 00:38:10 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
if (n >= ci->hw_ep_max / 2)
|
2011-10-11 00:38:10 +08:00
|
|
|
n += fill;
|
|
|
|
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/**
 * hw_device_state: enables/disables interrupts (execute without interruption)
 * @ci: the controller
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * When enabling, @dma is programmed as the endpoint list address before the
 * interrupt sources are unmasked.
 *
 * This function returns an error code (currently always 0)
 */
static int hw_device_state(struct ci_hdrc *ci, u32 dma)
{
	if (dma) {
		hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
		/* interrupt, error, port change, reset, sleep/suspend */
		hw_write(ci, OP_USBINTR, ~0,
			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
	} else {
		/* mask all interrupt sources */
		hw_write(ci, OP_USBINTR, ~0, 0);
	}
	return 0;
}
|
|
|
|
|
|
|
|
/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * Busy-waits until the flush bit self-clears, and retries while the
 * endpoint still shows as primed in ENDPTSTAT.
 *
 * This function returns an error code (currently always 0)
 */
static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
		while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));

	return 0;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_ep_disable: disables endpoint (execute without interruption)
|
2020-07-04 01:41:29 +08:00
|
|
|
* @ci: the controller
|
2008-11-18 06:14:51 +08:00
|
|
|
* @num: endpoint number
|
|
|
|
* @dir: endpoint direction
|
|
|
|
*
|
|
|
|
* This function returns an error code
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2012-07-07 22:56:40 +08:00
|
|
|
hw_write(ci, OP_ENDPTCTRL + num,
|
2016-08-12 01:19:13 +08:00
|
|
|
(dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
|
2008-11-18 06:14:51 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_ep_enable: enables endpoint (execute without interruption)
|
2020-07-04 01:41:29 +08:00
|
|
|
* @ci: the controller
|
2008-11-18 06:14:51 +08:00
|
|
|
* @num: endpoint number
|
|
|
|
* @dir: endpoint direction
|
|
|
|
* @type: endpoint type
|
|
|
|
*
|
|
|
|
* This function returns an error code
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
|
|
|
u32 mask, data;
|
|
|
|
|
2016-08-12 01:19:13 +08:00
|
|
|
if (dir == TX) {
|
2008-11-18 06:14:51 +08:00
|
|
|
mask = ENDPTCTRL_TXT; /* type */
|
2013-03-30 18:53:55 +08:00
|
|
|
data = type << __ffs(mask);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
|
|
|
mask |= ENDPTCTRL_TXS; /* unstall */
|
|
|
|
mask |= ENDPTCTRL_TXR; /* reset data toggle */
|
|
|
|
data |= ENDPTCTRL_TXR;
|
|
|
|
mask |= ENDPTCTRL_TXE; /* enable */
|
|
|
|
data |= ENDPTCTRL_TXE;
|
|
|
|
} else {
|
|
|
|
mask = ENDPTCTRL_RXT; /* type */
|
2013-03-30 18:53:55 +08:00
|
|
|
data = type << __ffs(mask);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
|
|
|
mask |= ENDPTCTRL_RXS; /* unstall */
|
|
|
|
mask |= ENDPTCTRL_RXR; /* reset data toggle */
|
|
|
|
data |= ENDPTCTRL_RXR;
|
|
|
|
mask |= ENDPTCTRL_RXE; /* enable */
|
|
|
|
data |= ENDPTCTRL_RXE;
|
|
|
|
}
|
2012-07-07 22:56:40 +08:00
|
|
|
hw_write(ci, OP_ENDPTCTRL + num, mask, data);
|
2008-11-18 06:14:51 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_ep_get_halt: return endpoint halt status
|
2020-07-04 01:41:29 +08:00
|
|
|
* @ci: the controller
|
2008-11-18 06:14:51 +08:00
|
|
|
* @num: endpoint number
|
|
|
|
* @dir: endpoint direction
|
|
|
|
*
|
|
|
|
* This function returns 1 if endpoint halted
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2016-08-12 01:19:13 +08:00
|
|
|
u32 mask = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * For a control OUT endpoint, a pending setup packet (ENDPTSETUPSTAT)
 * aborts the prime with -EAGAIN, both before and after priming, so a
 * setup arriving mid-prime is not lost.
 *
 * This function returns an error code
 */
static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	/* Synchronize before ep prime */
	wmb();

	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));

	/* the prime bit self-clears once the controller consumes it */
	while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according with manual but it doesn't work */
	return 0;
}
|
|
|
|
|
|
|
|
/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 * without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @value: true => stall, false => unstall
 *
 * The write is retried until hw_ep_get_halt() confirms the requested
 * state has actually latched in the hardware.
 *
 * This function returns an error code
 */
static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		enum ci_hw_regs reg = OP_ENDPTCTRL + num;
		u32 mask_xs = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = (dir == TX) ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_write(ci, reg, mask_xs|mask_xr,
			value ? mask_xs : mask_xr);
	} while (value != hw_ep_get_halt(ci, num, dir));

	return 0;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_is_port_high_speed: test if port is high speed
|
|
|
|
*
|
|
|
|
* This function returns true if high speed port
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int hw_port_is_high_speed(struct ci_hdrc *ci)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2012-07-07 22:56:40 +08:00
|
|
|
return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
|
|
|
|
hw_read(ci, OP_PORTSC, PORTSC_HSP);
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_test_and_clear_complete: test & clear complete status (execute without
|
|
|
|
* interruption)
|
2020-07-04 01:41:29 +08:00
|
|
|
* @ci: the controller
|
2011-10-11 00:38:10 +08:00
|
|
|
* @n: endpoint number
|
2008-11-18 06:14:51 +08:00
|
|
|
*
|
|
|
|
* This function returns complete status
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2012-07-07 22:56:40 +08:00
|
|
|
n = ep_to_bit(ci, n);
|
|
|
|
return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_test_and_clear_intr_active: test & clear active interrupts (execute
|
|
|
|
* without interruption)
|
|
|
|
*
|
|
|
|
* This function returns active interrutps
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2012-07-07 22:56:40 +08:00
|
|
|
u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
hw_write(ci, OP_USBSTS, ~0, reg);
|
2008-11-18 06:14:51 +08:00
|
|
|
return reg;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_test_and_clear_setup_guard: test & clear setup guard (execute without
|
|
|
|
* interruption)
|
|
|
|
*
|
|
|
|
* This function returns guard value
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2012-07-07 22:56:40 +08:00
|
|
|
return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* hw_test_and_set_setup_guard: test & set setup guard (execute without
|
|
|
|
* interruption)
|
|
|
|
*
|
|
|
|
* This function returns guard value
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2012-07-07 22:56:40 +08:00
|
|
|
return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * hw_usb_set_address: configures USB address (execute without interruption)
 * @ci: the controller
 * @value: new USB address
 *
 * This function explicitly sets the address, without the "USBADRA" (advance)
 * feature, which is not supported by older versions of the controller.
 */
static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
{
	hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
		 value << __ffs(DEVICEADDR_USBADR));
}
|
|
|
|
|
|
|
|
/**
 * hw_usb_reset: restart device after a bus reset (execute without
 * interruption)
 * @ci: the controller
 *
 * Resets the device address to 0, flushes every endpoint, clears the
 * setup and complete status registers, and waits until no endpoint is
 * primed any more.
 *
 * This function returns an error code (currently always 0)
 */
static int hw_usb_reset(struct ci_hdrc *ci)
{
	hw_usb_set_address(ci, 0);

	/* ESS flushes only at end?!? */
	hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);

	/* clear setup token semaphores */
	hw_write(ci, OP_ENDPTSETUPSTAT, 0, 0);

	/* clear complete status */
	hw_write(ci, OP_ENDPTCOMPLETE, 0, 0);

	/* wait until all bits cleared */
	while (hw_read(ci, OP_ENDPTPRIME, ~0))
		udelay(10); /* not RTOS friendly */

	/* reset all endpoints ? */

	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */

	return 0;
}
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
* UTIL block
|
|
|
|
*****************************************************************************/
|
2013-06-13 22:59:53 +08:00
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
/*
 * add_td_to_list: allocate one transfer descriptor (TD) for @length bytes
 * and append it to the request's TD list.
 * @hwep:   endpoint the request belongs to
 * @hwreq:  request being built; req.actual is advanced by @length
 * @length: number of bytes this TD should transfer (0 for a zero-length TD)
 * @s:      scatterlist entry to take the DMA address from, or NULL to use
 *          the request's own DMA buffer
 *
 * Returns 0 on success or -ENOMEM if the node or its DMA-pool TD could
 * not be allocated.
 */
static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
		unsigned int length, struct scatterlist *s)
{
	int i;
	u32 temp;
	struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
						  GFP_ATOMIC);

	if (node == NULL)
		return -ENOMEM;

	node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
	if (node->ptr == NULL) {
		kfree(node);
		return -ENOMEM;
	}

	/* total bytes, masked to the field width, plus the active bit */
	node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
	node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
	node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
		/*
		 * Per-TD multiplier override (MultO) for high-bandwidth
		 * ISO TX: packets per (micro)frame for this TD's length.
		 */
		u32 mul = hwreq->req.length / hwep->ep.maxpacket;

		if (hwreq->req.length == 0
				|| hwreq->req.length % hwep->ep.maxpacket)
			mul++;
		node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
	}

	if (s) {
		/* sg path: also track how much buffer room is left in this TD */
		temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
		node->td_remaining_size = CI_MAX_BUF_SIZE - length;
	} else {
		temp = (u32) (hwreq->req.dma + hwreq->req.actual);
	}

	if (length) {
		/* fill the page pointers; pages after the first are aligned */
		node->ptr->page[0] = cpu_to_le32(temp);
		for (i = 1; i < TD_PAGE_COUNT; i++) {
			u32 page = temp + i * CI_HDRC_PAGE_SIZE;
			page &= ~TD_RESERVED_MASK;
			node->ptr->page[i] = cpu_to_le32(page);
		}
	}

	hwreq->req.actual += length;

	if (!list_empty(&hwreq->tds)) {
		/* get the last entry and chain the new TD after it */
		lastnode = list_entry(hwreq->tds.prev,
				struct td_node, td);
		lastnode->ptr->next = cpu_to_le32(node->dma);
	}

	INIT_LIST_HEAD(&node->td);
	list_add_tail(&node->td, &hwreq->tds);

	return 0;
}
|
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/**
|
|
|
|
* _usb_addr: calculates endpoint address from direction & number
|
|
|
|
* @ep: endpoint
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static inline u8 _usb_addr(struct ci_hw_ep *ep)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
|
|
|
return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
|
|
|
|
}
|
|
|
|
|
usb: chipidea: udc: add software sg list support
The chipidea controller doesn't support short transfer for sg list,
so we still keep setting IOC per TD, otherwise, there will be no interrupt
for short transfer. Each TD has five entries for data buffer, each data
buffer could be non-countinuous 4KB buffer, so it could handle
up to 5 sg buffers one time. The benefit of this patch is avoiding
OOM for low memory system(eg, 256MB) during large USB transfers, see
below for detail. The non-sg handling has not changed.
ufb: page allocation failure: order:4, mode:0x40cc0(GFP_KERNEL|__GFP_COMP),
nodemask=(null),cpuset=/,mems_allowed=0
CPU: 2 PID: 370 Comm: ufb Not tainted 5.4.3-1.1.0+g54b3750d61fd #1
Hardware name: NXP i.MX8MNano DDR4 EVK board (DT)
Call trace:
dump_backtrace+0x0/0x140
show_stack+0x14/0x20
dump_stack+0xb4/0xf8
warn_alloc+0xec/0x158
__alloc_pages_slowpath+0x9cc/0x9f8
__alloc_pages_nodemask+0x21c/0x280
alloc_pages_current+0x7c/0xe8
kmalloc_order+0x1c/0x88
__kmalloc+0x25c/0x298
ffs_epfile_io.isra.0+0x20c/0x7d0
ffs_epfile_read_iter+0xa8/0x188
new_sync_read+0xe4/0x170
__vfs_read+0x2c/0x40
vfs_read+0xc8/0x1a0
ksys_read+0x68/0xf0
__arm64_sys_read+0x18/0x20
el0_svc_common.constprop.0+0x68/0x160
el0_svc_handler+0x20/0x80
el0_svc+0x8/0xc
Mem-Info:
active_anon:2856 inactive_anon:5269 isolated_anon:12
active_file:5238 inactive_file:18803 isolated_file:0
unevictable:0 dirty:22 writeback:416 unstable:0
slab_reclaimable:4073 slab_unreclaimable:3408
mapped:727 shmem:7393 pagetables:37 bounce:0
free:4104 free_pcp:118 free_cma:0
Node 0 active_anon:11436kB inactive_anon:21076kB active_file:20988kB inactive_file:75216kB unevictable:0kB isolated(ano
Node 0 DMA32 free:16820kB min:1808kB low:2260kB high:2712kB active_anon:11436kB inactive_anon:21076kB active_file:2098B
lowmem_reserve[]: 0 0 0
Node 0 DMA32: 508*4kB (UME) 242*8kB (UME) 730*16kB (UM) 21*32kB (UME) 5*64kB (UME) 2*128kB (M) 0*256kB 0*512kB 0*1024kB
Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=32768kB
Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=64kB
31455 total pagecache pages
0 pages in swap cache
Swap cache stats: add 0, delete 0, find 0/0
Free swap = 0kB
Total swap = 0kB
65536 pages RAM
0 pages HighMem/MovableOnly
10766 pages reserved
0 pages cma reserved
0 pages hwpoisoned
Reviewed-by: Jun Li <jun.li@nxp.com>
Signed-off-by: Peter Chen <peter.chen@nxp.com>
2020-02-21 21:40:57 +08:00
|
|
|
static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
|
|
|
|
struct ci_hw_req *hwreq)
|
|
|
|
{
|
|
|
|
unsigned int rest = hwreq->req.length;
|
|
|
|
int pages = TD_PAGE_COUNT;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (rest == 0) {
|
|
|
|
ret = add_td_to_list(hwep, hwreq, 0, NULL);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The first buffer could be not page aligned.
|
|
|
|
* In that case we have to span into one extra td.
|
|
|
|
*/
|
|
|
|
if (hwreq->req.dma % PAGE_SIZE)
|
|
|
|
pages--;
|
|
|
|
|
|
|
|
while (rest > 0) {
|
|
|
|
unsigned int count = min(hwreq->req.length - hwreq->req.actual,
|
|
|
|
(unsigned int)(pages * CI_HDRC_PAGE_SIZE));
|
|
|
|
|
|
|
|
ret = add_td_to_list(hwep, hwreq, count, NULL);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
rest -= count;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
|
|
|
|
&& (hwreq->req.length % hwep->ep.maxpacket == 0)) {
|
|
|
|
ret = add_td_to_list(hwep, hwreq, 0, NULL);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
|
|
|
|
struct scatterlist *s)
|
|
|
|
{
|
|
|
|
unsigned int rest = sg_dma_len(s);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
hwreq->req.actual = 0;
|
|
|
|
while (rest > 0) {
|
|
|
|
unsigned int count = min_t(unsigned int, rest,
|
|
|
|
CI_MAX_BUF_SIZE);
|
|
|
|
|
|
|
|
ret = add_td_to_list(hwep, hwreq, count, s);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
rest -= count;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ci_add_buffer_entry: append one scatterlist buffer to a partially
 * filled TD by occupying its remaining page slots
 * @node: TD that still has td_remaining_size bytes of slot capacity
 * @s: scatterlist entry whose DMA pages fill the free slots
 *
 * Caller (prepare_td_for_sg) has already checked that
 * sg_dma_len(s) <= node->td_remaining_size.
 */
static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
{
	/* index of the first unused page slot in this TD */
	int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
			/ CI_HDRC_PAGE_SIZE;
	int i;

	/* grow the TD's total-bytes field by this buffer's length */
	node->ptr->token +=
		cpu_to_le32(sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));

	/* fill the remaining page pointers with consecutive pages of s */
	for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
		u32 page = (u32) sg_dma_address(s) +
			(i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;

		/* hardware requires the low reserved bits to be clear */
		page &= ~TD_RESERVED_MASK;
		node->ptr->page[i] = cpu_to_le32(page);
	}
}
|
|
|
|
|
|
|
|
/*
 * prepare_td_for_sg: build the TD list for a scatter/gather mapped request
 * @hwep: endpoint
 * @hwreq: request (req.sg / req.num_mapped_sgs describe the buffers)
 *
 * Each sg buffer must be page aligned; small buffers are packed into the
 * free page slots of the previously built TD when they fit, otherwise a
 * fresh TD chain is created for the buffer.
 *
 * Returns 0 on success or a negative error code.
 */
static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	struct usb_request *req = &hwreq->req;
	struct scatterlist *s = req->sg;
	int ret = 0, i = 0;
	struct td_node *node = NULL;

	/* sg mode supports neither ZLP insertion nor empty requests */
	if (!s || req->zero || req->length == 0) {
		dev_err(hwep->ci->dev, "not supported operation for sg\n");
		return -EINVAL;
	}

	while (i++ < req->num_mapped_sgs) {
		if (sg_dma_address(s) % PAGE_SIZE) {
			dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
			return -EINVAL;
		}

		if (node && (node->td_remaining_size >= sg_dma_len(s))) {
			/* buffer fits into the last TD's spare page slots */
			ci_add_buffer_entry(node, s);
			node->td_remaining_size -= sg_dma_len(s);
		} else {
			ret = prepare_td_per_sg(hwep, hwreq, s);
			if (ret)
				return ret;

			/* remember the newest TD for possible packing */
			node = list_entry(hwreq->tds.prev,
				struct td_node, td);
		}

		s = sg_next(s);
	}

	return ret;
}
|
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/**
 * _hardware_enqueue: configures a request at hardware level
 * @hwep: endpoint
 * @hwreq: request
 *
 * Builds the TD chain for the request and hands it to the controller,
 * either by hot-appending to an already queued request or by (re)priming
 * the endpoint's queue head.
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	struct ci_hdrc *ci = hwep->ci;
	int ret = 0;
	struct td_node *firstnode, *lastnode;

	/* don't queue twice */
	if (hwreq->req.status == -EALREADY)
		return -EALREADY;

	/* -EALREADY marks the request as owned by the hardware */
	hwreq->req.status = -EALREADY;

	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
					    &hwreq->req, hwep->dir);
	if (ret)
		return ret;

	if (hwreq->req.num_mapped_sgs)
		ret = prepare_td_for_sg(hwep, hwreq);
	else
		ret = prepare_td_for_non_sg(hwep, hwreq);

	if (ret)
		return ret;

	firstnode = list_first_entry(&hwreq->tds, struct td_node, td);

	lastnode = list_entry(hwreq->tds.prev,
		struct td_node, td);

	/* terminate the chain and request an IRQ on the final TD */
	lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
	if (!hwreq->req.no_interrupt)
		lastnode->ptr->token |= cpu_to_le32(TD_IOC);
	/* TDs must be visible in memory before the controller sees them */
	wmb();

	hwreq->req.actual = 0;
	if (!list_empty(&hwep->qh.queue)) {
		struct ci_hw_req *hwreqprev;
		int n = hw_ep_bit(hwep->num, hwep->dir);
		int tmp_stat;
		struct td_node *prevlastnode;
		u32 next = firstnode->dma & TD_ADDR_MASK;

		/* link the new chain after the last queued request's TDs */
		hwreqprev = list_entry(hwep->qh.queue.prev,
				struct ci_hw_req, queue);
		prevlastnode = list_entry(hwreqprev->tds.prev,
				struct td_node, td);

		prevlastnode->ptr->next = cpu_to_le32(next);
		wmb();
		/* endpoint still primed: hardware will pick up the link */
		if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
			goto done;
		/*
		 * ATDTW tripwire: sample ENDPTSTAT atomically w.r.t. the
		 * controller's own dTD fetch; retry until the tripwire
		 * survives the read.
		 */
		do {
			hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
		hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
		/* endpoint still active: no need to re-prime */
		if (tmp_stat)
			goto done;
	}

	/* QH configuration */
	hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));

	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
		/* packets per (micro)frame, rounded up; 0-length counts as 1 */
		u32 mul = hwreq->req.length / hwep->ep.maxpacket;

		if (hwreq->req.length == 0
				|| hwreq->req.length % hwep->ep.maxpacket)
			mul++;
		hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
	}

	ret = hw_ep_prime(ci, hwep->num, hwep->dir,
			   hwep->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}
|
|
|
|
|
2020-07-04 01:41:29 +08:00
|
|
|
/**
|
2013-06-13 22:59:54 +08:00
|
|
|
* free_pending_td: remove a pending request for the endpoint
|
2020-07-04 01:41:29 +08:00
|
|
|
* @ci: the controller
|
2013-06-13 23:00:03 +08:00
|
|
|
* @hwep: endpoint
|
2013-06-13 22:59:54 +08:00
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static void free_pending_td(struct ci_hw_ep *hwep)
|
2013-06-13 22:59:54 +08:00
|
|
|
{
|
2013-06-13 23:00:03 +08:00
|
|
|
struct td_node *pending = hwep->pending_td;
|
2013-06-13 22:59:54 +08:00
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
|
|
|
|
hwep->pending_td = NULL;
|
2013-06-13 22:59:54 +08:00
|
|
|
kfree(pending);
|
|
|
|
}
|
|
|
|
|
usb: chipidea: Add errata for revision 2.40a
At chipidea revision 2.40a, there is a below errata:
9000531823 B2-Medium Adding a dTD to a Primed Endpoint May Not Get Recognized
Title: Adding a dTD to a Primed Endpoint May Not Get Recognized
Impacted Configuration: All device mode configurations.
Description:
There is an issue with the add dTD tripwire semaphore (ATDTW bit in USBCMD register)
that can cause the controller to ignore a dTD that is added to a primed endpoint.
When this happens, the software can read the tripwire bit and the status bit at '1'
even though the endpoint is unprimed.
After executing a dTD, the device controller endpoint state machine executes a final
read of the dTD terminate bit to check if the application added a dTD to the linked
list at the last moment. This read is done in the finpkt_read_latest_next_td (44) state.
After the read is performed, if the terminate bit is still set, the state machine moves
to unprime the endpoint. The decision to unprime the endpoint is done in the
checkqh_decision (59) state, based on the value of the terminate bit.
Before reaching the checkqh_decision state, the state machine traverses the
writeqhtd_status (57), writeqh_status (56), and release_prime_mask (42) states.
As shown in the waveform, the ep_addtd_tripwire_clr signal is not set to clear
the tripwire bit in these states.
Workaround:
The software must implement a periodic poll cycle, and check for each dTD
pending on execution (Active = 1), if the enpoint is primed. It can do this by reading
the corresponding bits in the ENDPTPRIME and ENDPTSTAT registers. If these bits are
read at 0, the software needs to re-prime the endpoint by writing 1 to the corresponding
bit in the ENDPTPRIME register. This can be done for every microframe, every frame or
with a larger interval, depending on the urgency of transfer execution for the application.
Tested-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Peter Chen <peter.chen@freescale.com>
Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2015-02-11 12:44:56 +08:00
|
|
|
/*
 * reprime_dtd: re-prime an endpoint on a dTD the controller missed
 * @ci: controller
 * @hwep: endpoint
 * @node: TD that is still active but no longer primed
 *
 * Workaround for the ChipIdea rev 2.40a ATDTW errata: a dTD added to a
 * primed endpoint may be ignored, leaving it active with ENDPTSTAT clear;
 * point the QH back at the TD and prime again.
 *
 * Returns the result of hw_ep_prime().
 */
static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
					   struct td_node *node)
{
	hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));

	return hw_ep_prime(ci, hwep->num, hwep->dir,
				hwep->type == USB_ENDPOINT_XFER_CONTROL);
}
|
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/**
 * _hardware_dequeue: handles a request at hardware level
 * @hwep: endpoint
 * @hwreq: request
 *
 * Walks the request's TD chain, accumulating completed byte counts and
 * translating TD status bits into errno values.
 *
 * This function returns the number of bytes transferred, a negative
 * error code on failure, or -EBUSY while TDs are still active.
 */
static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	u32 tmptoken;
	struct td_node *node, *tmpnode;
	unsigned remaining_length;
	unsigned actual = hwreq->req.length;
	struct ci_hdrc *ci = hwep->ci;

	/* only requests currently owned by the hardware can complete */
	if (hwreq->req.status != -EALREADY)
		return -EINVAL;

	hwreq->req.status = 0;

	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		tmptoken = le32_to_cpu(node->ptr->token);
		if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
			int n = hw_ep_bit(hwep->num, hwep->dir);

			/* rev 2.40a errata: re-prime a dropped active dTD */
			if (ci->rev == CI_REVISION_24)
				if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
					reprime_dtd(ci, hwep, node);
			hwreq->req.status = -EALREADY;
			return -EBUSY;
		}

		/* bytes NOT transferred for this TD */
		remaining_length = (tmptoken & TD_TOTAL_BYTES);
		remaining_length >>= __ffs(TD_TOTAL_BYTES);
		actual -= remaining_length;

		hwreq->req.status = tmptoken & TD_STATUS;
		if ((TD_STATUS_HALTED & hwreq->req.status)) {
			hwreq->req.status = -EPIPE;
			break;
		} else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
			hwreq->req.status = -EPROTO;
			break;
		} else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
			hwreq->req.status = -EILSEQ;
			break;
		}

		/* a short IN transfer means the controller stalled mid-TD */
		if (remaining_length) {
			if (hwep->dir == TX) {
				hwreq->req.status = -EPROTO;
				break;
			}
		}
		/*
		 * As the hardware could still address the freed td
		 * which will run the udc unusable, the cleanup of the
		 * td has to be delayed by one.
		 */
		if (hwep->pending_td)
			free_pending_td(hwep);

		hwep->pending_td = node;
		list_del_init(&node->td);
	}

	usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
			&hwreq->req, hwep->dir);

	hwreq->req.actual += actual;

	if (hwreq->req.status)
		return hwreq->req.status;

	return hwreq->req.actual;
}
|
|
|
|
|
|
|
|
/**
 * _ep_nuke: dequeues all endpoint requests
 * @hwep: endpoint
 *
 * Flushes the endpoint, frees every queued TD and completes each request
 * with -ESHUTDOWN, temporarily dropping the lock around the completion
 * callbacks.
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int _ep_nuke(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct td_node *node, *tmpnode;
	if (hwep == NULL)
		return -EINVAL;

	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	while (!list_empty(&hwep->qh.queue)) {

		/* pop oldest request */
		struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
						     struct ci_hw_req, queue);

		list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
			dma_pool_free(hwep->td_pool, node->ptr, node->dma);
			list_del_init(&node->td);
			node->ptr = NULL;
			kfree(node);
		}

		list_del_init(&hwreq->queue);
		hwreq->req.status = -ESHUTDOWN;

		if (hwreq->req.complete != NULL) {
			/* completion may re-enter the driver; drop the lock */
			spin_unlock(hwep->lock);
			usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
			spin_lock(hwep->lock);
		}
	}

	/* also drop the TD whose cleanup was deferred by _hardware_dequeue */
	if (hwep->pending_td)
		free_pending_td(hwep);

	return 0;
}
|
|
|
|
|
usb: chipidea: udc: using the correct stall implementation
According to spec, there are functional and protocol stalls.
For functional stall, it is for bulk and interrupt endpoints,
below are cases for it:
- Host sends SET_FEATURE request for Set-Halt, the udc driver
needs to set stall, and return true unconditionally.
- The gadget driver may call usb_ep_set_halt to stall certain
endpoints, if there is a transfer in pending, the udc driver
should not set stall, and return -EAGAIN accordingly.
These two kinds of stall need to be cleared by host using CLEAR_FEATURE
request (Clear-Halt).
For protocol stall, it is for control endpoint, this stall will
be set if the control request has failed. This stall will be
cleared by next setup request (hardware will do it).
It fixed usbtest (drivers/usb/misc/usbtest.c) Test 13 "set/clear halt"
test failure, meanwhile, this change has been verified by
USB2 CV Compliance Test and MSC Tests.
Cc: <stable@vger.kernel.org> #3.10+
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Felipe Balbi <balbi@ti.com>
Signed-off-by: Peter Chen <peter.chen@freescale.com>
2015-08-24 14:10:07 +08:00
|
|
|
/*
 * _ep_set_halt: set or clear the halt (stall) state of an endpoint
 * @ep: endpoint
 * @value: non-zero to halt, zero to clear the halt
 * @check_transfer: when halting, refuse with -EAGAIN if a TX transfer
 *	is still queued (functional-stall semantics for usb_ep_set_halt)
 *
 * Returns 0 on success, -EINVAL / -EOPNOTSUPP / -EAGAIN otherwise.
 */
static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	/* isochronous endpoints cannot halt */
	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
		return -EOPNOTSUPP;

	spin_lock_irqsave(hwep->lock, flags);

	/* gadget-initiated halt must not clobber an in-flight TX transfer */
	if (value && hwep->dir == TX && check_transfer &&
		!list_empty(&hwep->qh.queue) &&
			!usb_endpoint_xfer_control(hwep->ep.desc)) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return -EAGAIN;
	}

	/*
	 * A control endpoint is bidirectional: toggle dir so both halves
	 * are (un)halted; other endpoint types run the loop exactly once.
	 */
	direction = hwep->dir;
	do {
		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);

		if (!value)
			hwep->wedge = 0;

		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
			hwep->dir = (hwep->dir == TX) ? RX : TX;

	} while (hwep->dir != direction);

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}
|
|
|
|
|
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * Called on bus reset / disconnect. After it returns, no further hardware
 * access on behalf of the gadget is expected until it is re-enabled.
 *
 * This function returns an error code
 */
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
	struct usb_ep *ep;
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	unsigned long flags;

	/* flush all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_fifo_flush(ep);
	}
	/* ep0 is not part of the gadget's endpoint list; flush it explicitly */
	usb_ep_fifo_flush(&ci->ep0out->ep);
	usb_ep_fifo_flush(&ci->ep0in->ep);

	/* make sure to disable all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_disable(ep);
	}

	/* drop the pre-allocated ep0 status-phase request, if any */
	if (ci->status != NULL) {
		usb_ep_free_request(&ci->ep0in->ep, ci->status);
		ci->status = NULL;
	}

	/* reset the software connection state under the controller lock */
	spin_lock_irqsave(&ci->lock, flags);
	ci->gadget.speed = USB_SPEED_UNKNOWN;
	ci->remote_wakeup = 0;
	ci->suspended = 0;
	spin_unlock_irqrestore(&ci->lock, flags);

	return 0;
}
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
* ISR block
|
|
|
|
*****************************************************************************/
|
|
|
|
/**
 * isr_reset_handler: USB reset interrupt handler
 * @ci: UDC device
 *
 * This function resets USB engine after a bus reset occurred.
 * Caller holds ci->lock; it is temporarily released while notifying the
 * gadget driver and stopping activity (both may sleep or re-enter the
 * driver), as the sparse annotations document.
 */
static void isr_reset_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	int retval;

	/* drop the lock: udc_reset/stop_activity call back into gadget code */
	spin_unlock(&ci->lock);
	/* only notify the gadget driver if we were previously connected */
	if (ci->gadget.speed != USB_SPEED_UNKNOWN)
		usb_gadget_udc_reset(&ci->gadget, ci->driver);

	retval = _gadget_stop_activity(&ci->gadget);
	if (retval)
		goto done;

	retval = hw_usb_reset(ci);
	if (retval)
		goto done;

	/* pre-allocate the request used for ep0 status phases */
	ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
	if (ci->status == NULL)
		retval = -ENOMEM;

done:
	spin_lock(&ci->lock);

	if (retval)
		dev_err(ci->dev, "error: %i\n", retval);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* isr_get_status_complete: get_status request complete function
|
|
|
|
* @ep: endpoint
|
|
|
|
* @req: request handled
|
|
|
|
*
|
|
|
|
* Caller must release lock
|
|
|
|
*/
|
|
|
|
static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
|
|
|
|
{
|
2012-05-09 04:29:02 +08:00
|
|
|
if (ep == NULL || req == NULL)
|
2008-11-18 06:14:51 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
kfree(req->buf);
|
|
|
|
usb_ep_free_request(ep, req);
|
|
|
|
}
|
|
|
|
|
2013-03-30 18:54:09 +08:00
|
|
|
/**
 * _ep_queue: queues (submits) an I/O request to an endpoint
 * @ep: endpoint
 * @req: request
 * @gfp_flags: GFP flags (not used)
 *
 * Caller must hold lock
 * This function returns an error code
 */
static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	struct ci_hdrc *ci = hwep->ci;
	int retval = 0;

	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The data stage of a control transfer goes through ep0out
		 * or ep0in depending on the direction of the current setup;
		 * redirect the request to the proper hardware endpoint.
		 */
		if (req->length)
			hwep = (ci->ep0_dir == RX) ?
			       ci->ep0out : ci->ep0in;
		/* a stale control transfer must be aborted before queuing */
		if (!list_empty(&hwep->qh.queue)) {
			_ep_nuke(hwep);
			dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
				 _usb_addr(hwep));
		}
	}

	/* isoc requests are limited to mult * maxpacket bytes per frame */
	if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
	    hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
		dev_err(hwep->ci->dev, "request length too big for isochronous\n");
		return -EMSGSIZE;
	}

	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&hwreq->queue)) {
		dev_err(hwep->ci->dev, "request already in queue\n");
		return -EBUSY;
	}

	/* push request */
	hwreq->req.status = -EINPROGRESS;
	hwreq->req.actual = 0;

	retval = _hardware_enqueue(hwep, hwreq);

	/* -EALREADY means the hardware already has TDs; not an error here */
	if (retval == -EALREADY)
		retval = 0;
	if (!retval)
		list_add_tail(&hwreq->queue, &hwep->qh.queue);

	return retval;
}
|
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/**
 * isr_get_status_response: get_status request response
 * @ci: ci struct
 * @setup: setup request packet
 *
 * Builds the two-byte GET_STATUS reply (device or endpoint recipient) and
 * queues it on ep0in; the buffer and request are freed by
 * isr_get_status_complete(). The endpoint lock is dropped around the
 * allocation/free calls, as the sparse annotations document.
 *
 * This function returns an error code
 */
static int isr_get_status_response(struct ci_hdrc *ci,
				struct usb_ctrlrequest *setup)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct ci_hw_ep *hwep = ci->ep0in;
	struct usb_request *req = NULL;
	gfp_t gfp_flags = GFP_ATOMIC;
	int dir, num, retval;

	if (hwep == NULL || setup == NULL)
		return -EINVAL;

	/* usb_ep_alloc_request takes the same lock; drop it first */
	spin_unlock(hwep->lock);
	req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
	spin_lock(hwep->lock);
	if (req == NULL)
		return -ENOMEM;

	req->complete = isr_get_status_complete;
	req->length = 2;	/* GET_STATUS reply is always two bytes */
	req->buf = kzalloc(req->length, gfp_flags);
	if (req->buf == NULL) {
		retval = -ENOMEM;
		goto err_free_req;
	}

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* bit 0: self-powered, bit 1: remote wakeup enabled */
		*(u16 *)req->buf = (ci->remote_wakeup << 1) |
			ci->gadget.is_selfpowered;
	} else if ((setup->bRequestType & USB_RECIP_MASK) \
		   == USB_RECIP_ENDPOINT) {
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		*(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
	}
	/* else do nothing; reserved for future use */

	retval = _ep_queue(&hwep->ep, req, gfp_flags);
	if (retval)
		goto err_free_buf;

	return 0;

 err_free_buf:
	kfree(req->buf);
 err_free_req:
	/* usb_ep_free_request also takes the lock; drop it around the call */
	spin_unlock(hwep->lock);
	usb_ep_free_request(&hwep->ep, req);
	spin_lock(hwep->lock);
	return retval;
}
|
|
|
|
|
2011-02-18 20:13:18 +08:00
|
|
|
/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep: endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hdrc *ci = req->context;
	unsigned long flags;

	/*
	 * SET_ADDRESS takes effect only after the status stage completes,
	 * so the address recorded by the setup handler is programmed here.
	 */
	if (ci->setaddr) {
		hw_usb_set_address(ci, ci->address);
		ci->setaddr = false;
		if (ci->address)
			usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
	}

	spin_lock_irqsave(&ci->lock, flags);
	if (ci->test_mode)
		hw_port_test_set(ci, ci->test_mode);
	spin_unlock_irqrestore(&ci->lock, flags);
}
|
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/**
 * isr_setup_status_phase: queues the status phase of a setup transaction
 * @ci: ci struct
 *
 * Queues the pre-allocated zero-length status request (ci->status) on the
 * endpoint opposite to the data stage direction.
 *
 * This function returns an error code
 */
static int isr_setup_status_phase(struct ci_hdrc *ci)
{
	struct ci_hw_ep *hwep;

	/*
	 * Unexpected USB controller behavior, caused by bad signal integrity
	 * or ground reference problems, can lead to isr_setup_status_phase
	 * being called with ci->status equal to NULL.
	 * If this situation occurs, you should review your USB hardware design.
	 */
	if (WARN_ON_ONCE(!ci->status))
		return -EPIPE;

	/* status stage runs opposite to the data stage direction */
	hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
	ci->status->context = ci;
	ci->status->complete = isr_setup_status_complete;

	return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
}
|
|
|
|
|
|
|
|
/**
 * isr_tr_complete_low: transaction complete low level handler
 * @hwep: endpoint
 *
 * Dequeues finished requests from the endpoint and gives them back to the
 * gadget driver, dropping the lock around each completion callback.
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct ci_hw_req *hwreq, *hwreqtemp;
	struct ci_hw_ep *hweptemp = hwep;
	int retval = 0;

	list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
			queue) {
		retval = _hardware_dequeue(hwep, hwreq);
		/* negative means this request (and the rest) is not done yet */
		if (retval < 0)
			break;
		list_del_init(&hwreq->queue);
		if (hwreq->req.complete != NULL) {
			/* completion callbacks run without the lock held */
			spin_unlock(hwep->lock);
			/*
			 * Control data stages are given back on ep0in
			 * regardless of which side they were queued on.
			 */
			if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
					hwreq->req.length)
				hweptemp = hwep->ci->ep0in;
			usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
			spin_lock(hwep->lock);
		}
	}

	/* -EBUSY just means the endpoint still has work pending */
	if (retval == -EBUSY)
		retval = 0;

	return retval;
}
|
|
|
|
|
2015-03-08 16:05:01 +08:00
|
|
|
static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
|
|
|
|
{
|
|
|
|
dev_warn(&ci->gadget.dev,
|
|
|
|
"connect the device to an alternate port if you want HNP\n");
|
|
|
|
return isr_setup_status_phase(ci);
|
|
|
|
}
|
|
|
|
|
2014-03-11 13:47:37 +08:00
|
|
|
/**
 * isr_setup_packet_handler: setup packet handler
 * @ci: UDC descriptor
 *
 * Decodes the setup packet from the ep0 queue head and handles the
 * standard chapter-9 requests in the driver, delegating everything else
 * to the gadget driver's ->setup(). On any failure ep0 is stalled
 * (protocol stall). Caller holds ci->lock; it is dropped around calls
 * that may re-enter the driver, as the sparse annotations document.
 */
static void isr_setup_packet_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
	struct usb_ctrlrequest req;
	int type, num, dir, err = -EINVAL;
	u8 tmode = 0;

	/*
	 * Flush data and handshake transactions of previous
	 * setup packet.
	 */
	_ep_nuke(ci->ep0out);
	_ep_nuke(ci->ep0in);

	/* read_setup_packet: the guard detects a concurrent overwrite */
	do {
		hw_test_and_set_setup_guard(ci);
		memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
	} while (!hw_test_and_clear_setup_guard(ci));

	type = req.bRequestType;

	ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

	switch (req.bRequest) {
	case USB_REQ_CLEAR_FEATURE:
		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
				le16_to_cpu(req.wValue) ==
				USB_ENDPOINT_HALT) {
			if (req.wLength != 0)
				break;
			num  = le16_to_cpu(req.wIndex);
			dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
			num &= USB_ENDPOINT_NUMBER_MASK;
			/* TX endpoints occupy the upper half of ci_hw_ep[] */
			if (dir == TX)
				num += ci->hw_ep_max / 2;
			/* a wedged endpoint stays halted until ep re-enable */
			if (!ci->ci_hw_ep[num].wedge) {
				spin_unlock(&ci->lock);
				err = usb_ep_clear_halt(
					&ci->ci_hw_ep[num].ep);
				spin_lock(&ci->lock);
				if (err)
					break;
			}
			err = isr_setup_status_phase(ci);
		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
				le16_to_cpu(req.wValue) ==
				USB_DEVICE_REMOTE_WAKEUP) {
			if (req.wLength != 0)
				break;
			ci->remote_wakeup = 0;
			err = isr_setup_status_phase(ci);
		} else {
			goto delegate;
		}
		break;
	case USB_REQ_GET_STATUS:
		/* OTG status selector requests are delegated as well */
		if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
			le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
			type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			type != (USB_DIR_IN|USB_RECIP_INTERFACE))
			goto delegate;
		if (le16_to_cpu(req.wLength) != 2 ||
			le16_to_cpu(req.wValue) != 0)
			break;
		err = isr_get_status_response(ci, &req);
		break;
	case USB_REQ_SET_ADDRESS:
		if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
			goto delegate;
		if (le16_to_cpu(req.wLength) != 0 ||
			le16_to_cpu(req.wIndex) != 0)
			break;
		/* address is applied after the status stage completes */
		ci->address = (u8)le16_to_cpu(req.wValue);
		ci->setaddr = true;
		err = isr_setup_status_phase(ci);
		break;
	case USB_REQ_SET_FEATURE:
		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
				le16_to_cpu(req.wValue) ==
				USB_ENDPOINT_HALT) {
			if (req.wLength != 0)
				break;
			num  = le16_to_cpu(req.wIndex);
			dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
			num &= USB_ENDPOINT_NUMBER_MASK;
			/* TX endpoints occupy the upper half of ci_hw_ep[] */
			if (dir == TX)
				num += ci->hw_ep_max / 2;

			spin_unlock(&ci->lock);
			/* host-requested halt: stall unconditionally */
			err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
			spin_lock(&ci->lock);
			if (!err)
				isr_setup_status_phase(ci);
		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
			if (req.wLength != 0)
				break;
			switch (le16_to_cpu(req.wValue)) {
			case USB_DEVICE_REMOTE_WAKEUP:
				ci->remote_wakeup = 1;
				err = isr_setup_status_phase(ci);
				break;
			case USB_DEVICE_TEST_MODE:
				/* test selector is in the high byte of wIndex */
				tmode = le16_to_cpu(req.wIndex) >> 8;
				switch (tmode) {
				case USB_TEST_J:
				case USB_TEST_K:
				case USB_TEST_SE0_NAK:
				case USB_TEST_PACKET:
				case USB_TEST_FORCE_ENABLE:
					/* entered after the status stage */
					ci->test_mode = tmode;
					err = isr_setup_status_phase(
							ci);
					break;
				default:
					break;
				}
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				if (ci_otg_is_fsm_mode(ci)) {
					ci->gadget.b_hnp_enable = 1;
					err = isr_setup_status_phase(
							ci);
				}
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				if (ci_otg_is_fsm_mode(ci))
					err = otg_a_alt_hnp_support(ci);
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				if (ci_otg_is_fsm_mode(ci)) {
					ci->gadget.a_hnp_support = 1;
					err = isr_setup_status_phase(
							ci);
				}
				break;
			default:
				goto delegate;
			}
		} else {
			goto delegate;
		}
		break;
	default:
delegate:
		if (req.wLength == 0)	/* no data phase */
			ci->ep0_dir = TX;

		/* gadget driver's setup() may sleep; drop the lock */
		spin_unlock(&ci->lock);
		err = ci->driver->setup(&ci->gadget, &req);
		spin_lock(&ci->lock);
		break;
	}

	if (err < 0) {
		/* protocol stall on ep0; cleared by the next setup packet */
		spin_unlock(&ci->lock);
		if (_ep_set_halt(&hwep->ep, 1, false))
			dev_err(ci->dev, "error: _ep_set_halt\n");
		spin_lock(&ci->lock);
	}
}
|
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @ci: UDC descriptor
 *
 * This function handles traffic events
 *
 * Called with ci->lock held; the sparse annotations below record that the
 * lock is temporarily released around the ep0 halt on error.
 */
static void isr_tr_complete_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	unsigned i;
	int err;

	/* Walk every hardware endpoint and retire completed transfers */
	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];

		if (hwep->ep.desc == NULL)
			continue;   /* not configured */

		if (hw_test_and_clear_complete(ci, i)) {
			err = isr_tr_complete_low(hwep);
			if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(ci);
				if (err < 0) {
					/*
					 * Control request failed: set a
					 * protocol stall on ep0.  ci->lock is
					 * dropped around _ep_set_halt() (see
					 * annotations above); "false" means
					 * this is not a functional stall
					 * requested by host/gadget.
					 */
					spin_unlock(&ci->lock);
					if (_ep_set_halt(&hwep->ep, 1, false))
						dev_err(ci->dev,
							"error: _ep_set_halt\n");
					spin_lock(&ci->lock);
				}
			}
		}

		/* Only handle setup packet below */
		if (i == 0 &&
			hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
			isr_setup_packet_handler(ci);
	}
}
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
* ENDPT block
|
|
|
|
*****************************************************************************/
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int retval = 0;
	unsigned long flags;
	u32 cap = 0;

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);

	/* only internal SW should enable ctrl endpts */

	if (!list_empty(&hwep->qh.queue)) {
		dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
		spin_unlock_irqrestore(hwep->lock, flags);
		return -EBUSY;
	}

	hwep->ep.desc = desc;

	/* Cache direction, number and type decoded from the descriptor */
	hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
	hwep->num  = usb_endpoint_num(desc);
	hwep->type = usb_endpoint_type(desc);

	hwep->ep.maxpacket = usb_endpoint_maxp(desc);
	hwep->ep.mult = usb_endpoint_maxp_mult(desc);

	/* IOS: interrupt on setup, only meaningful for control endpoints */
	if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
		cap |= QH_IOS;

	/*
	 * ZLT=1 disables automatic zero-length-packet generation by the
	 * controller; ZLPs are generated in software (based on req->zero)
	 * instead.
	 */
	cap |= QH_ZLT;
	cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
	/*
	 * For ISO-TX, we set mult at QH as the largest value, and use
	 * MultO at TD as real mult value.
	 */
	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
		cap |= 3 << __ffs(QH_MULT);

	hwep->qh.ptr->cap = cpu_to_le32(cap);

	hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */

	if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
		dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
		retval = -EINVAL;
	}

	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (hwep->num)
		retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
				       hwep->type);

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 */
static int ep_disable(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL)
		return -EINVAL;
	else if (hwep->ep.desc == NULL)
		return -EBUSY;	/* endpoint was never enabled */

	spin_lock_irqsave(hwep->lock, flags);
	/*
	 * Gadget already stopped: the controller may be in low power, so
	 * skip all hardware access and report success.
	 */
	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return 0;
	}

	/* only internal SW should disable ctrl endpts */

	direction = hwep->dir;
	/*
	 * Nuke and disable the endpoint; a control endpoint is
	 * bidirectional, so toggle dir and repeat for the other direction
	 * until we are back at the original one.
	 */
	do {
		retval |= _ep_nuke(hwep);
		retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);

		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
			hwep->dir = (hwep->dir == TX) ? RX : TX;

	} while (hwep->dir != direction);

	hwep->ep.desc = NULL;

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
|
2008-11-18 06:14:51 +08:00
|
|
|
* ep_alloc_request: allocate a request object to use with this endpoint
|
|
|
|
*
|
|
|
|
* Check usb_ep_alloc_request() at "usb_gadget.h" for details
|
|
|
|
*/
|
|
|
|
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
|
|
|
|
{
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hw_req *hwreq = NULL;
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-05-09 04:29:02 +08:00
|
|
|
if (ep == NULL)
|
2008-11-18 06:14:51 +08:00
|
|
|
return NULL;
|
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
|
2013-06-13 23:00:03 +08:00
|
|
|
if (hwreq != NULL) {
|
|
|
|
INIT_LIST_HEAD(&hwreq->queue);
|
|
|
|
INIT_LIST_HEAD(&hwreq->tds);
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
return (hwreq == NULL) ? NULL : &hwreq->req;
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hw_ep *hwep  = container_of(ep,  struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	struct td_node *node, *tmpnode;
	unsigned long flags;

	if (ep == NULL || req == NULL) {
		return;
	} else if (!list_empty(&hwreq->queue)) {
		/* refuse to free a request that is still queued */
		dev_err(hwep->ci->dev, "freeing queued request\n");
		return;
	}

	spin_lock_irqsave(hwep->lock, flags);

	/* Return every TD still attached to the request to the DMA pool */
	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
		list_del_init(&node->td);
		node->ptr = NULL;
		kfree(node);
	}

	kfree(hwreq);

	spin_unlock_irqrestore(hwep->lock, flags);
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue()* at usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);
	/*
	 * Gadget already stopped: no hardware access allowed (controller
	 * may be in low power); silently succeed.
	 */
	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return 0;
	}
	/* _ep_queue() does the real work; it expects hwep->lock held */
	retval = _ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	unsigned long flags;
	struct td_node *node, *tmpnode;

	/* Only an in-flight (-EALREADY) request on an enabled ep can be
	 * dequeued */
	if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
		hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
		list_empty(&hwep->qh.queue))
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);
	/* Touch the hardware only while the gadget is still running */
	if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
		hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	/*
	 * Free the unfinished TDs now, so a stale TD cannot be picked up
	 * by a later completion after this request is re-queued.
	 */
	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
		list_del(&node->td);
		kfree(node);
	}

	/* pop request */
	list_del_init(&hwreq->queue);

	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);

	req->status = -ECONNRESET;

	if (hwreq->req.complete != NULL) {
		/* drop the lock across the gadget completion callback */
		spin_unlock(hwep->lock);
		usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
		spin_lock(hwep->lock);
	}

	spin_unlock_irqrestore(hwep->lock, flags);
	return 0;
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	/*
	 * check_transfer=true: gadget-initiated halts must fail with
	 * -EAGAIN when a transfer is pending (unlike host Set-Halt).
	 */
	return _ep_set_halt(ep, value, true);
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
|
2008-11-18 06:14:51 +08:00
|
|
|
* ep_set_wedge: sets the halt feature and ignores clear requests
|
|
|
|
*
|
|
|
|
* Check usb_ep_set_wedge() at "usb_gadget.h" for details
|
|
|
|
*/
|
|
|
|
static int ep_set_wedge(struct usb_ep *ep)
|
|
|
|
{
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
|
2008-11-18 06:14:51 +08:00
|
|
|
unsigned long flags;
|
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
if (ep == NULL || hwep->ep.desc == NULL)
|
2008-11-18 06:14:51 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
spin_lock_irqsave(hwep->lock, flags);
|
|
|
|
hwep->wedge = 1;
|
|
|
|
spin_unlock_irqrestore(hwep->lock, flags);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
|
|
|
return usb_ep_set_halt(ep);
|
|
|
|
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * ep_fifo_flush: flushes contents of a fifo
 *
 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
 */
static void ep_fifo_flush(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	unsigned long flags;

	if (ep == NULL) {
		dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
		return;
	}

	spin_lock_irqsave(hwep->lock, flags);
	/*
	 * Gadget already stopped: skip the register access, the
	 * controller may be in low power.
	 */
	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return;
	}

	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	spin_unlock_irqrestore(hwep->lock, flags);
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * Endpoint-specific part of the API to the USB controller hardware
 * Check "usb_gadget.h" for details
 *
 * This ops table is shared by all endpoints of the controller.
 */
static const struct usb_ep_ops usb_ep_ops = {
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
* GADGET block
|
|
|
|
*****************************************************************************/
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
 * ci_hdrc_gadget_connect: caller makes sure gadget driver is binded
 *
 * @is_active: nonzero when VBUS becomes active (attach), zero on detach.
 *
 * On attach: wake the device, reset the controller and, if a gadget
 * driver is bound, start the device state machine.  On detach: notify
 * the gadget driver, stop the controller and allow runtime suspend.
 */
static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);

	if (is_active) {
		/* keep the controller powered while the gadget is active */
		pm_runtime_get_sync(ci->dev);
		hw_device_reset(ci);
		spin_lock_irq(&ci->lock);
		if (ci->driver) {
			hw_device_state(ci, ci->ep0out->qh.dma);
			usb_gadget_set_state(_gadget, USB_STATE_POWERED);
			/* drop the lock before calling into UDC core */
			spin_unlock_irq(&ci->lock);
			usb_udc_vbus_handler(_gadget, true);
		} else {
			spin_unlock_irq(&ci->lock);
		}
	} else {
		usb_udc_vbus_handler(_gadget, false);
		if (ci->driver)
			ci->driver->disconnect(&ci->gadget);
		/* stop the controller before tearing down activity */
		hw_device_state(ci, 0);
		if (ci->platdata->notify_event)
			ci->platdata->notify_event(ci,
			CI_HDRC_CONTROLLER_STOPPED_EVENT);
		_gadget_stop_activity(&ci->gadget);
		/* balances the pm_runtime_get_sync() on attach */
		pm_runtime_put_sync(ci->dev);
		usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
	}
}
|
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
|
2010-12-07 20:24:02 +08:00
|
|
|
{
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
|
2010-12-07 20:24:02 +08:00
|
|
|
unsigned long flags;
|
2020-01-23 14:00:26 +08:00
|
|
|
int ret = 0;
|
2010-12-07 20:24:02 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
spin_lock_irqsave(&ci->lock, flags);
|
|
|
|
ci->vbus_active = is_active;
|
|
|
|
spin_unlock_irqrestore(&ci->lock, flags);
|
2010-12-07 20:24:02 +08:00
|
|
|
|
2017-09-04 23:14:01 +08:00
|
|
|
if (ci->usb_phy)
|
|
|
|
usb_phy_set_charger_state(ci->usb_phy, is_active ?
|
|
|
|
USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
|
|
|
|
|
2020-01-23 14:00:26 +08:00
|
|
|
if (ci->platdata->notify_event)
|
|
|
|
ret = ci->platdata->notify_event(ci,
|
|
|
|
CI_HDRC_CONTROLLER_VBUS_EVENT);
|
|
|
|
|
2019-09-10 14:54:57 +08:00
|
|
|
if (ci->driver)
|
2019-09-10 10:54:52 +08:00
|
|
|
ci_hdrc_gadget_connect(_gadget, is_active);
|
2010-12-07 20:24:02 +08:00
|
|
|
|
2020-01-23 14:00:26 +08:00
|
|
|
return ret;
|
2010-12-07 20:24:02 +08:00
|
|
|
}
|
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
static int ci_udc_wakeup(struct usb_gadget *_gadget)
|
2011-02-18 20:13:17 +08:00
|
|
|
{
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
|
2011-02-18 20:13:17 +08:00
|
|
|
unsigned long flags;
|
|
|
|
int ret = 0;
|
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
spin_lock_irqsave(&ci->lock, flags);
|
usb: chipidea: udc: don't do hardware access if gadget has stopped
After _gadget_stop_activity is executed, we can consider the hardware
operation for gadget has finished, and the udc can be stopped and enter
low power mode. So, any later hardware operations (from usb_ep_ops APIs
or usb_gadget_ops APIs) should be considered invalid, any deinitializatons
has been covered at _gadget_stop_activity.
I meet this problem when I plug out usb cable from PC using mass_storage
gadget, my callstack like: vbus interrupt->.vbus_session->
composite_disconnect ->pm_runtime_put_sync(&_gadget->dev),
the composite_disconnect will call fsg_disable, but fsg_disable calls
usb_ep_disable using async way, there are register accesses for
usb_ep_disable. So sometimes, I get system hang due to visit register
without clock, sometimes not.
The Linux Kernel USB maintainer Alan Stern suggests this kinds of solution.
See: http://marc.info/?l=linux-usb&m=138541769810983&w=2.
Cc: <stable@vger.kernel.org> #v4.9+
Signed-off-by: Peter Chen <peter.chen@nxp.com>
Link: https://lore.kernel.org/r/20190820020503.27080-2-peter.chen@nxp.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2019-08-20 10:07:58 +08:00
|
|
|
if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
|
|
|
|
spin_unlock_irqrestore(&ci->lock, flags);
|
|
|
|
return 0;
|
|
|
|
}
|
2012-07-07 22:56:40 +08:00
|
|
|
if (!ci->remote_wakeup) {
|
2011-02-18 20:13:17 +08:00
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
goto out;
|
|
|
|
}
|
2012-07-07 22:56:40 +08:00
|
|
|
if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
|
2011-02-18 20:13:17 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2012-07-07 22:56:40 +08:00
|
|
|
hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
|
2011-02-18 20:13:17 +08:00
|
|
|
out:
|
2012-07-07 22:56:40 +08:00
|
|
|
spin_unlock_irqrestore(&ci->lock, flags);
|
2011-02-18 20:13:17 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
|
2011-05-04 12:49:47 +08:00
|
|
|
{
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
|
2011-05-04 12:49:47 +08:00
|
|
|
|
2014-10-31 01:41:16 +08:00
|
|
|
if (ci->usb_phy)
|
|
|
|
return usb_phy_set_power(ci->usb_phy, ma);
|
2011-05-04 12:49:47 +08:00
|
|
|
return -ENOTSUPP;
|
|
|
|
}
|
|
|
|
|
2015-01-28 16:32:25 +08:00
|
|
|
static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
|
|
|
|
{
|
|
|
|
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
|
|
|
|
struct ci_hw_ep *hwep = ci->ep0in;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(hwep->lock, flags);
|
|
|
|
_gadget->is_selfpowered = (is_on != 0);
|
|
|
|
spin_unlock_irqrestore(hwep->lock, flags);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-09-12 19:58:01 +08:00
|
|
|
/* Change Data+ pullup status
|
2019-10-24 23:27:47 +08:00
|
|
|
* this func is used by usb_gadget_connect/disconnect
|
2012-09-12 19:58:01 +08:00
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
|
2012-09-12 19:58:01 +08:00
|
|
|
{
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
|
2012-09-12 19:58:01 +08:00
|
|
|
|
2016-08-16 19:19:11 +08:00
|
|
|
/*
|
|
|
|
* Data+ pullup controlled by OTG state machine in OTG fsm mode;
|
|
|
|
* and don't touch Data+ in host mode for dual role config.
|
|
|
|
*/
|
|
|
|
if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
|
2015-03-23 16:03:35 +08:00
|
|
|
return 0;
|
|
|
|
|
2020-01-23 10:49:19 +08:00
|
|
|
pm_runtime_get_sync(ci->dev);
|
2012-09-12 19:58:01 +08:00
|
|
|
if (is_on)
|
|
|
|
hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
|
|
|
|
else
|
|
|
|
hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
|
2020-01-23 10:49:19 +08:00
|
|
|
pm_runtime_put_sync(ci->dev);
|
2012-09-12 19:58:01 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
static int ci_udc_start(struct usb_gadget *gadget,
|
2012-05-09 04:29:04 +08:00
|
|
|
struct usb_gadget_driver *driver);
|
2014-10-18 01:05:12 +08:00
|
|
|
static int ci_udc_stop(struct usb_gadget *gadget);
|
usb: chipidea: udc: workaround for endpoint conflict issue
An endpoint conflict occurs when the USB is working in device mode
during an isochronous communication. When the endpointA IN direction
is an isochronous IN endpoint, and the host sends an IN token to
endpointA on another device, then the OUT transaction may be missed
regardless the OUT endpoint number. Generally, this occurs when the
device is connected to the host through a hub and other devices are
connected to the same hub.
The affected OUT endpoint can be either control, bulk, isochronous, or
an interrupt endpoint. After the OUT endpoint is primed, if an IN token
to the same endpoint number on another device is received, then the OUT
endpoint may be unprimed (cannot be detected by software), which causes
this endpoint to no longer respond to the host OUT token, and thus, no
corresponding interrupt occurs.
There is no good workaround for this issue, the only thing the software
could do is numbering isochronous IN from the highest endpoint since we
have observed most of device number endpoint from the lowest.
Cc: <stable@vger.kernel.org> #v3.14+
Cc: Fabio Estevam <festevam@gmail.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Cc: Jun Li <jun.li@nxp.com>
Signed-off-by: Peter Chen <peter.chen@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2019-06-17 09:49:07 +08:00
|
|
|
|
|
|
|
/* Match ISOC IN from the highest endpoint */
|
|
|
|
static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
|
|
|
|
struct usb_endpoint_descriptor *desc,
|
|
|
|
struct usb_ss_ep_comp_descriptor *comp_desc)
|
|
|
|
{
|
|
|
|
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
|
|
|
|
struct usb_ep *ep;
|
|
|
|
|
|
|
|
if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) {
|
|
|
|
list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
|
|
|
|
if (ep->caps.dir_in && !ep->claimed)
|
|
|
|
return ep;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
|
2008-11-18 06:14:51 +08:00
|
|
|
* Device operations part of the API to the USB controller hardware,
|
|
|
|
* which don't involve endpoints (or i/o)
|
|
|
|
* Check "usb_gadget.h" for details
|
|
|
|
*/
|
2010-12-07 20:24:02 +08:00
|
|
|
static const struct usb_gadget_ops usb_gadget_ops = {
|
2013-06-24 19:46:36 +08:00
|
|
|
.vbus_session = ci_udc_vbus_session,
|
|
|
|
.wakeup = ci_udc_wakeup,
|
2015-01-28 16:32:25 +08:00
|
|
|
.set_selfpowered = ci_udc_selfpowered,
|
2013-06-24 19:46:36 +08:00
|
|
|
.pullup = ci_udc_pullup,
|
|
|
|
.vbus_draw = ci_udc_vbus_draw,
|
|
|
|
.udc_start = ci_udc_start,
|
|
|
|
.udc_stop = ci_udc_stop,
|
usb: chipidea: udc: workaround for endpoint conflict issue
An endpoint conflict occurs when the USB is working in device mode
during an isochronous communication. When the endpointA IN direction
is an isochronous IN endpoint, and the host sends an IN token to
endpointA on another device, then the OUT transaction may be missed
regardless the OUT endpoint number. Generally, this occurs when the
device is connected to the host through a hub and other devices are
connected to the same hub.
The affected OUT endpoint can be either control, bulk, isochronous, or
an interrupt endpoint. After the OUT endpoint is primed, if an IN token
to the same endpoint number on another device is received, then the OUT
endpoint may be unprimed (cannot be detected by software), which causes
this endpoint to no longer respond to the host OUT token, and thus, no
corresponding interrupt occurs.
There is no good workaround for this issue, the only thing the software
could do is numbering isochronous IN from the highest endpoint since we
have observed most of device number endpoint from the lowest.
Cc: <stable@vger.kernel.org> #v3.14+
Cc: Fabio Estevam <festevam@gmail.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Cc: Jun Li <jun.li@nxp.com>
Signed-off-by: Peter Chen <peter.chen@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2019-06-17 09:49:07 +08:00
|
|
|
.match_ep = ci_udc_match_ep,
|
2010-12-07 20:24:02 +08:00
|
|
|
};
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
static int init_eps(struct ci_hdrc *ci)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2012-05-09 04:29:03 +08:00
|
|
|
int retval = 0, i, j;
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
for (i = 0; i < ci->hw_ep_max/2; i++)
|
2011-01-11 11:49:22 +08:00
|
|
|
for (j = RX; j <= TX; j++) {
|
2012-07-07 22:56:40 +08:00
|
|
|
int k = i + j * ci->hw_ep_max/2;
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
|
2011-01-11 11:49:22 +08:00
|
|
|
(j == TX) ? "in" : "out");
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
hwep->ci = ci;
|
|
|
|
hwep->lock = &ci->lock;
|
|
|
|
hwep->td_pool = ci->td_pool;
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
hwep->ep.name = hwep->name;
|
|
|
|
hwep->ep.ops = &usb_ep_ops;
|
2015-07-31 22:00:17 +08:00
|
|
|
|
|
|
|
if (i == 0) {
|
|
|
|
hwep->ep.caps.type_control = true;
|
|
|
|
} else {
|
|
|
|
hwep->ep.caps.type_iso = true;
|
|
|
|
hwep->ep.caps.type_bulk = true;
|
|
|
|
hwep->ep.caps.type_int = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (j == TX)
|
|
|
|
hwep->ep.caps.dir_in = true;
|
|
|
|
else
|
|
|
|
hwep->ep.caps.dir_out = true;
|
|
|
|
|
2012-09-12 19:58:00 +08:00
|
|
|
/*
|
|
|
|
* for ep0: maxP defined in desc, for other
|
|
|
|
* eps, maxP is set by epautoconfig() called
|
|
|
|
* by gadget layer
|
|
|
|
*/
|
2013-12-13 19:23:38 +08:00
|
|
|
usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
INIT_LIST_HEAD(&hwep->qh.queue);
|
2016-09-08 20:34:30 +08:00
|
|
|
hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
|
|
|
|
&hwep->qh.dma);
|
2013-06-13 23:00:03 +08:00
|
|
|
if (hwep->qh.ptr == NULL)
|
2008-11-18 06:14:51 +08:00
|
|
|
retval = -ENOMEM;
|
2011-01-11 11:49:22 +08:00
|
|
|
|
2012-05-04 21:47:15 +08:00
|
|
|
/*
|
|
|
|
* set up shorthands for ep0 out and in endpoints,
|
|
|
|
* don't add to gadget's ep_list
|
|
|
|
*/
|
|
|
|
if (i == 0) {
|
|
|
|
if (j == RX)
|
2013-06-13 23:00:03 +08:00
|
|
|
ci->ep0out = hwep;
|
2012-05-04 21:47:15 +08:00
|
|
|
else
|
2013-06-13 23:00:03 +08:00
|
|
|
ci->ep0in = hwep;
|
2012-05-04 21:47:15 +08:00
|
|
|
|
2013-12-13 19:23:38 +08:00
|
|
|
usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
|
2011-01-11 11:49:22 +08:00
|
|
|
continue;
|
2012-05-04 21:47:15 +08:00
|
|
|
}
|
2011-01-11 11:49:22 +08:00
|
|
|
|
2013-06-13 23:00:03 +08:00
|
|
|
list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
|
2011-01-11 11:49:22 +08:00
|
|
|
}
|
2012-05-09 04:29:03 +08:00
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
static void destroy_eps(struct ci_hdrc *ci)
|
2012-09-12 19:58:03 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ci->hw_ep_max; i++) {
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
|
2012-09-12 19:58:03 +08:00
|
|
|
|
2013-09-10 15:34:39 +08:00
|
|
|
if (hwep->pending_td)
|
|
|
|
free_pending_td(hwep);
|
2013-06-13 23:00:03 +08:00
|
|
|
dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
|
2012-09-12 19:58:03 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-09 04:29:03 +08:00
|
|
|
/**
|
2013-06-24 19:46:36 +08:00
|
|
|
* ci_udc_start: register a gadget driver
|
2012-05-09 04:29:04 +08:00
|
|
|
* @gadget: our gadget
|
2012-05-09 04:29:03 +08:00
|
|
|
* @driver: the driver being registered
|
|
|
|
*
|
|
|
|
* Interrupts are enabled here.
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int ci_udc_start(struct usb_gadget *gadget,
|
2012-05-09 04:29:04 +08:00
|
|
|
struct usb_gadget_driver *driver)
|
2012-05-09 04:29:03 +08:00
|
|
|
{
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
|
2019-07-04 23:03:41 +08:00
|
|
|
int retval;
|
2012-05-09 04:29:03 +08:00
|
|
|
|
2012-05-09 04:29:04 +08:00
|
|
|
if (driver->disconnect == NULL)
|
2012-05-09 04:29:03 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
|
|
|
|
retval = usb_ep_enable(&ci->ep0out->ep);
|
2011-05-02 14:26:32 +08:00
|
|
|
if (retval)
|
|
|
|
return retval;
|
2011-06-29 21:41:57 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
|
|
|
|
retval = usb_ep_enable(&ci->ep0in->ep);
|
2011-05-02 14:26:32 +08:00
|
|
|
if (retval)
|
|
|
|
return retval;
|
2012-07-07 22:56:40 +08:00
|
|
|
|
|
|
|
ci->driver = driver;
|
2014-04-23 15:56:50 +08:00
|
|
|
|
|
|
|
/* Start otg fsm for B-device */
|
|
|
|
if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
|
|
|
|
ci_hdrc_otg_fsm_start(ci);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2019-09-10 10:54:52 +08:00
|
|
|
if (ci->vbus_active)
|
|
|
|
ci_hdrc_gadget_connect(gadget, 1);
|
|
|
|
else
|
2015-03-06 10:36:04 +08:00
|
|
|
usb_udc_vbus_handler(&ci->gadget, false);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2014-12-12 09:11:42 +08:00
|
|
|
static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
|
|
|
|
{
|
|
|
|
if (!ci_otg_is_fsm_mode(ci))
|
|
|
|
return;
|
|
|
|
|
|
|
|
mutex_lock(&ci->fsm.lock);
|
|
|
|
if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
|
|
|
|
ci->fsm.a_bidl_adis_tmout = 1;
|
|
|
|
ci_hdrc_otg_fsm_start(ci);
|
|
|
|
} else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
|
|
|
|
ci->fsm.protocol = PROTO_UNDEF;
|
|
|
|
ci->fsm.otg->state = OTG_STATE_UNDEFINED;
|
|
|
|
}
|
|
|
|
mutex_unlock(&ci->fsm.lock);
|
|
|
|
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
|
2013-06-24 19:46:36 +08:00
|
|
|
* ci_udc_stop: unregister a gadget driver
|
2008-11-18 06:14:51 +08:00
|
|
|
*/
|
2014-10-18 01:05:12 +08:00
|
|
|
static int ci_udc_stop(struct usb_gadget *gadget)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2013-06-24 19:46:36 +08:00
|
|
|
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
|
2012-05-09 04:29:04 +08:00
|
|
|
unsigned long flags;
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
spin_lock_irqsave(&ci->lock, flags);
|
2019-09-10 14:54:57 +08:00
|
|
|
ci->driver = NULL;
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2013-08-14 17:44:14 +08:00
|
|
|
if (ci->vbus_active) {
|
2012-07-07 22:56:40 +08:00
|
|
|
hw_device_state(ci, 0);
|
2016-12-29 06:57:08 +08:00
|
|
|
spin_unlock_irqrestore(&ci->lock, flags);
|
2012-07-07 22:56:40 +08:00
|
|
|
if (ci->platdata->notify_event)
|
|
|
|
ci->platdata->notify_event(ci,
|
2013-06-24 19:46:36 +08:00
|
|
|
CI_HDRC_CONTROLLER_STOPPED_EVENT);
|
2012-07-07 22:56:40 +08:00
|
|
|
_gadget_stop_activity(&ci->gadget);
|
|
|
|
spin_lock_irqsave(&ci->lock, flags);
|
2020-01-23 10:49:19 +08:00
|
|
|
pm_runtime_put(ci->dev);
|
2010-12-07 20:24:02 +08:00
|
|
|
}
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
spin_unlock_irqrestore(&ci->lock, flags);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2014-12-12 09:11:42 +08:00
|
|
|
ci_udc_stop_for_otg_fsm(ci);
|
2008-11-18 06:14:51 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
* BUS block
|
|
|
|
*****************************************************************************/
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
|
2012-07-07 22:56:40 +08:00
|
|
|
* udc_irq: ci interrupt handler
|
2008-11-18 06:14:51 +08:00
|
|
|
*
|
|
|
|
* This function returns IRQ_HANDLED if the IRQ has been handled
|
|
|
|
* It locks access to registers
|
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static irqreturn_t udc_irq(struct ci_hdrc *ci)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
|
|
|
irqreturn_t retval;
|
|
|
|
u32 intr;
|
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
if (ci == NULL)
|
2008-11-18 06:14:51 +08:00
|
|
|
return IRQ_HANDLED;
|
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
spin_lock(&ci->lock);
|
2010-12-07 20:24:02 +08:00
|
|
|
|
2013-06-24 19:46:36 +08:00
|
|
|
if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
|
2012-07-07 22:56:40 +08:00
|
|
|
if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
|
2012-05-11 22:25:53 +08:00
|
|
|
USBMODE_CM_DC) {
|
2012-07-07 22:56:40 +08:00
|
|
|
spin_unlock(&ci->lock);
|
2010-12-07 20:24:02 +08:00
|
|
|
return IRQ_NONE;
|
|
|
|
}
|
|
|
|
}
|
2012-07-07 22:56:40 +08:00
|
|
|
intr = hw_test_and_clear_intr_active(ci);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-05-11 22:25:46 +08:00
|
|
|
if (intr) {
|
2008-11-18 06:14:51 +08:00
|
|
|
/* order defines priority - do NOT change it */
|
2012-05-11 22:25:46 +08:00
|
|
|
if (USBi_URI & intr)
|
2012-07-07 22:56:40 +08:00
|
|
|
isr_reset_handler(ci);
|
2012-05-11 22:25:46 +08:00
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
if (USBi_PCI & intr) {
|
2012-07-07 22:56:40 +08:00
|
|
|
ci->gadget.speed = hw_port_is_high_speed(ci) ?
|
2008-11-18 06:14:51 +08:00
|
|
|
USB_SPEED_HIGH : USB_SPEED_FULL;
|
2017-03-07 10:35:01 +08:00
|
|
|
if (ci->suspended) {
|
|
|
|
if (ci->driver->resume) {
|
|
|
|
spin_unlock(&ci->lock);
|
|
|
|
ci->driver->resume(&ci->gadget);
|
|
|
|
spin_lock(&ci->lock);
|
|
|
|
}
|
2012-07-07 22:56:40 +08:00
|
|
|
ci->suspended = 0;
|
2017-03-07 10:35:01 +08:00
|
|
|
usb_gadget_set_state(&ci->gadget,
|
|
|
|
ci->resume_state);
|
2011-02-18 20:13:17 +08:00
|
|
|
}
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|
2012-05-11 22:25:46 +08:00
|
|
|
|
|
|
|
if (USBi_UI & intr)
|
2012-07-07 22:56:40 +08:00
|
|
|
isr_tr_complete_handler(ci);
|
2012-05-11 22:25:46 +08:00
|
|
|
|
2017-03-07 10:35:01 +08:00
|
|
|
if ((USBi_SLI & intr) && !(ci->suspended)) {
|
|
|
|
ci->suspended = 1;
|
|
|
|
ci->resume_state = ci->gadget.state;
|
2012-07-07 22:56:40 +08:00
|
|
|
if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
|
|
|
|
ci->driver->suspend) {
|
|
|
|
spin_unlock(&ci->lock);
|
|
|
|
ci->driver->suspend(&ci->gadget);
|
|
|
|
spin_lock(&ci->lock);
|
2011-02-18 20:13:17 +08:00
|
|
|
}
|
2017-03-07 10:35:01 +08:00
|
|
|
usb_gadget_set_state(&ci->gadget,
|
|
|
|
USB_STATE_SUSPENDED);
|
2011-02-18 20:13:17 +08:00
|
|
|
}
|
2008-11-18 06:14:51 +08:00
|
|
|
retval = IRQ_HANDLED;
|
|
|
|
} else {
|
|
|
|
retval = IRQ_NONE;
|
|
|
|
}
|
2012-07-07 22:56:40 +08:00
|
|
|
spin_unlock(&ci->lock);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2012-05-11 22:25:47 +08:00
|
|
|
* udc_start: initialize gadget role
|
2012-07-07 22:56:40 +08:00
|
|
|
* @ci: chipidea controller
|
2008-11-18 06:14:51 +08:00
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
static int udc_start(struct ci_hdrc *ci)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2012-07-07 22:56:40 +08:00
|
|
|
struct device *dev = ci->dev;
|
2015-07-09 15:18:45 +08:00
|
|
|
struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
|
2008-11-18 06:14:51 +08:00
|
|
|
int retval = 0;
|
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
ci->gadget.ops = &usb_gadget_ops;
|
|
|
|
ci->gadget.speed = USB_SPEED_UNKNOWN;
|
|
|
|
ci->gadget.max_speed = USB_SPEED_HIGH;
|
|
|
|
ci->gadget.name = ci->platdata->name;
|
2015-07-09 15:18:45 +08:00
|
|
|
ci->gadget.otg_caps = otg_caps;
|
usb: chipidea: udc: add software sg list support
The chipidea controller doesn't support short transfer for sg list,
so we still keep setting IOC per TD, otherwise, there will be no interrupt
for short transfer. Each TD has five entries for data buffer, each data
buffer could be non-countinuous 4KB buffer, so it could handle
up to 5 sg buffers one time. The benefit of this patch is avoiding
OOM for low memory system(eg, 256MB) during large USB transfers, see
below for detail. The non-sg handling has not changed.
ufb: page allocation failure: order:4, mode:0x40cc0(GFP_KERNEL|__GFP_COMP),
nodemask=(null),cpuset=/,mems_allowed=0
CPU: 2 PID: 370 Comm: ufb Not tainted 5.4.3-1.1.0+g54b3750d61fd #1
Hardware name: NXP i.MX8MNano DDR4 EVK board (DT)
Call trace:
dump_backtrace+0x0/0x140
show_stack+0x14/0x20
dump_stack+0xb4/0xf8
warn_alloc+0xec/0x158
__alloc_pages_slowpath+0x9cc/0x9f8
__alloc_pages_nodemask+0x21c/0x280
alloc_pages_current+0x7c/0xe8
kmalloc_order+0x1c/0x88
__kmalloc+0x25c/0x298
ffs_epfile_io.isra.0+0x20c/0x7d0
ffs_epfile_read_iter+0xa8/0x188
new_sync_read+0xe4/0x170
__vfs_read+0x2c/0x40
vfs_read+0xc8/0x1a0
ksys_read+0x68/0xf0
__arm64_sys_read+0x18/0x20
el0_svc_common.constprop.0+0x68/0x160
el0_svc_handler+0x20/0x80
el0_svc+0x8/0xc
Mem-Info:
active_anon:2856 inactive_anon:5269 isolated_anon:12
active_file:5238 inactive_file:18803 isolated_file:0
unevictable:0 dirty:22 writeback:416 unstable:0
slab_reclaimable:4073 slab_unreclaimable:3408
mapped:727 shmem:7393 pagetables:37 bounce:0
free:4104 free_pcp:118 free_cma:0
Node 0 active_anon:11436kB inactive_anon:21076kB active_file:20988kB inactive_file:75216kB unevictable:0kB isolated(ano
Node 0 DMA32 free:16820kB min:1808kB low:2260kB high:2712kB active_anon:11436kB inactive_anon:21076kB active_file:2098B
lowmem_reserve[]: 0 0 0
Node 0 DMA32: 508*4kB (UME) 242*8kB (UME) 730*16kB (UM) 21*32kB (UME) 5*64kB (UME) 2*128kB (M) 0*256kB 0*512kB 0*1024kB
Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=32768kB
Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=64kB
31455 total pagecache pages
0 pages in swap cache
Swap cache stats: add 0, delete 0, find 0/0
Free swap = 0kB
Total swap = 0kB
65536 pages RAM
0 pages HighMem/MovableOnly
10766 pages reserved
0 pages cma reserved
0 pages hwpoisoned
Reviewed-by: Jun Li <jun.li@nxp.com>
Signed-off-by: Peter Chen <peter.chen@nxp.com>
2020-02-21 21:40:57 +08:00
|
|
|
ci->gadget.sg_supported = 1;
|
2015-07-09 15:18:45 +08:00
|
|
|
|
2017-08-16 18:32:39 +08:00
|
|
|
if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
|
|
|
|
ci->gadget.quirk_avoids_skb_reserve = 1;
|
|
|
|
|
2015-07-31 10:41:00 +08:00
|
|
|
if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
|
|
|
|
otg_caps->adp_support))
|
2015-07-09 15:18:45 +08:00
|
|
|
ci->gadget.is_otg = 1;
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
INIT_LIST_HEAD(&ci->gadget.ep_list);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-05-09 04:29:03 +08:00
|
|
|
/* alloc resources */
|
2017-03-13 10:18:42 +08:00
|
|
|
ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent,
|
2013-06-24 19:46:36 +08:00
|
|
|
sizeof(struct ci_hw_qh),
|
|
|
|
64, CI_HDRC_PAGE_SIZE);
|
2012-07-07 22:56:40 +08:00
|
|
|
if (ci->qh_pool == NULL)
|
2012-05-11 22:25:47 +08:00
|
|
|
return -ENOMEM;
|
2012-05-09 04:29:03 +08:00
|
|
|
|
2017-03-13 10:18:42 +08:00
|
|
|
ci->td_pool = dma_pool_create("ci_hw_td", dev->parent,
|
2013-06-24 19:46:36 +08:00
|
|
|
sizeof(struct ci_hw_td),
|
|
|
|
64, CI_HDRC_PAGE_SIZE);
|
2012-07-07 22:56:40 +08:00
|
|
|
if (ci->td_pool == NULL) {
|
2012-05-09 04:29:03 +08:00
|
|
|
retval = -ENOMEM;
|
|
|
|
goto free_qh_pool;
|
|
|
|
}
|
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
retval = init_eps(ci);
|
2012-05-09 04:29:03 +08:00
|
|
|
if (retval)
|
|
|
|
goto free_pools;
|
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
ci->gadget.ep0 = &ci->ep0in->ep;
|
2010-12-07 20:24:02 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
retval = usb_add_gadget_udc(dev, &ci->gadget);
|
2011-06-28 21:33:47 +08:00
|
|
|
if (retval)
|
2013-09-24 12:47:53 +08:00
|
|
|
goto destroy_eps;
|
2011-06-28 21:33:47 +08:00
|
|
|
|
2008-11-18 06:14:51 +08:00
|
|
|
return retval;
|
|
|
|
|
2012-09-12 19:58:03 +08:00
|
|
|
destroy_eps:
|
|
|
|
destroy_eps(ci);
|
2012-05-09 04:29:03 +08:00
|
|
|
free_pools:
|
2012-07-07 22:56:40 +08:00
|
|
|
dma_pool_destroy(ci->td_pool);
|
2012-05-09 04:29:03 +08:00
|
|
|
free_qh_pool:
|
2012-07-07 22:56:40 +08:00
|
|
|
dma_pool_destroy(ci->qh_pool);
|
2008-11-18 06:14:51 +08:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2020-07-04 01:41:30 +08:00
|
|
|
/*
|
2013-08-14 17:44:07 +08:00
|
|
|
* ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
|
2008-11-18 06:14:51 +08:00
|
|
|
*
|
|
|
|
* No interrupts active, the IRQ has been released
|
|
|
|
*/
|
2013-08-14 17:44:07 +08:00
|
|
|
void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
|
2008-11-18 06:14:51 +08:00
|
|
|
{
|
2013-08-14 17:44:07 +08:00
|
|
|
if (!ci->roles[CI_ROLE_GADGET])
|
2008-11-18 06:14:51 +08:00
|
|
|
return;
|
2012-05-09 04:29:02 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
usb_del_gadget_udc(&ci->gadget);
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2012-09-12 19:58:03 +08:00
|
|
|
destroy_eps(ci);
|
2012-05-09 04:29:03 +08:00
|
|
|
|
2012-07-07 22:56:40 +08:00
|
|
|
dma_pool_destroy(ci->td_pool);
|
|
|
|
dma_pool_destroy(ci->qh_pool);
|
2013-08-14 17:44:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int udc_id_switch_for_device(struct ci_hdrc *ci)
|
|
|
|
{
|
2018-09-04 23:18:55 +08:00
|
|
|
if (ci->platdata->pins_device)
|
|
|
|
pinctrl_select_state(ci->platdata->pctl,
|
|
|
|
ci->platdata->pins_device);
|
|
|
|
|
2014-04-23 15:56:38 +08:00
|
|
|
if (ci->is_otg)
|
|
|
|
/* Clear and enable BSV irq */
|
|
|
|
hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
|
|
|
|
OTGSC_BSVIS | OTGSC_BSVIE);
|
2013-08-14 17:44:07 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void udc_id_switch_for_host(struct ci_hdrc *ci)
|
|
|
|
{
|
2014-04-23 15:56:38 +08:00
|
|
|
/*
|
|
|
|
* host doesn't care B_SESSION_VALID event
|
|
|
|
* so clear and disbale BSV irq
|
|
|
|
*/
|
|
|
|
if (ci->is_otg)
|
|
|
|
hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
|
2017-03-27 10:54:27 +08:00
|
|
|
|
|
|
|
ci->vbus_active = 0;
|
2018-09-04 23:18:55 +08:00
|
|
|
|
|
|
|
if (ci->platdata->pins_device && ci->platdata->pins_default)
|
|
|
|
pinctrl_select_state(ci->platdata->pctl,
|
|
|
|
ci->platdata->pins_default);
|
2012-05-11 22:25:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ci_hdrc_gadget_init - initialize device related bits
|
2020-07-04 01:41:30 +08:00
|
|
|
* @ci: the controller
|
2012-05-11 22:25:47 +08:00
|
|
|
*
|
2013-08-14 17:44:07 +08:00
|
|
|
* This function initializes the gadget, if the device is "device capable".
|
2012-05-11 22:25:47 +08:00
|
|
|
*/
|
2013-06-24 19:46:36 +08:00
|
|
|
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
|
2012-05-11 22:25:47 +08:00
|
|
|
{
|
|
|
|
struct ci_role_driver *rdrv;
|
2017-04-24 20:35:51 +08:00
|
|
|
int ret;
|
2012-05-11 22:25:47 +08:00
|
|
|
|
|
|
|
if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
|
|
|
|
return -ENXIO;
|
|
|
|
|
2016-09-08 20:34:32 +08:00
|
|
|
rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL);
|
2012-05-11 22:25:47 +08:00
|
|
|
if (!rdrv)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2013-08-14 17:44:07 +08:00
|
|
|
rdrv->start = udc_id_switch_for_device;
|
|
|
|
rdrv->stop = udc_id_switch_for_host;
|
2012-05-11 22:25:47 +08:00
|
|
|
rdrv->irq = udc_irq;
|
|
|
|
rdrv->name = "gadget";
|
2008-11-18 06:14:51 +08:00
|
|
|
|
2017-04-24 20:35:51 +08:00
|
|
|
ret = udc_start(ci);
|
|
|
|
if (!ret)
|
|
|
|
ci->roles[CI_ROLE_GADGET] = rdrv;
|
|
|
|
|
|
|
|
return ret;
|
2008-11-18 06:14:51 +08:00
|
|
|
}
|