USB fixes for 4.18-rc7
Here are a number of USB fixes and new device ids for 4.18-rc7. The largest number are a bunch of gadget driver fixes that got delayed in being submitted earlier due to vacation schedules, but nothing really huge is present in them. There are some new device ids and some PHY driver fixes that were connected to some USB ones. Full details are in the shortlog. All have been in linux-next for a while with no reported issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

-----BEGIN PGP SIGNATURE-----

iG0EABECAC0WIQT0tgzFv3jCIUoxPcsxR9QN2y37KQUCW1nXnQ8cZ3JlZ0Brcm9h
aC5jb20ACgkQMUfUDdst+ymoWACfTbc0TF6u9hNALIS9nsgLxevZLjYAnA5RJ12y
TTBeXMIAvCUKILAXPQok
=spWc
-----END PGP SIGNATURE-----

Merge tag 'usb-4.18-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB fixes from Greg KH:
 "Here are a number of USB fixes and new device ids for 4.18-rc7. The
  largest number are a bunch of gadget driver fixes that got delayed in
  being submitted earlier due to vacation schedules, but nothing really
  huge is present in them. There are some new device ids and some PHY
  driver fixes that were connected to some USB ones. Full details are in
  the shortlog. All have been in linux-next for a while with no reported
  issues"

* tag 'usb-4.18-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (28 commits)
  usb: core: handle hub C_PORT_OVER_CURRENT condition
  usb: xhci: Fix memory leak in xhci_endpoint_reset()
  usb: typec: tcpm: Fix sink PDO starting index for PPS APDO selection
  usb: gadget: f_fs: Only return delayed status when len is 0
  usb: gadget: f_uac2: fix endianness of 'struct cntrl_*_lay3'
  usb: dwc2: Fix inefficient copy of unaligned buffers
  usb: dwc2: Fix DMA alignment to start at allocated boundary
  usb: dwc3: rockchip: Fix PHY documentation links.
  tools: usb: ffs-test: Fix build on big endian systems
  usb: gadget: aspeed: Workaround memory ordering issue
  usb: dwc3: gadget: remove redundant variable maxpacket
  usb: dwc2: avoid NULL dereferences
  usb/phy: fix PPC64 build errors in phy-fsl-usb.c
  usb: dwc2: host: do not delay retries for CONTROL IN transfers
  usb: gadget: u_audio: protect stream runtime fields with stream spinlock
  usb: gadget: u_audio: remove cached period bytes value
  usb: gadget: u_audio: remove caching of stream buffer parameters
  usb: gadget: u_audio: update hw_ptr in iso_complete after data copied
  usb: gadget: u_audio: fix pcm/card naming in g_audio_setup()
  usb: gadget: f_uac2: fix error handling in afunc_bind (again)
  ...
commit cd3f77d74a
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
 the node is not important. The content of the node is defined in dwc3.txt.
 
 Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
+Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
+Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
 
 Example device nodes:
 
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
 {
 void __iomem *ctrl = params->ctrl_regs;
 
+USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+/* 1 millisecond - for USB clocks to settle down */
+usleep_range(1000, 2000);
+
 if (BRCM_ID(params->family_id) == 0x7366) {
 /*
 * The PHY3_SOFT_RESETB bits default to the wrong state.
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
 ddata = container_of(work, struct phy_mdm6600, status_work.work);
 dev = ddata->dev;
 
-error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
 ddata->status_gpios->desc,
 values);
 if (error)
 return;
 
-for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
 val |= values[i] << i;
 dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
 __func__, i, values[i], val);
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
 select EXTCON
 select RESET_CONTROLLER
+select USB_ULPI_BUS
 help
 Say Y here if your system has a dual role high speed USB
 controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
 help
 Say Y here to enable host controller functionality of the
 ChipIdea driver.
-
-config USB_CHIPIDEA_ULPI
-bool "ChipIdea ULPI PHY support"
-depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
-help
-Say Y here if you have a ULPI PHY attached to your ChipIdea
-controller.
-
 endif
@@ -1,11 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
 
-ci_hdrc-y := core.o otg.o debug.o
+ci_hdrc-y := core.o otg.o debug.o ulpi.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
 ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
-ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI) += ulpi.o
 
 # Glue/Bridge layers go here
 
@@ -240,10 +240,8 @@ struct ci_hdrc {
 
 struct ci_hdrc_platform_data *platdata;
 int vbus_active;
-#ifdef CONFIG_USB_CHIPIDEA_ULPI
 struct ulpi *ulpi;
 struct ulpi_ops ulpi_ops;
-#endif
 struct phy *phy;
 /* old usb_phy interface */
 struct usb_phy *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
 #endif
 }
 
-#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
 int ci_ulpi_init(struct ci_hdrc *ci);
 void ci_ulpi_exit(struct ci_hdrc *ci);
 int ci_ulpi_resume(struct ci_hdrc *ci);
-#else
-static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
-static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
-static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
-#endif
 
 u32 hw_read_intr_enable(struct ci_hdrc *ci);
 
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
 {
 int cnt = 100000;
 
+if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
+return 0;
+
 while (cnt-- > 0) {
 if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
 return 0;
@@ -1831,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
 },
+{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+},
 
 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
 .driver_info = CLEAR_HALT_CONDITIONS,
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
 if (!udev || udev->state == USB_STATE_NOTATTACHED) {
 /* Tell hub_wq to disconnect the device or
- * check for a new connection
+ * check for a new connection or over current condition.
+ * Based on USB2.0 Spec Section 11.12.5,
+ * C_PORT_OVER_CURRENT could be set while
+ * PORT_OVER_CURRENT is not. So check for any of them.
 */
 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
-(portstatus & USB_PORT_STAT_OVERCURRENT))
+(portstatus & USB_PORT_STAT_OVERCURRENT) ||
+(portchange & USB_PORT_STAT_C_OVERCURRENT))
 set_bit(port1, hub->change_bits);
 
 } else if (portstatus & USB_PORT_STAT_ENABLE) {
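For background, USB 2.0 section 11.12.5 reports port status and port change as two separate 16-bit words in the GetPortStatus reply, which is why the hunk above has to test both portstatus and portchange. Below is a minimal user-space sketch of decoding those two words; the bit values come from the specification, while the helper name and buffer layout are purely illustrative and not the kernel's API.

    #include <stdint.h>
    #include <stdio.h>

    /* Bit positions from USB 2.0 hub port status/change words */
    #define PORT_STAT_CONNECTION   0x0001  /* wPortStatus bit 0 */
    #define PORT_STAT_OVERCURRENT  0x0008  /* wPortStatus bit 3 */
    #define PORT_CHG_OVERCURRENT   0x0008  /* wPortChange bit 3 (C_PORT_OVER_CURRENT) */

    /* GetPortStatus returns wPortStatus followed by wPortChange, little endian */
    static int port_needs_attention(const uint8_t buf[4])
    {
        uint16_t status = buf[0] | (buf[1] << 8);
        uint16_t change = buf[2] | (buf[3] << 8);

        /* Either a live over-current condition or a latched change bit */
        return (status & PORT_STAT_CONNECTION) ||
               (status & PORT_STAT_OVERCURRENT) ||
               (change & PORT_CHG_OVERCURRENT);
    }

    int main(void)
    {
        /* Only C_PORT_OVER_CURRENT set: status word clear, change word 0x0008 */
        const uint8_t reply[4] = { 0x00, 0x00, 0x08, 0x00 };

        printf("needs attention: %d\n", port_needs_attention(reply));
        return 0;
    }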
@@ -3430,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
 hs_ep = hsotg->eps_in[idx];
 /* Proceed only unmasked ISOC EPs */
-if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
 continue;
 
 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3476,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
 hs_ep = hsotg->eps_out[idx];
 /* Proceed only unmasked ISOC EPs */
-if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
 continue;
 
 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3650,7 +3650,7 @@ irq_retry:
 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
 hs_ep = hsotg->eps_out[idx];
 /* Proceed only unmasked ISOC EPs */
-if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
 continue;
 
 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
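The dwc2 change above leans on C's left-to-right short-circuit evaluation: endpoints that were never allocated have no bit set in the interrupt mask, so testing the mask before touching hs_ep means the pointer is only dereferenced for endpoints that actually exist. A generic sketch of the pattern, with illustrative names rather than the driver's:

    #include <stdbool.h>
    #include <stddef.h>

    struct endpoint {
        bool isochronous;
    };

    /*
     * Illustrative only: eps[] may contain NULL slots, but 'active_mask'
     * never has a bit set for a slot that was not allocated. Testing the
     * mask first therefore guarantees the pointer is valid by the time it
     * is dereferenced, because && evaluates left to right and stops early.
     */
    static bool ep_wants_service(struct endpoint *const eps[], unsigned int idx,
                                 unsigned int active_mask)
    {
        return ((1u << idx) & active_mask) && eps[idx]->isochronous;
    }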
@@ -2665,34 +2665,35 @@ static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
 
 #define DWC2_USB_DMA_ALIGN 4
 
-struct dma_aligned_buffer {
-void *kmalloc_ptr;
-void *old_xfer_buffer;
-u8 data[0];
-};
-
 static void dwc2_free_dma_aligned_buffer(struct urb *urb)
 {
-struct dma_aligned_buffer *temp;
+void *stored_xfer_buffer;
+size_t length;
 
 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
 return;
 
-temp = container_of(urb->transfer_buffer,
-struct dma_aligned_buffer, data);
+/* Restore urb->transfer_buffer from the end of the allocated area */
+memcpy(&stored_xfer_buffer, urb->transfer_buffer +
+urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
 
-if (usb_urb_dir_in(urb))
-memcpy(temp->old_xfer_buffer, temp->data,
-urb->transfer_buffer_length);
-urb->transfer_buffer = temp->old_xfer_buffer;
-kfree(temp->kmalloc_ptr);
+if (usb_urb_dir_in(urb)) {
+if (usb_pipeisoc(urb->pipe))
+length = urb->transfer_buffer_length;
+else
+length = urb->actual_length;
+
+memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
+}
+kfree(urb->transfer_buffer);
+urb->transfer_buffer = stored_xfer_buffer;
 
 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
 }
 
 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
 {
-struct dma_aligned_buffer *temp, *kmalloc_ptr;
+void *kmalloc_ptr;
 size_t kmalloc_size;
 
 if (urb->num_sgs || urb->sg ||
@@ -2700,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
 return 0;
 
-/* Allocate a buffer with enough padding for alignment */
+/*
+ * Allocate a buffer with enough padding for original transfer_buffer
+ * pointer. This allocation is guaranteed to be aligned properly for
+ * DMA
+ */
 kmalloc_size = urb->transfer_buffer_length +
-sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
+sizeof(urb->transfer_buffer);
 
 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
 if (!kmalloc_ptr)
 return -ENOMEM;
 
-/* Position our struct dma_aligned_buffer such that data is aligned */
-temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
-temp->kmalloc_ptr = kmalloc_ptr;
-temp->old_xfer_buffer = urb->transfer_buffer;
+/*
+ * Position value of original urb->transfer_buffer pointer to the end
+ * of allocation for later referencing
+ */
+memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+&urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
 if (usb_urb_dir_out(urb))
-memcpy(temp->data, urb->transfer_buffer,
+memcpy(kmalloc_ptr, urb->transfer_buffer,
 urb->transfer_buffer_length);
-urb->transfer_buffer = temp->data;
+urb->transfer_buffer = kmalloc_ptr;
 
 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
 
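The allocation trick in the hunk above can be summarized outside the HCD: allocate length plus sizeof(void *) bytes, hand out the start of the allocation (kmalloc already returns suitably aligned memory in the kernel; plain malloc is used here only for illustration), and stash the caller's original pointer in the tail so it can be recovered at free time. This is a hedged user-space sketch of that idea, not the driver's actual helpers:

    #include <stdlib.h>
    #include <string.h>

    /* Replace 'orig' with a bounce buffer; the original pointer is stored
     * in the extra tail bytes of the same allocation. */
    static void *bounce_alloc(void *orig, size_t len)
    {
        void *bounce = malloc(len + sizeof(orig));

        if (!bounce)
            return NULL;
        memcpy(bounce, orig, len);                         /* OUT data, if any */
        memcpy((char *)bounce + len, &orig, sizeof(orig)); /* stash original */
        return bounce;
    }

    /* Copy data back (for IN transfers), recover and return the original pointer. */
    static void *bounce_free(void *bounce, size_t len, int copy_back)
    {
        void *orig;

        memcpy(&orig, (char *)bounce + len, sizeof(orig));
        if (copy_back)
            memcpy(orig, bounce, len);
        free(bounce);
        return orig;
    }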
@@ -1231,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
 * avoid interrupt storms we'll wait before retrying if we've got
 * several NAKs. If we didn't do this we'd retry directly from the
 * interrupt handler and could end up quickly getting another
-* interrupt (another NAK), which we'd retry.
+* interrupt (another NAK), which we'd retry. Note that we do not
+* delay retries for IN parts of control requests, as those are expected
+* to complete fairly quickly, and if we delay them we risk confusing
+* the device and cause it issue STALL.
 *
 * Note that in DMA mode software only gets involved to re-send NAKed
 * transfers for split transactions, so we only need to apply this
@@ -1244,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
 qtd->error_count = 0;
 qtd->complete_split = 0;
 qtd->num_naks++;
-qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY;
+qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
+!(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+chan->ep_is_in);
 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
 goto handle_nak_done;
 }
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 ret = dwc3_ep0_start_trans(dep);
 } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
 req->request.length && req->request.zero) {
-u32 maxpacket;
 
 ret = usb_gadget_map_request_by_dev(dwc->sysdev,
 &req->request, dep->number);
 if (ret)
 return;
 
-maxpacket = dep->endpoint.maxpacket;
-
 /* prepare normal TRB */
 dwc3_ep0_prepare_one_trb(dep, req->request.dma,
 req->request.length,
@@ -1819,7 +1819,6 @@ unknown:
 if (cdev->use_os_string && cdev->os_desc_config &&
 (ctrl->bRequestType & USB_TYPE_VENDOR) &&
 ctrl->bRequest == cdev->b_vendor_code) {
-struct usb_request *req;
 struct usb_configuration *os_desc_cfg;
 u8 *buf;
 int interface;
@@ -3263,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 
-return USB_GADGET_DELAYED_STATUS;
+return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
 };
 
 struct cntrl_cur_lay3 {
-__u32 dCUR;
+__le32 dCUR;
 };
 
 struct cntrl_range_lay3 {
-__u16 wNumSubRanges;
-__u32 dMIN;
-__u32 dMAX;
-__u32 dRES;
+__le16 wNumSubRanges;
+__le32 dMIN;
+__le32 dMAX;
+__le32 dRES;
 } __packed;
 
 static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
 if (!agdev->out_ep) {
 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-return ret;
+return -ENODEV;
 }
 
 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
 if (!agdev->in_ep) {
 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-return ret;
+return -ENODEV;
 }
 
 agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
 if (entity_id == USB_IN_CLK_ID)
-c.dCUR = p_srate;
+c.dCUR = cpu_to_le32(p_srate);
 else if (entity_id == USB_OUT_CLK_ID)
-c.dCUR = c_srate;
+c.dCUR = cpu_to_le32(c_srate);
 
 value = min_t(unsigned, w_length, sizeof c);
 memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 
 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
 if (entity_id == USB_IN_CLK_ID)
-r.dMIN = p_srate;
+r.dMIN = cpu_to_le32(p_srate);
 else if (entity_id == USB_OUT_CLK_ID)
-r.dMIN = c_srate;
+r.dMIN = cpu_to_le32(c_srate);
 else
 return -EOPNOTSUPP;
 
 r.dMAX = r.dMIN;
 r.dRES = 0;
-r.wNumSubRanges = 1;
+r.wNumSubRanges = cpu_to_le16(1);
 
 value = min_t(unsigned, w_length, sizeof r);
 memcpy(req->buf, &r, value);
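The point of the __le32/__le16 annotations and cpu_to_le*() conversions above is that the UAC2 layout-3 CUR/RANGE parameter blocks are little-endian on the wire, so they must be byte-swapped on big-endian hosts before being copied into the control request buffer. A small user-space sketch of building such a block with explicit conversions follows; the struct mirrors the one above, but the helper is an illustration, not the gadget code:

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    struct cntrl_range_lay3 {
        uint16_t wNumSubRanges;
        uint32_t dMIN;
        uint32_t dMAX;
        uint32_t dRES;
    } __attribute__((packed));

    /* Fill a layout-3 RANGE block for one fixed sample rate, little-endian on the wire */
    static size_t fill_sam_freq_range(void *buf, size_t maxlen, uint32_t srate)
    {
        struct cntrl_range_lay3 r = {
            .wNumSubRanges = htole16(1),
            .dMIN = htole32(srate),
            .dMAX = htole32(srate),
            .dRES = htole32(0),
        };
        size_t len = maxlen < sizeof(r) ? maxlen : sizeof(r);

        memcpy(buf, &r, len);
        return len;
    }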
@@ -32,9 +32,6 @@ struct uac_req {
 struct uac_rtd_params {
 struct snd_uac_chip *uac; /* parent chip */
 bool ep_enabled; /* if the ep is enabled */
-/* Size of the ring buffer */
-size_t dma_bytes;
-unsigned char *dma_area;
 
 struct snd_pcm_substream *ss;
 
@@ -43,8 +40,6 @@ struct uac_rtd_params {
 
 void *rbuf;
 
-size_t period_size;
-
 unsigned max_psize; /* MaxPacketSize of endpoint */
 struct uac_req *ureq;
 
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
 static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 {
 unsigned pending;
-unsigned long flags;
+unsigned long flags, flags2;
 unsigned int hw_ptr;
-bool update_alsa = false;
 int status = req->status;
 struct uac_req *ur = req->context;
 struct snd_pcm_substream *substream;
+struct snd_pcm_runtime *runtime;
 struct uac_rtd_params *prm = ur->pp;
 struct snd_uac_chip *uac = prm->uac;
 
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 if (!substream)
 goto exit;
 
+snd_pcm_stream_lock_irqsave(substream, flags2);
+
+runtime = substream->runtime;
+if (!runtime || !snd_pcm_running(substream)) {
+snd_pcm_stream_unlock_irqrestore(substream, flags2);
+goto exit;
+}
+
 spin_lock_irqsave(&prm->lock, flags);
 
 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 req->actual = req->length;
 }
 
-pending = prm->hw_ptr % prm->period_size;
-pending += req->actual;
-if (pending >= prm->period_size)
-update_alsa = true;
-
 hw_ptr = prm->hw_ptr;
-prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
 
 spin_unlock_irqrestore(&prm->lock, flags);
 
 /* Pack USB load in ALSA ring buffer */
-pending = prm->dma_bytes - hw_ptr;
+pending = runtime->dma_bytes - hw_ptr;
 
 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 if (unlikely(pending < req->actual)) {
-memcpy(req->buf, prm->dma_area + hw_ptr, pending);
-memcpy(req->buf + pending, prm->dma_area,
+memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+memcpy(req->buf + pending, runtime->dma_area,
 req->actual - pending);
 } else {
-memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+memcpy(req->buf, runtime->dma_area + hw_ptr,
+req->actual);
 }
 } else {
 if (unlikely(pending < req->actual)) {
-memcpy(prm->dma_area + hw_ptr, req->buf, pending);
-memcpy(prm->dma_area, req->buf + pending,
+memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+memcpy(runtime->dma_area, req->buf + pending,
 req->actual - pending);
 } else {
-memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+memcpy(runtime->dma_area + hw_ptr, req->buf,
+req->actual);
 }
 }
 
+spin_lock_irqsave(&prm->lock, flags);
+/* update hw_ptr after data is copied to memory */
+prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+hw_ptr = prm->hw_ptr;
+spin_unlock_irqrestore(&prm->lock, flags);
+snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+snd_pcm_period_elapsed(substream);
+
 exit:
 if (usb_ep_queue(ep, req, GFP_ATOMIC))
 dev_err(uac->card->dev, "%d Error!\n", __LINE__);
-
-if (update_alsa)
-snd_pcm_period_elapsed(substream);
 }
 
 static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
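The pattern used above — take the PCM stream lock, bail out unless the stream is actually running, copy into runtime->dma_area, and only then advance the hardware pointer and signal snd_pcm_period_elapsed() — can be condensed into a sketch like the following. It is a simplified illustration of the same ALSA calls, not the u_audio code itself; the ring_state bookkeeping struct is assumed for the example.

    #include <linux/kernel.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <sound/pcm.h>

    struct ring_state {
        spinlock_t lock;
        unsigned int hw_ptr;    /* byte offset into the ALSA buffer */
    };

    /* Push 'bytes' of capture data into the ALSA ring and report progress. */
    static void push_capture_data(struct ring_state *st,
                                  struct snd_pcm_substream *substream,
                                  const void *data, unsigned int bytes)
    {
        struct snd_pcm_runtime *runtime;
        unsigned long flags, flags2;
        unsigned int hw_ptr;

        snd_pcm_stream_lock_irqsave(substream, flags2);
        runtime = substream->runtime;
        if (!runtime || !snd_pcm_running(substream)) {
            snd_pcm_stream_unlock_irqrestore(substream, flags2);
            return;
        }

        spin_lock_irqsave(&st->lock, flags);
        hw_ptr = st->hw_ptr;
        spin_unlock_irqrestore(&st->lock, flags);

        /* Single-segment copy for brevity; a real ring copy must handle wrap. */
        memcpy(runtime->dma_area + hw_ptr, data,
               min(bytes, (unsigned int)(runtime->dma_bytes - hw_ptr)));

        spin_lock_irqsave(&st->lock, flags);
        st->hw_ptr = (hw_ptr + bytes) % runtime->dma_bytes;
        hw_ptr = st->hw_ptr;
        spin_unlock_irqrestore(&st->lock, flags);
        snd_pcm_stream_unlock_irqrestore(substream, flags2);

        /* Tell ALSA once at least one period boundary has been crossed */
        if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < bytes)
            snd_pcm_period_elapsed(substream);
    }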
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
 static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
 struct snd_pcm_hw_params *hw_params)
 {
-struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-struct uac_rtd_params *prm;
-int err;
-
-if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-prm = &uac->p_prm;
-else
-prm = &uac->c_prm;
-
-err = snd_pcm_lib_malloc_pages(substream,
+return snd_pcm_lib_malloc_pages(substream,
 params_buffer_bytes(hw_params));
-if (err >= 0) {
-prm->dma_bytes = substream->runtime->dma_bytes;
-prm->dma_area = substream->runtime->dma_area;
-prm->period_size = params_period_bytes(hw_params);
-}
-
-return err;
 }
 
 static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
 {
-struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-struct uac_rtd_params *prm;
-
-if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-prm = &uac->p_prm;
-else
-prm = &uac->c_prm;
-
-prm->dma_area = NULL;
-prm->dma_bytes = 0;
-prm->period_size = 0;
-
 return snd_pcm_lib_free_pages(substream);
 }
 
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
 if (err < 0)
 goto snd_fail;
 
-strcpy(pcm->name, pcm_name);
+strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
 pcm->private_data = uac;
 uac->pcm = pcm;
 
 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
 
-strcpy(card->driver, card_name);
-strcpy(card->shortname, card_name);
+strlcpy(card->driver, card_name, sizeof(card->driver));
+strlcpy(card->shortname, card_name, sizeof(card->shortname));
 sprintf(card->longname, "%s %i", card_name, card->dev->id);
 
 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
 /* Check our state, cancel pending requests if needed */
 if (ep->ep0.state != ep0_state_token) {
 EPDBG(ep, "wrong state\n");
 ast_vhub_nuke(ep, -EIO);
+
+/*
+ * Accept the packet regardless, this seems to happen
+ * when stalling a SETUP packet that has an OUT data
+ * phase.
+ */
+ast_vhub_nuke(ep, 0);
 goto stall;
 }
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
 if (chunk && req->req.buf)
 memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
 
+vhub_dma_workaround(ep->buf);
+
 /* Remember chunk size and trigger send */
 reg = VHUB_EP0_SET_TX_LEN(chunk);
 writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
 EPVDBG(ep, "rx prime\n");
 
 /* Prime endpoint for receiving data */
-writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
+writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
 }
 
 static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
 if (!req->req.dma) {
 
 /* For IN transfers, copy data over first */
-if (ep->epn.is_in)
+if (ep->epn.is_in) {
 memcpy(ep->buf, req->req.buf + act, chunk);
+vhub_dma_workaround(ep->buf);
+}
 writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
-} else
+} else {
+if (ep->epn.is_in)
+vhub_dma_workaround(req->req.buf);
 writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+}
 
 /* Start DMA */
 req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
 static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 struct ast_vhub_req *req)
 {
+struct ast_vhub_desc *desc = NULL;
 unsigned int act = req->act_count;
 unsigned int len = req->req.length;
 unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 
 /* While we can create descriptors */
 while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
-struct ast_vhub_desc *desc;
 unsigned int d_num;
 
 /* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 req->act_count = act = act + chunk;
 }
 
+if (likely(desc))
+vhub_dma_workaround(desc);
+
 /* Tell HW about new descriptors */
 writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
 ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
@@ -462,6 +462,39 @@ enum std_req_rc {
 #define DDBG(d, fmt, ...) do { } while(0)
 #endif
 
+static inline void vhub_dma_workaround(void *addr)
+{
+/*
+ * This works around a confirmed HW issue with the Aspeed chip.
+ *
+ * The core uses a different bus to memory than the AHB going to
+ * the USB device controller. Due to the latter having a higher
+ * priority than the core for arbitration on that bus, it's
+ * possible for an MMIO to the device, followed by a DMA by the
+ * device from memory to all be performed and services before
+ * a previous store to memory gets completed.
+ *
+ * This the following scenario can happen:
+ *
+ * - Driver writes to a DMA descriptor (Mbus)
+ * - Driver writes to the MMIO register to start the DMA (AHB)
+ * - The gadget sees the second write and sends a read of the
+ * descriptor to the memory controller (Mbus)
+ * - The gadget hits memory before the descriptor write
+ * causing it to read an obsolete value.
+ *
+ * Thankfully the problem is limited to the USB gadget device, other
+ * masters in the SoC all have a lower priority than the core, thus
+ * ensuring that the store by the core arrives first.
+ *
+ * The workaround consists of using a dummy read of the memory before
+ * doing the MMIO writes. This will ensure that the previous writes
+ * have been "pushed out".
+ */
+mb();
+(void)__raw_readl((void __iomem *)addr);
+}
+
 /* core.c */
 void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
 int status);
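The usage pattern the comment above describes is generic: finish all CPU stores to the in-memory descriptor, force them out with a barrier plus a dummy read-back of the same memory, and only then ring the device's MMIO doorbell. A hedged sketch of that ordering, with a made-up descriptor layout and doorbell register rather than the vhub's:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/types.h>

    struct dma_desc {
        u32 addr;
        u32 len_flags;
    };

    static inline void flush_store_before_mmio(void *addr)
    {
        /* Order prior stores, then pull the line back so the descriptor
         * write has actually reached memory before the device can race us. */
        mb();
        (void)__raw_readl((void __iomem *)addr);
    }

    /* Illustrative doorbell kick: descriptor first, then the MMIO write */
    static void kick_dma(struct dma_desc *desc, void __iomem *doorbell,
                         u32 buf_addr, u32 len)
    {
        desc->addr = buf_addr;
        desc->len_flags = len | BIT(31);    /* hypothetical "valid" flag */

        flush_store_before_mmio(desc);
        writel(1, doorbell);
    }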
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
 
 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
 
-msleep(3);
+mdelay(3);
 
 r8a66597_bset(r8a66597, PLLC, SYSCFG0);
 
-msleep(1);
+mdelay(1);
 
 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
 
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
 r8a66597->ep0_req->length = 2;
 /* AV: what happens if we get called again before that gets through? */
 spin_unlock(&r8a66597->lock);
-r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
 spin_lock(&r8a66597->lock);
 }
 
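The substitutions above are the usual remedy when code turns out to run in atomic context (under a spinlock or in interrupt handling): msleep() and GFP_KERNEL may sleep, while mdelay() busy-waits and GFP_ATOMIC never sleeps. A generic sketch of the constraint, with made-up names and not the r8a66597 driver's code:

    #include <linux/delay.h>
    #include <linux/gfp.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(hw_lock);

    static void configure_clock_atomic(void)
    {
        void *scratch;

        spin_lock(&hw_lock);

        /* Inside a spinlock: busy-wait instead of sleeping ... */
        mdelay(3);

        /* ... and allocate without sleeping */
        scratch = kmalloc(32, GFP_ATOMIC);
        kfree(scratch);

        spin_unlock(&hw_lock);
    }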
@@ -3051,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 if (!list_empty(&ep->ring->td_list)) {
 dev_err(&udev->dev, "EP not empty, refuse reset\n");
 spin_unlock_irqrestore(&xhci->lock, flags);
+xhci_free_command(xhci, cfg_cmd);
 goto cleanup;
 }
 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
 if (pdata->init && pdata->init(pdev) != 0)
 return -EINVAL;
 
+#ifdef CONFIG_PPC32
 if (pdata->big_endian_mmio) {
 _fsl_readl = _fsl_readl_be;
 _fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
 _fsl_readl = _fsl_readl_le;
 _fsl_writel = _fsl_writel_le;
 }
+#endif
 
 /* request irq */
 p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
 /*
 * state file in sysfs
 */
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
 struct device_attribute *attr, char *buf)
 {
 struct otg_fsm *fsm = &fsl_otg_dev->fsm;
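The prototype change matters because struct device_attribute expects its show() callback to return ssize_t; wiring in a function with a different return type is what broke the build here. A minimal sketch of a correctly typed sysfs attribute, with illustrative names rather than the phy-fsl-usb ones:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/sysfs.h>

    static ssize_t otg_state_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
    {
        /* Report a fixed string; a real driver would format its FSM state */
        return scnprintf(buf, PAGE_SIZE, "%s\n", "b_idle");
    }

    /* Expands to dev_attr_otg_state with .show = otg_state_show */
    static DEVICE_ATTR_RO(otg_state);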
@@ -2140,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
 * PPS APDO. Again skip the first sink PDO as this will
 * always be 5V 3A.
 */
-for (j = i; j < port->nr_snk_pdo; j++) {
+for (j = 1; j < port->nr_snk_pdo; j++) {
 pdo = port->snk_pdo[j];
 
 switch (pdo_type(pdo)) {
@@ -44,12 +44,25 @@
 
 /******************** Little Endian Handling ********************************/
 
-#define cpu_to_le16(x) htole16(x)
-#define cpu_to_le32(x) htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way
+ * that allows them to be used when initializing structures.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x) (x)
+#define cpu_to_le32(x) (x)
+#else
+#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x) \
+((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
+(((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
+#endif
+
 #define le32_to_cpu(x) le32toh(x)
 #define le16_to_cpu(x) le16toh(x)
 
 
 /******************** Messages and Errors ***********************************/
 
 static const char argv0[] = "ffs-test";
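The reason for open-coding the byte swap above is that htole16()/htole32() are not guaranteed to be constant expressions, so they cannot appear in static initializers, whereas a pure macro of shifts and masks can. A small user-space illustration of the difference; the descriptor struct and macro names are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    #define CPU_TO_LE16(x) (x)
    #else
    #define CPU_TO_LE16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
    #endif

    struct desc_header {
        uint16_t wLength;
    };

    /* Works at file scope because CPU_TO_LE16() is a constant expression;
     * an initializer calling htole16() here may be rejected by the compiler. */
    static const struct desc_header header = {
        .wLength = CPU_TO_LE16(64),
    };

    int main(void)
    {
        printf("wLength bytes: %02x %02x\n",
               ((const uint8_t *)&header)[0], ((const uint8_t *)&header)[1]);
        return 0;
    }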