[PATCH v4] usb: gadget: mv: Add USB 3.0 device driver for Marvell PXA2128 chip.

This patch adds a USB 3.0 device (gadget) controller driver for the Marvell PXA2128 chip. The driver handles control transfers on ep0, builds TRB chains with per-endpoint contexts for data transfers, and supports optional clock gating and VBUS detection through the platform data callbacks.

Signed-off-by: Yu Xu <yuxu@xxxxxxxxxxx>
---
 drivers/usb/gadget/Kconfig       |    9 +
 drivers/usb/gadget/Makefile      |    1 +
 drivers/usb/gadget/mv_u3d.h      |  310 ++++++
 drivers/usb/gadget/mv_u3d_core.c | 2043 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 2363 insertions(+)
 create mode 100644 drivers/usb/gadget/mv_u3d.h
 create mode 100644 drivers/usb/gadget/mv_u3d_core.c

diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 1f93861..4eb92d0 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -316,6 +316,15 @@ config USB_MV_UDC
 	  USB2.0 OTG controller, which can be configured as high speed or
 	  full speed USB peripheral.
 
+config USB_MV_U3D
+	tristate "MARVELL PXA2128 USB 3.0 controller"
+	depends on CPU_MMP3
+	select USB_GADGET_DUALSPEED
+	select USB_GADGET_SUPERSPEED
+	help
+	  The MARVELL PXA2128 processor series includes a SuperSpeed USB 3.0
+	  device controller, which supports a SuperSpeed USB peripheral.
+
 #
 # Controllers available in both integrated and discrete versions
 #
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index b7f6eef..3e7cdd7 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_USB_MV_UDC)	+= mv_udc.o
 mv_udc-y			:= mv_udc_core.o
 obj-$(CONFIG_USB_CI13XXX_MSM)	+= ci13xxx_msm.o
 obj-$(CONFIG_USB_FUSB300)	+= fusb300_udc.o
+obj-$(CONFIG_USB_MV_U3D)	+= mv_u3d_core.o
 
 #
 # USB gadget drivers
diff --git a/drivers/usb/gadget/mv_u3d.h b/drivers/usb/gadget/mv_u3d.h
new file mode 100644
index 0000000..2a5ea7c
--- /dev/null
+++ b/drivers/usb/gadget/mv_u3d.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef __MV_U3D_H
+#define __MV_U3D_H
+
+#define EP_CONTEXT_ALIGNMENT	32
+#define MV_U3D_TRB_ALIGNMENT	16
+#define MV_U3D_DMA_BOUNDARY	4096
+
+#define EP_DIR_IN		1
+#define EP_DIR_OUT		0
+
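+/* SuperSpeed control endpoints use a fixed 512 byte max packet size */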
+#define EP0_MAX_PKT_SIZE	512
+
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP		0
+#define DATA_STATE_XMIT		1
+#define DATA_STATE_NEED_ZLP	2
+#define WAIT_FOR_OUT_STATUS	3
+#define DATA_STATE_RECV		4
+#define STATUS_STAGE		5
+
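+/* maximum data length a single TRB can carry: 64KB */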
+#define EP_MAX_LENGTH_TRANSFER	0x10000
+
+/* USB3 Interrupt Status */
+#define USBINT_SETUP		0x00000001
+#define USBINT_RX_COMPLETE	0x00000002
+#define USBINT_TX_COMPLETE	0x00000004
+#define USBINT_UNDER_RUN	0x00000008
+#define USBINT_RXDESC_ERR	0x00000010
+#define USBINT_TXDESC_ERR	0x00000020
+#define USBINT_RX_TRB_COMPLETE	0x00000040
+#define USBINT_TX_TRB_COMPLETE	0x00000080
+#define USBINT_VBUS_VALID	0x00010000
+#define USBINT_STORAGE_CMD_FULL	0x00020000
+#define USBINT_LINK_CHG		0x01000000
+
+/* USB3 Interrupt Enable */
+#define INTR_ENABLE_SETUP		0x00000001
+#define INTR_ENABLE_RX_COMPLETE		0x00000002
+#define INTR_ENABLE_TX_COMPLETE		0x00000004
+#define INTR_ENABLE_UNDER_RUN		0x00000008
+#define INTR_ENABLE_RXDESC_ERR		0x00000010
+#define INTR_ENABLE_TXDESC_ERR		0x00000020
+#define INTR_ENABLE_RX_TRB_COMPLETE	0x00000040
+#define INTR_ENABLE_TX_TRB_COMPLETE	0x00000080
+#define INTR_ENABLE_RX_BUFFER_ERR	0x00000100
+#define INTR_ENABLE_VBUS_VALID		0x00010000
+#define INTR_ENABLE_STORAGE_CMD_FULL	0x00020000
+#define INTR_ENABLE_LINK_CHG		0x01000000
+#define INTR_ENABLE_PRIME_STATUS	0x02000000
+
+/* USB3 Link Change */
+#define LINK_CHANGE_LINK_UP		0x00000001
+#define LINK_CHANGE_SUSPEND		0x00000002
+#define LINK_CHANGE_RESUME		0x00000004
+#define LINK_CHANGE_WRESET		0x00000008
+#define LINK_CHANGE_HRESET		0x00000010
+#define LINK_CHANGE_VBUS_INVALID	0x00000020
+#define LINK_CHANGE_INACT		0x00000040
+#define LINK_CHANGE_DISABLE_AFTER_U0	0x00000080
+#define LINK_CHANGE_U1			0x00000100
+#define LINK_CHANGE_U2			0x00000200
+#define LINK_CHANGE_U3			0x00000400
+
+/* bridge setting */
+#define BRIDGE_SETTING_VBUS_VALID	(1 << 16)
+
+/* Command Register Bit Masks */
+#define CMD_RUN_STOP		0x00000001
+#define CMD_CTRL_RESET		0x00000002
+
+/* ep control register */
+#define EPXCR_EP_TYPE_CONTROL		0
+#define EPXCR_EP_TYPE_ISOC		1
+#define EPXCR_EP_TYPE_BULK		2
+#define EPXCR_EP_TYPE_INT		3
+#define EPXCR_EP_ENABLE_SHIFT		4
+#define EPXCR_MAX_BURST_SIZE_SHIFT	12
+#define EPXCR_MAX_PACKET_SIZE_SHIFT	16
+#define USB_BULK_BURST_OUT		6
+#define USB_BULK_BURST_IN		14
+
+#define EPXCR_EP_FLUSH		(1 << 7)
+#define EPXCR_EP_HALT		(1 << 1)
+#define EPXCR_EP_INIT		(1)
+
+/* TX/RX Status Register */
+#define XFERSTATUS_COMPLETE_SHIFT	24
+#define COMPLETE_INVALID	0
+#define COMPLETE_SUCCESS	1
+#define COMPLETE_BUFF_ERR	2
+#define COMPLETE_SHORT_PACKET	3
+#define COMPLETE_TRB_ERR	5
+#define XFERSTATUS_TRB_LENGTH_MASK	(0xFFFFFF)
+
+#define USB_LINK_BYPASS_VBUS	0x8
+
+#define LTSSM_PHY_INIT_DONE		0x80000000
+#define LTSSM_NEVER_GO_COMPLIANCE	0x40000000
+
+#define USB3_OP_REGS_OFFSET	0x100
+#define USB3_PHY_OFFSET		0xB800
+
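+/* dequeue cycle state bit, OR'ed into the ep context trb_addr_lo field */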
+#define DCS_ENABLE	0x1
+
+/* usb capability registers */
+struct mv_u3d_cap_regs {
+	u32	rsvd[5];
+	u32	dboff;	/* doorbell register offset */
+	u32	rtsoff;	/* runtime register offset */
+	u32	vuoff;	/* vendor unique register offset */
+};
+
+/* operation registers */
+struct mv_u3d_op_regs {
+	u32	usbcmd;		/* Command register */
+	u32	rsvd1[11];
+	u32	dcbaapl;	/* Device Context Base Address low register */
+	u32	dcbaaph;	/* Device Context Base Address high register */
+	u32	rsvd2[243];
+	u32	portsc;		/* port status and control register*/
+	u32	portlinkinfo;	/* port link info register*/
+	u32	rsvd3[9917];
+	u32	doorbell;	/* doorbell register */
+};
+
+/* control endpoint enable registers */
+struct epxcr {
+	u32	epxoutcr0;	/* ep out control 0 register */
+	u32	epxoutcr1;	/* ep out control 1 register */
+	u32	epxincr0;	/* ep in control 0 register */
+	u32	epxincr1;	/* ep in control 1 register */
+};
+
+/* transfer status registers */
+struct xferstatus {
+	u32	curdeqlo;	/* current TRB pointer low */
+	u32	curdeqhi;	/* current TRB pointer high */
+	u32	statuslo;	/* transfer status low */
+	u32	statushi;	/* transfer status high */
+};
+
+/* vendor unique control registers */
+struct mv_u3d_vuc_regs {
+	u32	ctrlepenable;	/* control endpoint enable register */
+	u32	setuplock;	/* setup lock register */
+	u32	endcomplete;	/* endpoint transfer complete register */
+	u32	intrcause;	/* interrupt cause register */
+	u32	intrenable;	/* interrupt enable register */
+	u32	trbcomplete;	/* TRB complete register */
+	u32	linkchange;	/* link change register */
+	u32	rsvd1[5];
+	u32	trbunderrun;	/* TRB underrun register */
+	u32	rsvd2[43];
+	u32	bridgesetting;	/* bridge setting register */
+	u32	rsvd3[7];
+	struct xferstatus	txst[16];	/* TX status register */
+	struct xferstatus	rxst[16];	/* RX status register */
+	u32	ltssm;		/* LTSSM control register */
+	u32	pipe;		/* PIPE control register */
+	u32	linkcr0;	/* link control 0 register */
+	u32	linkcr1;	/* link control 1 register */
+	u32	rsvd6[60];
+	u32	mib0;		/* MIB0 counter register */
+	u32	usblink;	/* usb link control register */
+	u32	ltssmstate;	/* LTSSM state register */
+	u32	linkerrorcause;	/* link error cause register */
+	u32	rsvd7[60];
+	u32	devaddrtiebrkr;	/* device address and tie breaker */
+	u32	itpinfo0;	/* ITP info 0 register */
+	u32	itpinfo1;	/* ITP info 1 register */
+	u32	rsvd8[61];
+	struct epxcr	epcr[16];	/* ep control register */
+	u32	rsvd9[64];
+	u32	phyaddr;	/* PHY address register */
+	u32	phydata;	/* PHY data register */
+};
+
+/* Endpoint context structure */
+struct ep_context {
+	u32	rsvd0;
+	u32	rsvd1;
+	u32	trb_addr_lo;		/* TRB address low 32 bit */
+	u32	trb_addr_hi;		/* TRB address high 32 bit */
+	u32	rsvd2;
+	u32	rsvd3;
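+	/* received SETUP packet is stored here; see mv_get_setup_data() */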
+	struct usb_ctrlrequest setup_buffer;	/* setup data buffer */
+};
+
+/* TRB control data structure */
+struct trb_ctrl {
+	u32	own:1;		/* owner of TRB */
+	u32	rsvd1:3;
+	u32	chain:1;	/* associate this TRB with the
+				next TRB on the Ring */
+	u32	ioc:1;		/* interrupt on complete */
+	u32	rsvd2:4;
+	u32	type:6;		/* TRB type */
+#define TYPE_NORMAL	1
+#define TYPE_DATA	3
+#define TYPE_LINK	6
+	u32	dir:1;		/* Working at data stage of control endpoint
+				operation. 0 is OUT and 1 is IN. */
+	u32	rsvd3:15;
+};
+
+/* TRB data structure
+ * For a multi-TRB chain, all the TRBs' physical addresses must be contiguous.
+ */
+struct trb_hw {
+	u32	buf_addr_lo;	/* data buffer address low 32 bit */
+	u32	buf_addr_hi;	/* data buffer address high 32 bit */
+	u32	trb_len;	/* transfer length */
+	struct trb_ctrl	ctrl;	/* TRB control data */
+};
+
+/* TRB structure */
+struct trb {
+	struct trb_hw *trb_hw;		/* point to the trb_hw structure */
+	dma_addr_t trb_dma;		/* dma address for this trb_hw */
+	struct list_head trb_list;	/* trb list */
+};
+
+/* device data structure */
+struct mv_u3d {
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	spinlock_t			lock;	/* device lock */
+	struct completion		*done;
+	struct device			*dev;
+	int				irq;
+
+	/* usb controller registers */
+	struct mv_u3d_cap_regs __iomem	*cap_regs;
+	struct mv_u3d_op_regs __iomem	*op_regs;
+	struct mv_u3d_vuc_regs __iomem	*vuc_regs;
+	void __iomem			*phy_regs;
+
+	unsigned int			max_eps;
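+	/* device context array: 2 entries per endpoint; ep0 IN and OUT
+	 * share entry 1, other eps use entry (ep_num * 2 + direction)
+	 */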
+	struct ep_context		*ep_context;
+	size_t				ep_context_size;
+	dma_addr_t			ep_context_dma;
+
+	struct dma_pool			*trb_pool; /* for TRB data structure */
+	struct mv_ep			*eps;
+
+	struct mv_req			*status_req; /* ep0 status request */
+	struct usb_ctrlrequest		local_setup_buff; /* store setup data*/
+
+	unsigned int		resume_state;	/* USB state to resume */
+	unsigned int		usb_state;	/* USB current state */
+	unsigned int		ep0_state;	/* Endpoint zero state */
+	unsigned int		ep0_dir;
+
+	unsigned int		dev_addr;	/* device address */
+
+	unsigned int		errors;
+
+	unsigned		softconnect:1;
+	unsigned		vbus_active:1;	/* vbus is active or not */
+	unsigned		remote_wakeup:1; /* support remote wakeup */
+	unsigned		clock_gating:1;	/* clock gating or not */
+	unsigned		active:1;	/* udc is active or not */
+	unsigned		vbus_valid_detect:1; /* udc vbus detection */
+
+	struct mv_usb_addon_irq *vbus;
+	unsigned int		power;
+
+	struct clk		*clk;
+};
+
+/* endpoint data structure */
+struct mv_ep {
+	struct usb_ep		ep;
+	struct mv_u3d		*u3d;
+	struct list_head	queue;	/* ep requests queued on hardware */
+	struct list_head	req_list; /* list of ep request */
+	struct ep_context	*ep_context; /* ep context */
+	u32			direction;
+	char			name[14];
+	u32			processing; /* a request is currently
+						queued on hardware */
+	spinlock_t		req_lock; /* ep lock */
+	unsigned		wedge:1;
+	unsigned		enabled:1;
+	unsigned		ep_type:2;
+	unsigned		ep_num:8;
+};
+
+/* request data structure */
+struct mv_req {
+	struct usb_request	req;
+	struct mv_ep		*ep;
+	struct list_head	queue;	/* ep request queued on hardware */
+	struct list_head	list;	/* ep request list */
+	struct list_head	trb_list; /* trb list of a request */
+
+	struct trb		*trb_head; /* point to first trb of a request */
+	unsigned		trb_count; /* TRB number in the chain */
+	unsigned		chain;	   /* TRB chain or not */
+};
+
+#endif
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
new file mode 100644
index 0000000..fc064e8
--- /dev/null
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -0,0 +1,2043 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/mv_usb.h>
+#include <linux/clk.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+
+#include "mv_u3d.h"
+
+#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
+
+#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
+				((ep)->u3d->ep0_dir) : ((ep)->direction))
+
+#define RESET_TIMEOUT		10000
+#define FLUSH_TIMEOUT		100000
+#define OWN_TIMEOUT		10000
+#define LOOPS_USEC_SHIFT	4
+#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
+#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
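+/* poll loops: LOOPS(timeout) iterations of udelay(LOOPS_USEC) wait for
+ * roughly 'timeout' microseconds in total
+ */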
+
+static const char driver_name[] = "mv_u3d";
+static const char driver_desc[] = DRIVER_DESC;
+
+static void mv_nuke(struct mv_ep *ep, int status);
+static void mv_stop_activity(struct mv_u3d *u3d,
+			struct usb_gadget_driver *driver);
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor mv_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
+};
+
+static void mv_ep0_reset(struct mv_u3d *u3d)
+{
+	struct mv_ep *ep;
+	u32 epxcr;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		ep = &u3d->eps[i];
+		ep->u3d = u3d;
+
+		/* ep0 ep context, ep0 in and out share the same ep context */
+		ep->ep_context = &u3d->ep_context[1];
+	}
+
+	/* reset ep state machine */
+	/* reset ep0 out */
+	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
+	epxcr |= EPXCR_EP_INIT;
+	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
+	udelay(5);
+	epxcr &= ~EPXCR_EP_INIT;
+	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
+
+	epxcr = ((EP0_MAX_PKT_SIZE
+		<< EPXCR_MAX_PACKET_SIZE_SHIFT)
+		| (1 << EPXCR_MAX_BURST_SIZE_SHIFT)
+		| (1 << EPXCR_EP_ENABLE_SHIFT)
+		| EPXCR_EP_TYPE_CONTROL);
+	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
+
+	/* reset ep0 in */
+	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
+	epxcr |= EPXCR_EP_INIT;
+	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
+	udelay(5);
+	epxcr &= ~EPXCR_EP_INIT;
+	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
+
+	epxcr = ((EP0_MAX_PKT_SIZE
+		<< EPXCR_MAX_PACKET_SIZE_SHIFT)
+		| (1 << EPXCR_MAX_BURST_SIZE_SHIFT)
+		| (1 << EPXCR_EP_ENABLE_SHIFT)
+		| EPXCR_EP_TYPE_CONTROL);
+	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
+}
+
+static void mv_ep0_stall(struct mv_u3d *u3d)
+{
+	u32 tmp;
+	dev_dbg(u3d->dev, "%s\n", __func__);
+
+	/* set TX and RX to stall */
+	tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
+	tmp |= EPXCR_EP_HALT;
+	iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+
+	tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
+	tmp |= EPXCR_EP_HALT;
+	iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+
+	/* update ep0 state */
+	u3d->ep0_state = WAIT_FOR_SETUP;
+	u3d->ep0_dir = EP_DIR_OUT;
+}
+
+static int mv_process_ep_req(struct mv_u3d *u3d, int index,
+	struct mv_req *curr_req)
+{
+	struct trb	*curr_trb;
+	dma_addr_t cur_deq_lo;
+	struct ep_context	*curr_ep_context;
+	int trb_complete, actual, remaining_length = 0;
+	int direction, ep_num;
+	int retval = 0;
+	u32 tmp, status, length;
+
+	curr_ep_context = &u3d->ep_context[index];
+	direction = index % 2;
+	ep_num = index / 2;
+
+	trb_complete = 0;
+	actual = curr_req->req.length;
+
+	while (!list_empty(&curr_req->trb_list)) {
+		curr_trb = list_entry(curr_req->trb_list.next,
+					struct trb, trb_list);
+		if (!curr_trb->trb_hw->ctrl.own) {
+			dev_err(u3d->dev, "%s, TRB own error!\n",
+				u3d->eps[index].name);
+			return 1;
+		}
+
+		curr_trb->trb_hw->ctrl.own = 0;
+		if (direction == EP_DIR_OUT) {
+			tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
+			cur_deq_lo =
+				ioread32(&u3d->vuc_regs->rxst[ep_num].curdeqlo);
+		} else {
+			tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
+			cur_deq_lo =
+				ioread32(&u3d->vuc_regs->txst[ep_num].curdeqlo);
+		}
+
+		status = tmp >> XFERSTATUS_COMPLETE_SHIFT;
+		length = tmp & XFERSTATUS_TRB_LENGTH_MASK;
+
+		if (status == COMPLETE_SUCCESS ||
+			status == COMPLETE_SHORT_PACKET) {
+			remaining_length += length;
+			actual -= remaining_length;
+		} else {
+			dev_err(u3d->dev,
+				"complete_tr error: ep=%d %s: error = 0x%x\n",
+				index >> 1, direction ? "SEND" : "RECV",
+				status);
+			retval = -EPROTO;
+		}
+
+		list_del_init(&curr_trb->trb_list);
+	}
+	if (retval)
+		return retval;
+
+	curr_req->req.actual = actual;
+	return 0;
+}
+
+/*
+ * mv_u3d_done() - retire a request; caller blocked irqs
+ * @status : request status to be set, only works when
+ * request is still in progress.
+ */
+static void mv_u3d_done(struct mv_ep *ep, struct mv_req *req, int status)
+{
+	struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
+
+	dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
+	/* Removed the req from ep queue */
+	list_del_init(&req->queue);
+
+	/* req.status should be set as -EINPROGRESS in ep_queue() */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* Free trb for the request */
+	if (!req->chain)
+		dma_pool_free(u3d->trb_pool,
+			req->trb_head->trb_hw, req->trb_head->trb_dma);
+	else {
+		dma_unmap_single(ep->u3d->gadget.dev.parent,
+			(dma_addr_t)req->trb_head->trb_dma,
+			req->trb_count * sizeof(struct trb_hw),
+			DMA_BIDIRECTIONAL);
+		kfree(req->trb_head->trb_hw);
+	}
+	kfree(req->trb_head);
+
+	usb_gadget_unmap_request(&u3d->gadget, &req->req, ep_dir(ep));
+
+	if (status && (status != -ESHUTDOWN)) {
+		dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+	}
+
+	spin_unlock(&ep->u3d->lock);
+	/*
+	 * complete() is from gadget layer,
+	 * eg fsg->bulk_in_complete()
+	 */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&ep->u3d->lock);
+}
+
+static int mv_queue_trb(struct mv_ep *ep, struct mv_req *req)
+{
+	u32 tmp, direction;
+	struct mv_u3d *u3d;
+	struct ep_context *ep_context;
+	int retval = 0;
+
+	u3d = ep->u3d;
+	direction = ep_dir(ep);
+
+	/* ep0 in and out share the same ep context slot 1 */
+	if (ep->ep_num == 0)
+		ep_context = &(u3d->ep_context[1]);
+	else
+		ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
+
+	/* check if the pipe is empty or not */
+	if (!list_empty(&ep->queue)) {
+		dev_err(u3d->dev, "add trb to non-empty queue!\n");
+		retval = -ENOMEM;
+		WARN_ON(1);
+	} else {
+		ep_context->rsvd0 = cpu_to_le32(1);
+		ep_context->rsvd1 = 0;
+
+		/* Configure the trb address and set the DCS bit.
+		 * Both DCS bit and own bit in trb should be set.
+		 */
+		ep_context->trb_addr_lo =
+			cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
+		ep_context->trb_addr_hi = 0;
+
+		/* Ensure that updates to the EP Context will
+		 * occur before ringing the doorbell.
+		 */
+		wmb();
+
+		/* ring bell the ep */
+		if (ep->ep_num == 0)
+			tmp = 0x1;
+		else
+			tmp = ep->ep_num * 2
+				+ ((direction == EP_DIR_OUT) ? 0 : 1);
+
+		iowrite32(tmp, &u3d->op_regs->doorbell);
+	}
+	return retval;
+}
+
+static struct trb *mv_build_trb_one(struct mv_req *req, unsigned *length,
+		dma_addr_t *dma)
+{
+	u32 temp;
+	unsigned int direction;
+	struct trb *trb;
+	struct trb_hw *trb_hw;
+	struct mv_u3d *u3d;
+
+	/* how big will this transfer be? */
+	*length = req->req.length - req->req.actual;
+	BUG_ON(*length > (unsigned)EP_MAX_LENGTH_TRANSFER);
+
+	u3d = req->ep->u3d;
+
+	trb = kzalloc(sizeof(struct trb), GFP_ATOMIC);
+	if (!trb) {
+		dev_err(u3d->dev, "%s, trb alloc fail\n", __func__);
+		return NULL;
+	}
+
+	/*
+	 * Be careful that no __GFP_HIGHMEM is set,
+	 * or we cannot use dma_to_virt.
+	 * GFP_KERNEL cannot be used while holding a spinlock.
+	 */
+	trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
+	if (!trb_hw) {
+		dev_err(u3d->dev,
+			"%s, dma_pool_alloc fail\n", __func__);
+		kfree(trb);
+		return NULL;
+	}
+	trb->trb_dma = *dma;
+	trb->trb_hw = trb_hw;
+
+	/* initialize buffer page pointers */
+	temp = (u32)(req->req.dma + req->req.actual);
+
+	trb_hw->buf_addr_lo = cpu_to_le32(temp);
+	trb_hw->buf_addr_hi = 0;
+	trb_hw->trb_len = cpu_to_le32(*length);
+	trb_hw->ctrl.own = 1;
+
+	if (req->ep->ep_num == 0)
+		trb_hw->ctrl.type = TYPE_DATA;
+	else
+		trb_hw->ctrl.type = TYPE_NORMAL;
+
+	req->req.actual += *length;
+
+	direction = ep_dir(req->ep);
+	if (direction == EP_DIR_IN)
+		trb_hw->ctrl.dir = 1;
+	else
+		trb_hw->ctrl.dir = 0;
+
+	/* Enable interrupt for the last trb of a request */
+	if (!req->req.no_interrupt)
+		trb_hw->ctrl.ioc = 1;
+
+	trb_hw->ctrl.chain = 0;
+
+	wmb();
+	return trb;
+}
+
+static int mv_build_trb_chain(struct mv_req *req, unsigned *length,
+		struct trb *trb, int *is_last)
+{
+	u32 temp;
+	unsigned int direction;
+	struct mv_u3d *u3d;
+
+	/* how big will this transfer be? */
+	*length = min(req->req.length - req->req.actual,
+			(unsigned)EP_MAX_LENGTH_TRANSFER);
+
+	u3d = req->ep->u3d;
+
+	trb->trb_dma = 0;
+
+	/* initialize buffer page pointers */
+	temp = (u32)(req->req.dma + req->req.actual);
+
+	trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
+	trb->trb_hw->buf_addr_hi = 0;
+	trb->trb_hw->trb_len = cpu_to_le32(*length);
+	trb->trb_hw->ctrl.own = 1;
+
+	if (req->ep->ep_num == 0)
+		trb->trb_hw->ctrl.type = TYPE_DATA;
+	else
+		trb->trb_hw->ctrl.type = TYPE_NORMAL;
+
+	req->req.actual += *length;
+
+	direction = ep_dir(req->ep);
+	if (direction == EP_DIR_IN)
+		trb->trb_hw->ctrl.dir = 1;
+	else
+		trb->trb_hw->ctrl.dir = 0;
+
+	/* zlp is needed if req->req.zero is set */
+	if (req->req.zero) {
+		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+			*is_last = 1;
+		else
+			*is_last = 0;
+	} else if (req->req.length == req->req.actual)
+		*is_last = 1;
+	else
+		*is_last = 0;
+
+	/* Enable interrupt for the last trb of a request */
+	if (*is_last && !req->req.no_interrupt)
+		trb->trb_hw->ctrl.ioc = 1;
+
+	if (*is_last)
+		trb->trb_hw->ctrl.chain = 0;
+	else {
+		trb->trb_hw->ctrl.chain = 1;
+		dev_dbg(u3d->dev, "chain trb\n");
+	}
+
+	wmb();
+
+	return 0;
+}
+
+/* generate TRB linked list for a request
+ * The usb controller only supports contiguous trb chains, i.e. the trb
+ * structures' physical addresses must be contiguous.
+ */
+static int mv_req_to_trb(struct mv_req *req)
+{
+	unsigned count;
+	int is_last;
+	struct trb *trb;
+	struct trb_hw *trb_hw;
+	struct mv_u3d *u3d;
+	dma_addr_t dma;
+	unsigned length;
+	unsigned trb_num;
+
+	u3d = req->ep->u3d;
+
+	INIT_LIST_HEAD(&req->trb_list);
+
+	length = req->req.length - req->req.actual;
+	/* normally the request transfer length is no more than 64KB
+	 * (EP_MAX_LENGTH_TRANSFER); use mv_build_trb_one() to optimize
+	 * that single-TRB case.
+	 */
+	if (length <= (unsigned)EP_MAX_LENGTH_TRANSFER) {
+		trb = mv_build_trb_one(req, &count, &dma);
+		if (!trb)
+			return -ENOMEM;
+		list_add_tail(&trb->trb_list, &req->trb_list);
+		req->trb_head = trb;
+		req->trb_count = 1;
+		req->chain = 0;
+	} else {
+		trb_num = length / EP_MAX_LENGTH_TRANSFER;
+		if (length % EP_MAX_LENGTH_TRANSFER)
+			trb_num++;
+
+		trb = kzalloc(trb_num
+				* sizeof(struct trb), GFP_ATOMIC);
+		if (!trb) {
+			dev_err(u3d->dev,
+					"%s, trb alloc fail\n", __func__);
+			return -ENOMEM;
+		}
+
+		trb_hw = kzalloc(trb_num
+				* sizeof(struct trb_hw), GFP_ATOMIC);
+		if (!trb_hw) {
+			dev_err(u3d->dev,
+					"%s, trb_hw alloc fail\n", __func__);
+			kfree(trb);
+			return -ENOMEM;
+		}
+
+		do {
+			trb->trb_hw = trb_hw;
+			if (mv_build_trb_chain(req, &count, trb, &is_last)) {
+				dev_err(u3d->dev,
+					"%s, mv_build_trb_chain fail\n",
+					__func__);
+				return -EIO;
+			}
+
+			list_add_tail(&trb->trb_list, &req->trb_list);
+			req->trb_count++;
+			trb++;
+			trb_hw++;
+		} while (!is_last);
+
+		req->trb_head = list_entry(req->trb_list.next,
+					struct trb, trb_list);
+		req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
+					req->trb_head->trb_hw,
+					trb_num * sizeof(struct trb_hw),
+					DMA_BIDIRECTIONAL);
+
+		req->chain = 1;
+	}
+
+	return 0;
+}
+
+static int
+mv_start_queue(struct mv_ep *ep)
+{
+	struct mv_u3d *u3d = ep->u3d;
+	struct mv_req *req;
+	int ret;
+
+	if (!list_empty(&ep->req_list) && !ep->processing)
+		req = list_entry(ep->req_list.next, struct mv_req, list);
+	else
+		return 0;
+
+	ep->processing = 1;
+
+	/* set up dma mapping */
+	ret = usb_gadget_map_request(&u3d->gadget, &req->req, ep_dir(ep));
+	if (ret)
+		return ret;
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->trb_count = 0;
+
+	/* build trbs and push them to device queue */
+	if (!mv_req_to_trb(req)) {
+		ret = mv_queue_trb(ep, req);
+		if (ret) {
+			ep->processing = 0;
+			return ret;
+		}
+	} else {
+		ep->processing = 0;
+		dev_err(u3d->dev, "%s, mv_req_to_trb fail\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* irq handler advances the queue */
+	if (req)
+		list_add_tail(&req->queue, &ep->queue);
+
+	return 0;
+}
+
+static int mv_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct mv_u3d *u3d;
+	struct mv_ep *ep;
+	struct ep_context *ep_context;
+	u16 max = 0;
+	unsigned maxburst = 0;
+	u32 epxcr, direction;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	u3d = ep->u3d;
+
+	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	direction = ep_dir(ep);
+	max = le16_to_cpu(desc->wMaxPacketSize);
+
+	if (!_ep->maxburst)
+		_ep->maxburst = 1;
+	maxburst = _ep->maxburst;
+
+	/* Get the endpoint context address */
+	ep_context = (struct ep_context *)ep->ep_context;
+
+	/* Set the max burst size */
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+		if (maxburst > 16) {
+			dev_err(u3d->dev,
+				"max burst should not be greater "
+				"than 16 on bulk ep\n");
+			_ep->maxburst = maxburst = 1;
+		}
+		dev_dbg(u3d->dev,
+			"maxburst: %d on bulk %s\n", maxburst, ep->name);
+		break;
+	case USB_ENDPOINT_XFER_CONTROL:
+		if (maxburst != 1) {
+			dev_err(u3d->dev,
+				"max burst should be 1 on control ep\n");
+			_ep->maxburst = maxburst = 1;
+		}
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (maxburst != 1) {
+			dev_err(u3d->dev,
+				"max burst should be 1 on int ep "
+				"if transfer size is not 1024\n");
+			_ep->maxburst = maxburst = 1;
+		}
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (maxburst != 1) {
+			dev_err(u3d->dev,
+				"max burst should be 1 on isoc ep "
+				"if transfer size is not 1024\n");
+			_ep->maxburst = maxburst = 1;
+		}
+		break;
+	default:
+		goto en_done;
+	}
+
+	ep->ep.maxpacket = max;
+	ep->ep.desc = desc;
+	ep->enabled = 1;
+
+	/* Enable the endpoint for Rx or Tx and set the endpoint type */
+	if (direction == EP_DIR_OUT) {
+		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+		epxcr |= EPXCR_EP_INIT;
+		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+		udelay(5);
+		epxcr &= ~EPXCR_EP_INIT;
+		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+
+		epxcr = ((max << EPXCR_MAX_PACKET_SIZE_SHIFT)
+		      | ((maxburst - 1) << EPXCR_MAX_BURST_SIZE_SHIFT)
+		      | (1 << EPXCR_EP_ENABLE_SHIFT)
+		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
+		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+	} else {
+		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+		epxcr |= EPXCR_EP_INIT;
+		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+		udelay(5);
+		epxcr &= ~EPXCR_EP_INIT;
+		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+
+		epxcr = ((max << EPXCR_MAX_PACKET_SIZE_SHIFT)
+		      | ((maxburst - 1) << EPXCR_MAX_BURST_SIZE_SHIFT)
+		      | (1 << EPXCR_EP_ENABLE_SHIFT)
+		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
+		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+	}
+
+	return 0;
+en_done:
+	return -EINVAL;
+}
+
+static int  mv_ep_disable(struct usb_ep *_ep)
+{
+	struct mv_u3d *u3d;
+	struct mv_ep *ep;
+	struct ep_context *ep_context;
+	u32 epxcr, direction;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	if ((!_ep) || !ep->ep.desc)
+		return -EINVAL;
+
+	u3d = ep->u3d;
+
+	/* Get the endpoint context address */
+	ep_context = ep->ep_context;
+
+	direction = ep_dir(ep);
+
+	/* nuke all pending requests (does flush) */
+	mv_nuke(ep, -ESHUTDOWN);
+
+	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
+	if (direction == EP_DIR_OUT) {
+		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+		epxcr &= ~((1 << EPXCR_EP_ENABLE_SHIFT)
+		      | USB_ENDPOINT_XFERTYPE_MASK);
+		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+	} else {
+		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+		epxcr &= ~((1 << EPXCR_EP_ENABLE_SHIFT)
+		      | USB_ENDPOINT_XFERTYPE_MASK);
+		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+	}
+
+	ep->enabled = 0;
+
+	ep->ep.desc = NULL;
+	return 0;
+}
+
+static struct usb_request *
+mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct mv_req *req = NULL;
+
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct mv_req *req = container_of(_req, struct mv_req, req);
+
+	kfree(req);
+}
+
+static void mv_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct mv_u3d *u3d;
+	u32 direction;
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	unsigned int loops;
+	u32 tmp;
+
+	/* if endpoint is not enabled, cannot flush endpoint */
+	if (!ep->enabled)
+		return;
+
+	u3d = ep->u3d;
+	direction = ep_dir(ep);
+
+	/* ep0 needs to clear the flush bit after flushing the fifo */
+	if (!ep->ep_num) {
+		if (direction == EP_DIR_OUT) {
+			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
+			tmp |= EPXCR_EP_FLUSH;
+			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+			udelay(10);
+			tmp &= ~EPXCR_EP_FLUSH;
+			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+		} else {
+			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
+			tmp |= EPXCR_EP_FLUSH;
+			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+			udelay(10);
+			tmp &= ~EPXCR_EP_FLUSH;
+			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+		}
+		return;
+	}
+
+	if (direction == EP_DIR_OUT) {
+		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+		tmp |= EPXCR_EP_FLUSH;
+		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+
+		/* Wait until flushing completed */
+		loops = LOOPS(FLUSH_TIMEOUT);
+		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
+			EPXCR_EP_FLUSH) {
+			/*
+			 * EP_FLUSH bit should be cleared to indicate this
+			 * operation is complete
+			 */
+			if (loops == 0) {
+				dev_dbg(u3d->dev,
+				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
+				    direction ? "in" : "out");
+				return;
+			}
+			loops--;
+			udelay(LOOPS_USEC);
+		}
+	} else {	/* EP_DIR_IN */
+		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+		tmp |= EPXCR_EP_FLUSH;
+		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+
+		/* Wait until flushing completed */
+		loops = LOOPS(FLUSH_TIMEOUT);
+		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
+			EPXCR_EP_FLUSH) {
+			/*
+			* EP_FLUSH bit should be cleared to indicate this
+			* operation is complete
+			*/
+			if (loops == 0) {
+				dev_dbg(u3d->dev,
+				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
+				    direction ? "in" : "out");
+				return;
+			}
+			loops--;
+			udelay(LOOPS_USEC);
+		}
+	}
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int
+mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	struct mv_req *req = container_of(_req, struct mv_req, req);
+	struct mv_u3d *u3d = ep->u3d;
+	unsigned long flags;
+	int is_first_req = 0;
+
+	if (!ep->ep_num && u3d->ep0_state == STATUS_STAGE && !_req->length) {
+		dev_dbg(u3d->dev, "ep0 status stage\n");
+		u3d->ep0_state = WAIT_FOR_SETUP;
+		return 0;
+	}
+
+	dev_dbg(u3d->dev, "%s: %s, req: 0x%x\n",
+			__func__, _ep->name, (u32)req);
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		dev_err(u3d->dev,
+			"%s, bad params, _req: 0x%x,"
+			"req->req.complete: 0x%x, req->req.buf: 0x%x,"
+			"list_empty: 0x%x\n",
+			__func__, (u32)_req,
+			(u32)req->req.complete, (u32)req->req.buf,
+			(u32)list_empty(&req->queue));
+		return -EINVAL;
+	}
+	if (unlikely(!_ep || !ep->ep.desc)) {
+		dev_err(u3d->dev, "%s, bad ep\n", __func__);
+		return -EINVAL;
+	}
+	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+	}
+
+	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
+		dev_err(u3d->dev,
+			"%s, bad params of driver/speed\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	req->ep = ep;
+
+	/* Software list handles usb request. */
+	spin_lock_irqsave(&ep->req_lock, flags);
+	is_first_req = list_empty(&ep->req_list);
+	list_add_tail(&req->list, &ep->req_list);
+	spin_unlock_irqrestore(&ep->req_lock, flags);
+	if (!is_first_req) {
+		dev_dbg(u3d->dev, "list is not empty\n");
+		return 0;
+	}
+
+	dev_dbg(u3d->dev, "call mv_start_queue from usb_ep_queue\n");
+	spin_lock_irqsave(&u3d->lock, flags);
+	mv_start_queue(ep);
+	spin_unlock_irqrestore(&u3d->lock, flags);
+	return 0;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	struct mv_req *req;
+	struct mv_u3d *u3d = ep->u3d;
+	struct ep_context *ep_context;
+	struct mv_req *next_req;
+
+	unsigned long flags;
+	int ret = 0;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->u3d->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* The request is in progress, or completed but not dequeued */
+	if (ep->queue.next == &req->queue) {
+		_req->status = -ECONNRESET;
+		mv_ep_fifo_flush(_ep);
+
+		/* The request isn't the last request in this ep queue */
+		if (req->queue.next != &ep->queue) {
+			dev_dbg(u3d->dev,
+				"it is not the last request in this ep queue\n");
+			ep_context = ep->ep_context;
+			next_req = list_entry(req->queue.next, struct mv_req,
+					queue);
+
+			/* Point first TRB of next request to the EP context. */
+			iowrite32((u32) next_req->trb_head,
+					&ep_context->trb_addr_lo);
+		} else {
+			struct ep_context *ep_context;
+			ep_context = ep->ep_context;
+			ep_context->trb_addr_lo = 0;
+			ep_context->trb_addr_hi = 0;
+		}
+
+	} else
+		WARN_ON(1);
+
+	mv_u3d_done(ep, req, -ECONNRESET);
+
+	/* remove the req from the ep req list */
+	if (!list_empty(&ep->req_list)) {
+		struct mv_req *curr_req;
+		curr_req = list_entry(ep->req_list.next, struct mv_req, list);
+		if (curr_req == req) {
+			list_del_init(&req->list);
+			ep->processing = 0;
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&ep->u3d->lock, flags);
+	return ret;
+}
+
+static void
+mv_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
+{
+	u32 tmp;
+	struct mv_ep *ep = u3d->eps;
+
+	dev_dbg(u3d->dev, "%s\n", __func__);
+	if (direction == EP_DIR_OUT) {
+		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+		if (stall)
+			tmp |= EPXCR_EP_HALT;
+		else
+			tmp &= ~EPXCR_EP_HALT;
+		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+	} else {
+		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+		if (stall)
+			tmp |= EPXCR_EP_HALT;
+		else
+			tmp &= ~EPXCR_EP_HALT;
+		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+	}
+}
+
+static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+{
+	struct mv_ep *ep;
+	unsigned long flags = 0;
+	int status = 0;
+	struct mv_u3d *u3d;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	u3d = ep->u3d;
+	if (!_ep || !ep->ep.desc) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		status = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/*
+	 * Attempting to halt an IN ep will fail if any transfer requests
+	 * are still queued
+	 */
+	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
+		status = -EAGAIN;
+		goto out;
+	}
+
+	spin_lock_irqsave(&ep->u3d->lock, flags);
+	mv_ep_set_stall(u3d, ep->ep_num, ep_dir(ep), halt);
+	if (halt && wedge)
+		ep->wedge = 1;
+	else if (!halt)
+		ep->wedge = 0;
+	spin_unlock_irqrestore(&ep->u3d->lock, flags);
+
+	if (ep->ep_num == 0)
+		u3d->ep0_dir = EP_DIR_OUT;
+out:
+	return status;
+}
+
+static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
+{
+	return mv_ep_set_halt_wedge(_ep, halt, 0);
+}
+
+static int mv_ep_set_wedge(struct usb_ep *_ep)
+{
+	return mv_ep_set_halt_wedge(_ep, 1, 1);
+}
+
+static struct usb_ep_ops mv_ep_ops = {
+	.enable		= mv_ep_enable,
+	.disable	= mv_ep_disable,
+
+	.alloc_request	= mv_alloc_request,
+	.free_request	= mv_free_request,
+
+	.queue		= mv_ep_queue,
+	.dequeue	= mv_ep_dequeue,
+
+	.set_wedge	= mv_ep_set_wedge,
+	.set_halt	= mv_ep_set_halt,
+	.fifo_flush	= mv_ep_fifo_flush,
+};
+
+static void u3d_stop(struct mv_u3d *u3d)
+{
+	u32 tmp;
+
+	if (!u3d->clock_gating && u3d->vbus_valid_detect)
+		iowrite32(INTR_ENABLE_VBUS_VALID, &u3d->vuc_regs->intrenable);
+	else
+		iowrite32(0, &u3d->vuc_regs->intrenable);
+	iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
+	iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
+	iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
+	iowrite32(~0x0, &u3d->vuc_regs->linkchange);
+	iowrite32(0x1, &u3d->vuc_regs->setuplock);
+
+	/* Reset the RUN bit in the command register to stop USB */
+	tmp = ioread32(&u3d->op_regs->usbcmd);
+	tmp &= ~CMD_RUN_STOP;
+	iowrite32(tmp, &u3d->op_regs->usbcmd);
+	dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
+		ioread32(&u3d->op_regs->usbcmd));
+}
+
+static void u3d_start(struct mv_u3d *u3d)
+{
+	u32 usbintr;
+	u32 temp;
+
+	/* enable link LTSSM state machine */
+	temp = ioread32(&u3d->vuc_regs->ltssm);
+	temp |= LTSSM_PHY_INIT_DONE;
+	iowrite32(temp, &u3d->vuc_regs->ltssm);
+
+	/* Enable interrupts */
+	usbintr = INTR_ENABLE_LINK_CHG | INTR_ENABLE_TXDESC_ERR |
+		INTR_ENABLE_RXDESC_ERR | INTR_ENABLE_TX_COMPLETE |
+		INTR_ENABLE_RX_COMPLETE | INTR_ENABLE_SETUP |
+		(u3d->vbus_valid_detect ? INTR_ENABLE_VBUS_VALID : 0);
+	iowrite32(usbintr, &u3d->vuc_regs->intrenable);
+
+	/* Enable ctrl ep */
+	iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);
+
+	/* Set the Run bit in the command register */
+	iowrite32(CMD_RUN_STOP, &u3d->op_regs->usbcmd);
+	dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
+		ioread32(&u3d->op_regs->usbcmd));
+}
+
+static int u3d_reset(struct mv_u3d *u3d)
+{
+	unsigned int loops;
+	u32 tmp;
+
+	/* Stop the controller */
+	tmp = ioread32(&u3d->op_regs->usbcmd);
+	tmp &= ~CMD_RUN_STOP;
+	iowrite32(tmp, &u3d->op_regs->usbcmd);
+
+	/* Reset the controller to get default values */
+	iowrite32(CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
+
+	/* wait for reset to complete */
+	loops = LOOPS(RESET_TIMEOUT);
+	while (ioread32(&u3d->op_regs->usbcmd) & CMD_CTRL_RESET) {
+		if (loops == 0) {
+			dev_err(u3d->dev,
+				"Wait for RESET completed TIMEOUT\n");
+			return -ETIMEDOUT;
+		}
+		loops--;
+		udelay(LOOPS_USEC);
+	}
+
+	/* Configure the Endpoint Context Address */
+	iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
+	iowrite32(0, &u3d->op_regs->dcbaaph);
+
+	return 0;
+}
+
+static int mv_u3d_enable(struct mv_u3d *u3d)
+{
+	struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
+	int retval;
+
+	if (u3d->active)
+		return 0;
+
+	if (u3d->clock_gating == 0) {
+		u3d->active = 1;
+		return 0;
+	}
+
+	dev_dbg(u3d->dev, "enable u3d\n");
+	clk_enable(u3d->clk);
+	if (pdata->phy_init) {
+		retval = pdata->phy_init(u3d->phy_regs);
+		if (retval) {
+			dev_err(u3d->dev,
+				"init phy error %d\n", retval);
+			clk_disable(u3d->clk);
+			return retval;
+		}
+	}
+	u3d->active = 1;
+
+	return 0;
+}
+
+static void mv_u3d_disable(struct mv_u3d *u3d)
+{
+	struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
+	if (u3d->clock_gating && u3d->active) {
+		dev_dbg(u3d->dev, "disable u3d\n");
+		if (pdata->phy_deinit)
+			pdata->phy_deinit(u3d->phy_regs);
+		clk_disable(u3d->clk);
+		u3d->active = 0;
+	}
+}
+
+static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct mv_u3d *u3d;
+	unsigned long flags;
+	int retval = 0;
+
+	u3d = container_of(gadget, struct mv_u3d, gadget);
+
+	spin_lock_irqsave(&u3d->lock, flags);
+
+	u3d->vbus_active = (is_active != 0);
+	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
+		__func__, u3d->softconnect, u3d->vbus_active);
+	/*
+	 * 1. external VBUS detect: we can disable/enable clock on demand.
+	 * 2. UDC VBUS detect: we have to enable clock all the time.
+	 * 3. No VBUS detect: we have to enable clock all the time.
+	 */
+	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
+		retval = mv_u3d_enable(u3d);
+		if (retval == 0) {
+			/*
+			 * after clock is disabled, we lost all the register
+			 *  context. We have to re-init registers
+			 */
+			u3d_reset(u3d);
+			mv_ep0_reset(u3d);
+			u3d_start(u3d);
+		}
+	} else if (u3d->driver && u3d->softconnect) {
+		if (!u3d->active)
+			goto out;
+
+		/* stop all the transfer in queue*/
+		mv_stop_activity(u3d, u3d->driver);
+		u3d_stop(u3d);
+		mv_u3d_disable(u3d);
+	}
+
+out:
+	spin_unlock_irqrestore(&u3d->lock, flags);
+	return retval;
+}
+
+/* constrain controller's VBUS power usage
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume.  For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
+
+	u3d->power = mA;
+
+	return 0;
+}
+
+static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
+	unsigned long flags;
+	int retval = 0;
+
+	spin_lock_irqsave(&u3d->lock, flags);
+
+	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
+		__func__, u3d->softconnect, u3d->vbus_active);
+	u3d->softconnect = (is_on != 0);
+	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
+		retval = mv_u3d_enable(u3d);
+		if (retval == 0) {
+			/*
+			 * after clock is disabled, we lost all the register
+			 *  context. We have to re-init registers
+			 */
+			u3d_reset(u3d);
+			mv_ep0_reset(u3d);
+			u3d_start(u3d);
+		}
+	} else if (u3d->driver && u3d->vbus_active) {
+		/* stop all the transfer in queue*/
+		mv_stop_activity(u3d, u3d->driver);
+		u3d_stop(u3d);
+		mv_u3d_disable(u3d);
+	}
+
+	spin_unlock_irqrestore(&u3d->lock, flags);
+
+	return retval;
+}
+
+static int mv_u3d_start(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
+	unsigned long flags;
+
+	if (u3d->driver)
+		return -EBUSY;
+
+	spin_lock_irqsave(&u3d->lock, flags);
+
+	/* hook up the driver ... */
+	driver->driver.bus = NULL;
+	u3d->driver = driver;
+	u3d->gadget.dev.driver = &driver->driver;
+
+	u3d->ep0_dir = USB_DIR_OUT;
+
+	spin_unlock_irqrestore(&u3d->lock, flags);
+
+	u3d->vbus_valid_detect = 1;
+
+	return 0;
+}
+
+static int mv_u3d_stop(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
+	unsigned long flags;
+
+	u3d->vbus_valid_detect = 0;
+	spin_lock_irqsave(&u3d->lock, flags);
+
+	mv_u3d_enable(u3d);
+	u3d_stop(u3d);
+	/* stop all usb activities */
+	u3d->gadget.speed = USB_SPEED_UNKNOWN;
+	mv_stop_activity(u3d, driver);
+	mv_u3d_disable(u3d);
+
+	spin_unlock_irqrestore(&u3d->lock, flags);
+
+	u3d->gadget.dev.driver = NULL;
+	u3d->driver = NULL;
+
+	return 0;
+}
+
+
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops mv_ops = {
+	/* notify controller that VBUS is powered or not */
+	.vbus_session	= mv_u3d_vbus_session,
+
+	/* constrain controller's VBUS power usage */
+	.vbus_draw	= mv_u3d_vbus_draw,
+
+	.pullup		= mv_u3d_pullup,
+	.udc_start	= mv_u3d_start,
+	.udc_stop	= mv_u3d_stop,
+};
+
+static int mv_eps_init(struct mv_u3d *u3d)
+{
+	struct mv_ep	*ep;
+	char name[14];
+	int i;
+
+	/* initialize ep0, ep0 in/out use eps[1] */
+	ep = &u3d->eps[1];
+	ep->u3d = u3d;
+	strncpy(ep->name, "ep0", sizeof(ep->name));
+	ep->ep.name = ep->name;
+	ep->ep.ops = &mv_ep_ops;
+	ep->wedge = 0;
+	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+	ep->ep_num = 0;
+	ep->ep.desc = &mv_ep0_desc;
+	INIT_LIST_HEAD(&ep->queue);
+	INIT_LIST_HEAD(&ep->req_list);
+	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+	/* add ep0 ep_context */
+	ep->ep_context = &u3d->ep_context[1];
+
+	/* initialize other endpoints */
+	for (i = 2; i < u3d->max_eps * 2; i++) {
+		ep = &u3d->eps[i];
+		if (i & 1) {
+			snprintf(name, sizeof(name), "ep%din", i >> 1);
+			ep->direction = EP_DIR_IN;
+		} else {
+			snprintf(name, sizeof(name), "ep%dout", i >> 1);
+			ep->direction = EP_DIR_OUT;
+		}
+		ep->u3d = u3d;
+		strncpy(ep->name, name, sizeof(ep->name));
+		ep->ep.name = ep->name;
+
+		ep->ep.ops = &mv_ep_ops;
+		ep->ep.maxpacket = (unsigned short) ~0;
+		ep->ep_num = i / 2;
+
+		INIT_LIST_HEAD(&ep->queue);
+		list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
+
+		INIT_LIST_HEAD(&ep->req_list);
+		spin_lock_init(&ep->req_lock);
+		ep->ep_context = &u3d->ep_context[i];
+	}
+
+	return 0;
+}
+
+/* delete all endpoint requests, called with spinlock held */
+static void mv_nuke(struct mv_ep *ep, int status)
+{
+	/* endpoint fifo flush */
+	mv_ep_fifo_flush(&ep->ep);
+
+	while (!list_empty(&ep->queue)) {
+		struct mv_req *req = NULL;
+		req = list_entry(ep->queue.next, struct mv_req, queue);
+		mv_u3d_done(ep, req, status);
+	}
+}
+
+/* stop all USB activities */
+static
+void mv_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
+{
+	struct mv_ep	*ep;
+
+	mv_nuke(&u3d->eps[1], -ESHUTDOWN);
+
+	list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
+		mv_nuke(ep, -ESHUTDOWN);
+	}
+
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock(&u3d->lock);
+		driver->disconnect(&u3d->gadget);
+		spin_lock(&u3d->lock);
+	}
+}
+
+static void mv_irq_process_error(struct mv_u3d *u3d)
+{
+	/* Increment the error count */
+	u3d->errors++;
+	dev_err(u3d->dev, "%s\n", __func__);
+}
+
+static void mv_irq_process_link_change(struct mv_u3d *u3d)
+{
+	u32 linkchange;
+
+	linkchange = ioread32(&u3d->vuc_regs->linkchange);
+	iowrite32(linkchange, &u3d->vuc_regs->linkchange);
+
+	dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);
+
+	if (linkchange & LINK_CHANGE_LINK_UP) {
+		dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
+			ioread32(&u3d->vuc_regs->ltssmstate));
+
+		u3d->usb_state = USB_STATE_ATTACHED;
+		u3d->ep0_dir = EP_DIR_OUT;
+		u3d->ep0_state = WAIT_FOR_SETUP;
+
+		/* set speed */
+		u3d->gadget.speed = USB_SPEED_SUPER;
+	}
+
+	if (linkchange & LINK_CHANGE_SUSPEND)
+		dev_dbg(u3d->dev, "link suspend\n");
+
+	if (linkchange & LINK_CHANGE_RESUME)
+		dev_dbg(u3d->dev, "link resume\n");
+
+	if (linkchange & LINK_CHANGE_WRESET)
+		dev_dbg(u3d->dev, "warm reset\n");
+
+	if (linkchange & LINK_CHANGE_HRESET)
+		dev_dbg(u3d->dev, "hot reset\n");
+
+	if (linkchange & LINK_CHANGE_INACT)
+		dev_dbg(u3d->dev, "inactive\n");
+
+	if (linkchange & LINK_CHANGE_DISABLE_AFTER_U0)
+		dev_dbg(u3d->dev, "ss.disabled\n");
+
+	if (linkchange & LINK_CHANGE_VBUS_INVALID) {
+		dev_dbg(u3d->dev, "vbus invalid\n");
+		u3d->vbus_valid_detect = 1;
+		/* if external vbus detect is not supported,
+		 * we handle it here.
+		 */
+		if (!u3d->vbus) {
+			spin_unlock(&u3d->lock);
+			mv_u3d_vbus_session(&u3d->gadget, 0);
+			spin_lock(&u3d->lock);
+		}
+	}
+}
+
+static void mv_ch9setaddress(struct mv_u3d *u3d, struct usb_ctrlrequest *setup)
+{
+	u32 tmp;
+	u3d->dev_addr = (u8)setup->wValue;
+
+	/* add a delay here, or hot reset will occur */
+	dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
+
+	if (u3d->dev_addr > 127) {
+		dev_err(u3d->dev,
+			"%s, u3d address is wrong (out of range)\n", __func__);
+		u3d->dev_addr = 0;
+		return;
+	}
+
+	/* update usb state */
+	u3d->usb_state = USB_STATE_ADDRESS;
+
+	/* set the new address */
+	tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
+	tmp &= ~0x7F;
+	tmp |= (u32)u3d->dev_addr;
+	iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);
+}
+
+static void mv_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
+	struct usb_ctrlrequest *setup)
+{
+	bool delegate = false;
+
+	mv_nuke(&u3d->eps[ep_num * 2 + EP_DIR_IN], -ESHUTDOWN);
+
+	dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+			setup->bRequestType, setup->bRequest,
+			setup->wValue, setup->wIndex, setup->wLength);
+
+	/* We process some standard setup requests here */
+	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (setup->bRequest) {
+		case USB_REQ_GET_STATUS:
+			delegate = true;
+			break;
+
+		case USB_REQ_SET_ADDRESS:
+			mv_ch9setaddress(u3d, setup);
+			break;
+
+		case USB_REQ_CLEAR_FEATURE:
+			delegate = true;
+			break;
+
+		case USB_REQ_SET_FEATURE:
+			delegate = true;
+			break;
+
+		default:
+			delegate = true;
+		}
+	} else
+		delegate = true;
+
+	/* delegate USB standard requests to the gadget driver */
+	if (delegate == true) {
+		/* USB requests handled by gadget */
+		if (setup->wLength) {
+			/* DATA phase from gadget, STATUS phase from u3d */
+			u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+					? EP_DIR_IN : EP_DIR_OUT;
+			spin_unlock(&u3d->lock);
+			if (u3d->driver->setup(&u3d->gadget,
+				&u3d->local_setup_buff) < 0) {
+				dev_err(u3d->dev, "setup error!\n");
+				mv_ep0_stall(u3d);
+			}
+			spin_lock(&u3d->lock);
+		} else {
+			/* no DATA phase, STATUS phase from gadget */
+			u3d->ep0_dir = EP_DIR_IN;
+			u3d->ep0_state = STATUS_STAGE;
+			spin_unlock(&u3d->lock);
+			if (u3d->driver->setup(&u3d->gadget,
+				&u3d->local_setup_buff) < 0)
+				mv_ep0_stall(u3d);
+			spin_lock(&u3d->lock);
+		}
+	}
+}
+
+static void mv_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
+{
+	struct ep_context *epcontext;
+
+	epcontext = &u3d->ep_context[ep_num * 2 + EP_DIR_IN];
+
+	/* Copy the setup packet to local buffer */
+	memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
+}
+
+static void mv_irq_process_setup(struct mv_u3d *u3d)
+{
+	u32 tmp, i;
+	/* Process all Setup packet received interrupts */
+	tmp = ioread32(&u3d->vuc_regs->setuplock);
+	if (tmp) {
+		for (i = 0; i < u3d->max_eps; i++) {
+			if (tmp & (1 << i)) {
+				mv_get_setup_data(u3d, i,
+					(u8 *)(&u3d->local_setup_buff));
+				mv_handle_setup_packet(u3d, i,
+					&u3d->local_setup_buff);
+			}
+		}
+	}
+
+	iowrite32(tmp, &u3d->vuc_regs->setuplock);
+}
+
+static void mv_irq_process_tr_complete(struct mv_u3d *u3d)
+{
+	u32 tmp, bit_pos;
+	int i, ep_num = 0, direction = 0;
+	struct mv_ep	*curr_ep;
+	struct mv_req *curr_req, *temp_req;
+	int status;
+
+	tmp = ioread32(&u3d->vuc_regs->endcomplete);
+
+	dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
+	if (!tmp)
+		return;
+	iowrite32(tmp, &u3d->vuc_regs->endcomplete);
+
+	for (i = 0; i < u3d->max_eps * 2; i++) {
+		ep_num = i >> 1;
+		direction = i % 2;
+
+		bit_pos = 1 << (ep_num + 16 * direction);
+
+		if (!(bit_pos & tmp))
+			continue;
+
+		if (i == 0)
+			curr_ep = &u3d->eps[1];
+		else
+			curr_ep = &u3d->eps[i];
+
+		/* remove req out of ep request list after completion */
+		dev_dbg(u3d->dev, "tr comp: check req_list\n");
+		spin_lock(&curr_ep->req_lock);
+		if (!list_empty(&curr_ep->req_list)) {
+			struct mv_req *req;
+			req = list_entry(curr_ep->req_list.next,
+						struct mv_req, list);
+			list_del_init(&req->list);
+			curr_ep->processing = 0;
+		}
+		spin_unlock(&curr_ep->req_lock);
+
+		/* process the req queue until an incomplete request is found */
+		list_for_each_entry_safe(curr_req, temp_req,
+			&curr_ep->queue, queue) {
+			status = mv_process_ep_req(u3d, i, curr_req);
+			if (status)
+				break;
+			/* write back status to req */
+			curr_req->req.status = status;
+
+			/* ep0 request completion */
+			if (ep_num == 0) {
+				mv_u3d_done(curr_ep, curr_req, 0);
+				break;
+			} else {
+				mv_u3d_done(curr_ep, curr_req, status);
+			}
+		}
+
+		dev_dbg(u3d->dev, "call mv_start_queue from ep complete\n");
+		mv_start_queue(curr_ep);
+	}
+}
+
+static irqreturn_t mv_u3d_irq(int irq, void *dev)
+{
+	struct mv_u3d *u3d = (struct mv_u3d *)dev;
+	u32 status, intr;
+	u32 bridgesetting;
+	u32 trbunderrun;
+
+	spin_lock(&u3d->lock);
+
+	status = ioread32(&u3d->vuc_regs->intrcause);
+	intr = ioread32(&u3d->vuc_regs->intrenable);
+	status &= intr;
+
+	if (status == 0) {
+		spin_unlock(&u3d->lock);
+		dev_err(u3d->dev, "irq error!\n");
+		return IRQ_NONE;
+	}
+
+	if (status & USBINT_VBUS_VALID) {
+		bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
+		if (bridgesetting & BRIDGE_SETTING_VBUS_VALID) {
+			/* write vbus valid bit of bridge setting to clear */
+			bridgesetting = BRIDGE_SETTING_VBUS_VALID;
+			iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
+			dev_dbg(u3d->dev, "vbus valid\n");
+
+			u3d->vbus_valid_detect = 0;
+			/* if external vbus detect is not supported,
+			 * we handle it here.
+			 */
+			if (!u3d->vbus) {
+				spin_unlock(&u3d->lock);
+				mv_u3d_vbus_session(&u3d->gadget, 1);
+				spin_lock(&u3d->lock);
+			}
+		} else
+			dev_err(u3d->dev, "vbus bit is not set\n");
+	}
+
+	/* RX data is already in the 16KB FIFO.*/
+	if (status & USBINT_UNDER_RUN) {
+		trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
+		dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
+		iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
+		mv_irq_process_error(u3d);
+	}
+
+	if (status & (USBINT_RXDESC_ERR | USBINT_TXDESC_ERR)) {
+		/* write one to clear */
+		iowrite32(status & (USBINT_RXDESC_ERR | USBINT_TXDESC_ERR),
+			&u3d->vuc_regs->intrcause);
+		dev_err(u3d->dev, "desc err 0x%x\n", status);
+		mv_irq_process_error(u3d);
+	}
+
+	if (status & USBINT_LINK_CHG)
+		mv_irq_process_link_change(u3d);
+
+	if (status & USBINT_TX_COMPLETE)
+		mv_irq_process_tr_complete(u3d);
+
+	if (status & USBINT_RX_COMPLETE)
+		mv_irq_process_tr_complete(u3d);
+
+	if (status & USBINT_SETUP)
+		mv_irq_process_setup(u3d);
+
+	spin_unlock(&u3d->lock);
+	return IRQ_HANDLED;
+}
+
+/* release device structure */
+static void mv_u3d_gadget_release(struct device *dev)
+{
+	struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);
+	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
+
+	complete(u3d->done);
+}
+
+static int __devexit mv_u3d_remove(struct platform_device *dev)
+{
+	struct mv_u3d *u3d = platform_get_drvdata(dev);
+
+	BUG_ON(u3d == NULL);
+
+	usb_del_gadget_udc(&u3d->gadget);
+
+	/* free memory allocated in probe */
+	if (u3d->trb_pool)
+		dma_pool_destroy(u3d->trb_pool);
+
+	if (u3d->ep_context)
+		dma_free_coherent(&dev->dev, u3d->ep_context_size,
+			u3d->ep_context, u3d->ep_context_dma);
+
+	kfree(u3d->eps);
+
+	if (u3d->irq)
+		free_irq(u3d->irq, &dev->dev);
+
+	mv_u3d_disable(u3d);
+
+	if (u3d->cap_regs)
+		iounmap(u3d->cap_regs);
+	u3d->cap_regs = NULL;
+
+	kfree(u3d->status_req);
+
+	clk_put(u3d->clk);
+
+	device_unregister(&u3d->gadget.dev);
+
+	platform_set_drvdata(dev, NULL);
+
+	/* free dev, wait for the release() finished */
+	wait_for_completion(u3d->done);
+	kfree(u3d);
+
+	return 0;
+}
+
+static int mv_u3d_probe(struct platform_device *dev)
+{
+	struct mv_u3d *u3d = NULL;
+	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
+	int retval = 0;
+	struct resource *r;
+	size_t size;
+
+	if (!dev->dev.platform_data) {
+		dev_err(&dev->dev, "missing platform_data\n");
+		retval = -ENODEV;
+		goto err_pdata;
+	}
+
+	u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
+	if (!u3d) {
+		dev_err(&dev->dev, "failed to allocate memory for u3d\n");
+		retval = -ENOMEM;
+		goto err_alloc_private;
+	}
+
+	spin_lock_init(&u3d->lock);
+
+	platform_set_drvdata(dev, u3d);
+
+	u3d->dev = &dev->dev;
+	u3d->vbus = pdata->vbus;
+
+	u3d->clk = clk_get(&dev->dev, pdata->clkname[0]);
+	if (IS_ERR(u3d->clk)) {
+		retval = PTR_ERR(u3d->clk);
+		goto err_get_clk;
+	}
+
+	r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
+	if (!r) {
+		dev_err(&dev->dev, "no I/O memory resource defined\n");
+		retval = -ENODEV;
+		goto err_get_cap_regs;
+	}
+
+	u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
+		ioremap(r->start, resource_size(r));
+	if (!u3d->cap_regs) {
+		dev_err(&dev->dev, "failed to map I/O memory\n");
+		retval = -EBUSY;
+		goto err_map_cap_regs;
+	} else {
+		dev_dbg(&dev->dev, "cap_regs address: 0x%x/0x%x\n",
+			(unsigned int)r->start, (unsigned int)u3d->cap_regs);
+	}
+
+	/* we will access controller register, so enable the u3d controller */
+	clk_enable(u3d->clk);
+
+	if (pdata->phy_init) {
+		retval = pdata->phy_init(u3d->phy_regs);
+		if (retval) {
+			dev_err(&dev->dev, "init phy error %d\n", retval);
+			clk_disable(u3d->clk);
+			goto err_u3d_enable;
+		}
+	}
+
+	u3d->op_regs = (struct mv_u3d_op_regs __iomem *)((u32)u3d->cap_regs
+		+ USB3_OP_REGS_OFFSET);
+
+	u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)((u32)u3d->cap_regs
+		+ ioread32(&u3d->cap_regs->vuoff));
+
+	u3d->max_eps = 16;
+
+	/*
+	 * Some platforms use USB to download the boot image and may not
+	 * disconnect the USB gadget before loading the kernel, so stop
+	 * the u3d controller first.
+	 */
+	u3d_stop(u3d);
+	iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
+
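+	/*
+	 * Two ep_context entries are reserved per endpoint number, one
+	 * for each direction, rounded up to EP_CONTEXT_ALIGNMENT.
+	 */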
+	size = u3d->max_eps * sizeof(struct ep_context) * 2;
+	size = (size + EP_CONTEXT_ALIGNMENT - 1) & ~(EP_CONTEXT_ALIGNMENT - 1);
+	u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
+					&u3d->ep_context_dma, GFP_KERNEL);
+	if (!u3d->ep_context) {
+		dev_err(&dev->dev, "allocate ep context memory failed\n");
+		retval = -ENOMEM;
+		goto err_alloc_ep_context;
+	}
+	u3d->ep_context_size = size;
+
+	/* create TRB dma_pool resource */
+	u3d->trb_pool = dma_pool_create("u3d_trb",
+			&dev->dev,
+			sizeof(struct trb_hw),
+			MV_U3D_TRB_ALIGNMENT,
+			MV_U3D_DMA_BOUNDARY);
+
+	if (!u3d->trb_pool) {
+		retval = -ENOMEM;
+		goto err_alloc_trb_pool;
+	}
+
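+	/* two mv_ep structures per endpoint number, one for each direction */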
+	size = u3d->max_eps * sizeof(struct mv_ep) * 2;
+	u3d->eps = kzalloc(size, GFP_KERNEL);
+	if (!u3d->eps) {
+		dev_err(&dev->dev, "allocate ep memory failed\n");
+		retval = -ENOMEM;
+		goto err_alloc_eps;
+	}
+
+	/* initialize ep0 status request structure */
+	u3d->status_req = kzalloc(sizeof(struct mv_req) + 8, GFP_KERNEL);
+	if (!u3d->status_req) {
+		dev_err(&dev->dev, "allocate status_req memory failed\n");
+		retval = -ENOMEM;
+		goto err_alloc_status_req;
+	}
+	INIT_LIST_HEAD(&u3d->status_req->queue);
+
+	/* allocate a small amount of memory to get valid address */
+	u3d->status_req->req.buf = (char *)u3d->status_req
+					+ sizeof(struct mv_req);
+	u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
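+	/* the buffer is the 8 spare bytes appended to the mv_req allocation */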
+
+	u3d->resume_state = USB_STATE_NOTATTACHED;
+	u3d->usb_state = USB_STATE_POWERED;
+	u3d->ep0_dir = EP_DIR_OUT;
+	u3d->remote_wakeup = 0;
+
+	r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+	if (!r) {
+		dev_err(&dev->dev, "no IRQ resource defined\n");
+		retval = -ENODEV;
+		goto err_get_irq;
+	}
+	u3d->irq = r->start;
+	if (request_irq(u3d->irq, mv_u3d_irq,
+		IRQF_DISABLED | IRQF_SHARED, driver_name, u3d)) {
+		dev_err(&dev->dev, "Request irq %d for u3d failed\n",
+			u3d->irq);
+		u3d->irq = 0;
+		retval = -ENODEV;
+		goto err_request_irq;
+	}
+
+	/* initialize gadget structure */
+	u3d->gadget.ops = &mv_ops;	/* usb_gadget_ops */
+	u3d->gadget.ep0 = &u3d->eps[1].ep;	/* gadget ep0 */
+	INIT_LIST_HEAD(&u3d->gadget.ep_list);	/* ep_list */
+	u3d->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&u3d->gadget.dev, "gadget");
+	u3d->gadget.dev.parent = &dev->dev;
+	u3d->gadget.dev.dma_mask = dev->dev.dma_mask;
+	u3d->gadget.dev.release = mv_u3d_gadget_release;
+	u3d->gadget.name = driver_name;		/* gadget name */
+
+	retval = device_register(&u3d->gadget.dev);
+	if (retval)
+		goto err_register_gadget_device;
+
+	mv_eps_init(u3d);
+
+	/* external vbus detection */
+	if (u3d->vbus) {
+		u3d->clock_gating = 1;
+		dev_dbg(&dev->dev, "external vbus detection\n");
+	}
+
+	/*
+	 * To save power, disable the clock; the PHY is deinitialized
+	 * along with it.
+	 * When the clock must stay on, VBUS cannot be detected, so set
+	 * vbus_active to keep the controller running at all times.
+	 */
+	if (u3d->clock_gating) {
+		if (pdata->phy_deinit)
+			pdata->phy_deinit(u3d->phy_regs);
+		clk_disable(u3d->clk);
+	} else {
+		u3d->vbus_active = 1;
+	}
+
+	/* enable usb3 controller vbus detection */
+	u3d->vbus_valid_detect = 1;
+
+	retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
+	if (retval)
+		goto err_unregister;
+
+	dev_dbg(&dev->dev, "successfully probed usb3 device %s clock gating\n",
+		u3d->clock_gating ? "with" : "without");
+
+	return 0;
+
+err_unregister:
+	device_unregister(&u3d->gadget.dev);
+err_register_gadget_device:
+	free_irq(u3d->irq, u3d);
+err_request_irq:
+err_get_irq:
+	kfree(u3d->status_req);
+err_alloc_status_req:
+	kfree(u3d->eps);
+err_alloc_eps:
+	dma_pool_destroy(u3d->trb_pool);
+err_alloc_trb_pool:
+	dma_free_coherent(&dev->dev, u3d->ep_context_size,
+		u3d->ep_context, u3d->ep_context_dma);
+err_alloc_ep_context:
+	if (pdata->phy_deinit)
+		pdata->phy_deinit(u3d->phy_regs);
+	clk_disable(u3d->clk);
+err_u3d_enable:
+	iounmap(u3d->cap_regs);
+err_map_cap_regs:
+err_get_cap_regs:
+	clk_put(u3d->clk);
+err_get_clk:
+	platform_set_drvdata(dev, NULL);
+	kfree(u3d);
+err_alloc_private:
+err_pdata:
+	return retval;
+}
+
+#ifdef CONFIG_PM
+static int mv_u3d_suspend(struct device *dev)
+{
+	struct mv_u3d *u3d = dev_get_drvdata(dev);
+
+	/*
+	 * USB can suspend only after the cable is unplugged; the
+	 * clock_gating == 1 case is already handled by the vbus
+	 * session, so there is nothing to do for it here.
+	 */
+	if (!u3d->clock_gating) {
+		u3d_stop(u3d);
+
+		spin_lock_irq(&u3d->lock);
+		/* stop all usb activities */
+		mv_stop_activity(u3d, u3d->driver);
+		spin_unlock_irq(&u3d->lock);
+
+		mv_u3d_disable(u3d);
+	}
+
+	return 0;
+}
+
+static int mv_u3d_resume(struct device *dev)
+{
+	struct mv_u3d *u3d = dev_get_drvdata(dev);
+	int retval;
+
+	if (!u3d->clock_gating) {
+		retval = mv_u3d_enable(u3d);
+		if (retval)
+			return retval;
+
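+		/*
+		 * Reset the controller and ep0 state, then restart, if a
+		 * gadget driver is bound and soft-connected.
+		 */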
+		if (u3d->driver && u3d->softconnect) {
+			u3d_reset(u3d);
+			mv_ep0_reset(u3d);
+			u3d_start(u3d);
+		}
+	}
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
+#endif
+
+static void mv_u3d_shutdown(struct platform_device *dev)
+{
+	struct mv_u3d *u3d = dev_get_drvdata(&dev->dev);
+	u32 tmp;
+
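+	/* clear the RUN/STOP bit so the controller is halted across reboot */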
+	tmp = ioread32(&u3d->op_regs->usbcmd);
+	tmp &= ~CMD_RUN_STOP;
+	iowrite32(tmp, &u3d->op_regs->usbcmd);
+}
+
+static struct platform_driver u3d_driver = {
+	.probe		= mv_u3d_probe,
+	.remove		= __devexit_p(mv_u3d_remove),
+	.shutdown	= mv_u3d_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "mv-u3d",
+#ifdef CONFIG_PM
+		.pm	= &mv_u3d_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(u3d_driver);
+MODULE_ALIAS("platform:mv-u3d");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Yu Xu <yuxu@xxxxxxxxxxx>");
+MODULE_LICENSE("GPL");
-- 
1.7.9.5
