[PATCH v2] usb: gadget: mv: Add USB 3.0 device driver for Marvell PXA2128 chip.

Add support for the Marvell USB 3.0 device controller found in the PXA2128 chip.

Signed-off-by: Yu Xu <yuxu@xxxxxxxxxxx>
---
 drivers/usb/gadget/Kconfig       |    9 +
 drivers/usb/gadget/Makefile      |    1 +
 drivers/usb/gadget/mv_u3d.h      |  302 ++++++
 drivers/usb/gadget/mv_u3d_core.c | 2095 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 2407 insertions(+)
 create mode 100644 drivers/usb/gadget/mv_u3d.h
 create mode 100644 drivers/usb/gadget/mv_u3d_core.c
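
Note for reviewers: a minimal, illustrative sketch (not part of this patch) of
how a gadget function driver would exercise this UDC through the standard
gadget API once bound. mv_u3d_core.c provides the usb_ep_ops behind these
calls: mv_alloc_request() backs usb_ep_alloc_request(), and mv_ep_queue()
builds the TRB chain and rings the doorbell. The names my_complete,
my_send_bulk and ep_in are hypothetical.

  #include <linux/usb/gadget.h>

  /* completion runs from irq_process_tr_complete() -> done() */
  static void my_complete(struct usb_ep *ep, struct usb_request *req)
  {
  	/* req->status and req->actual are valid here */
  }

  static int my_send_bulk(struct usb_ep *ep_in, void *buf, unsigned len)
  {
  	struct usb_request *req;

  	req = usb_ep_alloc_request(ep_in, GFP_ATOMIC); /* -> mv_alloc_request() */
  	if (!req)
  		return -ENOMEM;
  	req->buf = buf;
  	req->length = len;
  	req->complete = my_complete;
  	return usb_ep_queue(ep_in, req, GFP_ATOMIC);   /* -> mv_ep_queue() */
  }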

diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 1f93861..4eb92d0 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -316,6 +316,15 @@ config USB_MV_UDC
 	  USB2.0 OTG controller, which can be configured as high speed or
 	  full speed USB peripheral.
 
+config USB_MV_U3D
+	tristate "MARVELL PXA2128 USB 3.0 controller"
+	depends on CPU_MMP3
+	select USB_GADGET_DUALSPEED
+	select USB_GADGET_SUPERSPEED
+	help
+	  The MARVELL PXA2128 processor series includes a SuperSpeed USB 3.0
+	  device controller, which supports SuperSpeed USB peripherals.
+
 #
 # Controllers available in both integrated and discrete versions
 #
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index b7f6eef..3e7cdd7 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_USB_MV_UDC)	+= mv_udc.o
 mv_udc-y			:= mv_udc_core.o
 obj-$(CONFIG_USB_CI13XXX_MSM)	+= ci13xxx_msm.o
 obj-$(CONFIG_USB_FUSB300)	+= fusb300_udc.o
+obj-$(CONFIG_USB_MV_U3D)	+= mv_u3d_core.o
 
 #
 # USB gadget drivers
diff --git a/drivers/usb/gadget/mv_u3d.h b/drivers/usb/gadget/mv_u3d.h
new file mode 100644
index 0000000..243481e
--- /dev/null
+++ b/drivers/usb/gadget/mv_u3d.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MV_U3D_H
+#define __MV_U3D_H
+
+#define EP_CONTEXT_ALIGNMENT	32
+#define TRB_ALIGNMENT		16
+#define DMA_BOUNDARY		4096
+
+#define EP_DIR_IN		1
+#define EP_DIR_OUT		0
+
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#define EP0_MAX_PKT_SIZE	512
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP		0
+#define DATA_STATE_XMIT		1
+#define DATA_STATE_NEED_ZLP	2
+#define WAIT_FOR_OUT_STATUS	3
+#define DATA_STATE_RECV		4
+#define STATUS_STAGE		5
+
+#define EP_MAX_LENGTH_TRANSFER	0x10000
+
+/* USB3 Interrupt Status */
+#define USBINT_SETUP		0x00000001
+#define USBINT_RX_COMPLETE	0x00000002
+#define USBINT_TX_COMPLETE	0x00000004
+#define USBINT_UNDER_RUN	0x00000008
+#define USBINT_RXDESC_ERR	0x00000010
+#define USBINT_TXDESC_ERR	0x00000020
+#define USBINT_RX_TRB_COMPLETE	0x00000040
+#define USBINT_TX_TRB_COMPLETE	0x00000080
+#define USBINT_VBUS_VALID	0x00010000
+#define USBINT_STORAGE_CMD_FULL	0x00020000
+#define USBINT_LINK_CHG		0x01000000
+
+/* USB3 Interrupt Enable */
+#define INTR_ENABLE_SETUP		0x00000001
+#define INTR_ENABLE_RX_COMPLETE		0x00000002
+#define INTR_ENABLE_TX_COMPLETE		0x00000004
+#define INTR_ENABLE_UNDER_RUN		0x00000008
+#define INTR_ENABLE_RXDESC_ERR		0x00000010
+#define INTR_ENABLE_TXDESC_ERR		0x00000020
+#define INTR_ENABLE_RX_TRB_COMPLETE	0x00000040
+#define INTR_ENABLE_TX_TRB_COMPLETE	0x00000080
+#define INTR_ENABLE_RX_BUFFER_ERR	0x00000100
+#define INTR_ENABLE_VBUS_VALID		0x00010000
+#define INTR_ENABLE_STORAGE_CMD_FULL	0x00020000
+#define INTR_ENABLE_LINK_CHG		0x01000000
+#define INTR_ENABLE_PRIME_STATUS	0x02000000
+
+/* USB3 Link Change */
+#define LINK_CHANGE_LINK_UP		0x00000001
+#define LINK_CHANGE_SUSPEND		0x00000002
+#define LINK_CHANGE_RESUME		0x00000004
+#define LINK_CHANGE_WRESET		0x00000008
+#define LINK_CHANGE_HRESET		0x00000010
+#define LINK_CHANGE_VBUS_INVALID	0x00000020
+#define LINK_CHANGE_INACT		0x00000040
+#define LINK_CHANGE_DISABLE_AFTER_U0	0x00000080
+#define LINK_CHANGE_U1			0x00000100
+#define LINK_CHANGE_U2			0x00000200
+#define LINK_CHANGE_U3			0x00000400
+
+/* bridge setting */
+#define BRIDGE_SETTING_VBUS_VALID	(1 << 16)
+
+/* Command Register Bit Masks */
+#define CMD_RUN_STOP		0x00000001
+#define CMD_CTRL_RESET		0x00000002
+
+
+#define EPXCR_EP_TYPE_CONTROL		0
+#define EPXCR_EP_TYPE_ISOC		1
+#define EPXCR_EP_TYPE_BULK		2
+#define EPXCR_EP_TYPE_INT		3
+#define EPXCR_EP_ENABLE_SHIFT		4
+#define EPXCR_MAX_BURST_SIZE_SHIFT	12
+#define EPXCR_MAX_PACKET_SIZE_SHIFT	16
+#define USB_BULK_BURST_OUT		6
+#define USB_BULK_BURST_IN		14
+
+#define EPXCR_EP_FLUSH		(1 << 7)
+#define EPXCR_EP_HALT		(1 << 1)
+#define EPXCR_EP_INIT		(1)
+
+/* TX/RX Status Register */
+#define XFERSTATUS_COMPLETE_SHIFT	24
+#define COMPLETE_INVALID	0
+#define COMPLETE_SUCCESS	1
+#define COMPLETE_BUFF_ERR	2
+#define COMPLETE_SHORT_PACKET	3
+#define COMPLETE_TRB_ERR	5
+#define XFERSTATUS_TRB_LENGTH_MASK	(0xFFFFFF)
+
+#define USB_LINK_BYPASS_VBUS	0x8
+
+#define LTSSM_PHY_INIT_DONE		0x80000000
+#define LTSSM_NEVER_GO_COMPLIANCE	0x40000000
+
+
+#define USB3_OP_REGS_OFFSET	0x100
+#define USB3_PHY_OFFSET		0xB800
+
+#define DCS_ENABLE	0x1
+
+struct mv_u3d_cap_regs {
+	u32	rsvd[5];
+	u32	dboff;
+	u32	rtsoff;
+	u32	vuoff;
+};
+
+struct mv_u3d_op_regs {
+	u32	usbcmd;		/* Command register */
+	u32	rsvd1[11];
+	u32	dcbaapl;
+	u32	dcbaaph;
+	u32	rsvd2[243];
+	u32	portsc;
+	u32	portlinkinfo;
+	u32	rsvd3[9917];
+	u32	doorbell;
+};
+
+struct epxcr {
+	u32	epxoutcr0;
+	u32	epxoutcr1;
+	u32	epxincr0;
+	u32	epxincr1;
+};
+
+struct xferstatus {
+	u32	curdeqlo;
+	u32	curdeqhi;
+	u32	statuslo;
+	u32	statushi;
+};
+
+struct mv_u3d_vuc_regs {	/* vendor unique control registers */
+	u32	ctrlepenable;
+	u32	setuplock;
+	u32	endcomplete;
+	u32	intrcause;
+	u32	intrenable;
+	u32	trbcomplete;
+	u32	linkchange;
+	u32	rsvd1[5];
+	u32	trbunderrun;
+	u32	rsvd2[43];
+	u32	bridgesetting;
+	u32	rsvd3[7];
+	struct xferstatus	txst[16];
+	struct xferstatus	rxst[16];
+	u32	ltssm;
+	u32	pipe;
+	u32	linkcr0;
+	u32	linkcr1;
+	u32	rsvd6[60];
+	u32	mib0;
+	u32	usblink;
+	u32	ltssmstate;
+	u32	linkerrorcause;
+	u32	rsvd7[60];
+	u32	devaddrtiebrkr;
+	u32	itpinfo0;
+	u32	itpinfo1;
+	u32	rsvd8[61];
+	struct epxcr	epcr[16];
+	u32	rsvd9[64];
+	u32	phyaddr;
+	u32	phydata;
+};
+
+struct ep_context {
+	u32	rsvd0;
+	u32	rsvd1;
+	u32	trb_addr_lo;
+	u32	trb_addr_hi;
+	u32	rsvd2;
+	u32	rsvd3;
+	struct usb_ctrlrequest setup_buffer;
+};
+
+struct trb_ctrl {
+	u32	own:1;
+	u32	toggle:1;
+	u32	rsvd1:2;
+	u32	chain:1;
+	u32	ioc:1;
+	u32	rsvd2:4;
+	u32	type:6;
+#define TYPE_NORMAL	1
+#define TYPE_DATA	3
+#define TYPE_LINK	6
+	u32	dir:1;
+	u32	rsvd3:15;
+};
+
+struct trb_hw {
+	u32	buf_addr_lo;
+	u32	buf_addr_hi;
+	u32	trb_len;
+	struct trb_ctrl	ctrl;
+};
+
+struct trb {
+	struct trb_hw *trb_hw;
+	dma_addr_t trb_dma;              /* dma address for this trb */
+	struct trb *next_trb_virt;
+};
+
+struct mv_u3d {
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	spinlock_t			lock;
+	struct completion		*done;
+	struct platform_device		*dev;
+	int				irq;
+
+	struct mv_u3d_cap_regs __iomem	*cap_regs;
+	struct mv_u3d_op_regs __iomem	*op_regs;
+	struct mv_u3d_vuc_regs __iomem	*vuc_regs;
+	unsigned int			phy_regs;
+	unsigned int			max_eps;
+	struct ep_context		*ep_context;
+	size_t				ep_context_size;
+	dma_addr_t			ep_context_dma;
+
+	struct dma_pool			*trb_pool;
+	struct mv_ep			*eps;
+
+	struct trb			*trb_head;
+	struct trb			*trb_tail;
+
+	struct mv_req			*status_req;
+	struct usb_ctrlrequest		local_setup_buff;
+
+	unsigned int		resume_state;	/* USB state to resume */
+	unsigned int		usb_state;	/* USB current state */
+	unsigned int		ep0_state;	/* Endpoint zero state */
+	unsigned int		ep0_dir;
+
+	unsigned int		dev_addr;
+
+	int			errors;
+	unsigned		softconnect:1,
+				vbus_active:1,
+				remote_wakeup:1,
+				softconnected:1,
+				clock_gating:1,
+				active:1,
+				/* usb controller vbus detection */
+				vbus_valid_detect:1;
+
+	unsigned int		power;
+
+	struct mv_usb_platform_data	*pdata;
+
+	struct clk	*clk;
+};
+
+/* endpoint data structure */
+struct mv_ep {
+	struct usb_ep		ep;
+	struct mv_u3d		*u3d;
+	struct list_head	queue;
+	struct list_head	req_list;	/* list of ep's request */
+	struct ep_context	*ep_context;
+	const struct usb_endpoint_descriptor	*desc;
+	u32			direction;
+	char			name[14];
+	u32			processing;
+	spinlock_t		req_lock;
+	unsigned		wedge:1,
+				enabled:1,
+				ep_type:2,
+				ep_num:8;
+};
+
+/* request data structure */
+struct mv_req {
+	struct usb_request	req;
+	struct trb		*trb, *head, *tail;
+	struct mv_ep		*ep;
+	struct list_head	queue;
+	struct list_head	list;	/* add to list of ep's request */
+	unsigned		trb_count;
+	unsigned		chain;
+	unsigned		mapped:1;
+};
+
+#endif
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
new file mode 100644
index 0000000..2261a18
--- /dev/null
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -0,0 +1,2095 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/mv_usb.h>
+#include <linux/clk.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include "mv_u3d.h"
+
+#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
+#define DRIVER_VERSION		"14 May 2012"
+
+#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
+				((ep)->u3d->ep0_dir) : ((ep)->direction))
+
+#define RESET_TIMEOUT		10000
+#define FLUSH_TIMEOUT		100000
+#define OWN_TIMEOUT		10000
+#define LOOPS_USEC_SHIFT	4
+#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
+#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
+
+static const char driver_name[] = "mv_u3d";
+static const char driver_desc[] = DRIVER_DESC;
+
+static void nuke(struct mv_ep *ep, int status);
+static void stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver);
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor mv_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
+};
+
+
+static void ep0_reset(struct mv_u3d *u3d)
+{
+	struct mv_ep *ep;
+	u32 epxcr;
+	int i = 0;
+
+	/* ep0 in and out */
+	for (i = 0; i < 2; i++) {
+		ep = &u3d->eps[i];
+		ep->u3d = u3d;
+
+		/* ep0 ep context, ep0 in and out share the same ep context */
+		ep->ep_context = &u3d->ep_context[1];
+
+		/* reset ep state machine */
+		if (i) {	/* RX */
+			epxcr = readl(&u3d->vuc_regs->epcr[0].epxoutcr0);
+			epxcr |= EPXCR_EP_INIT;
+			writel(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
+			udelay(5);
+			epxcr &= ~EPXCR_EP_INIT;
+			writel(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
+
+			epxcr = ((EP0_MAX_PKT_SIZE
+					<< EPXCR_MAX_PACKET_SIZE_SHIFT)
+			      | (1 << EPXCR_MAX_BURST_SIZE_SHIFT)
+			      | (1 << EPXCR_EP_ENABLE_SHIFT)
+			      | EPXCR_EP_TYPE_CONTROL);
+			writel(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
+		} else {	/* TX */
+			epxcr = readl(&u3d->vuc_regs->epcr[0].epxincr0);
+			epxcr |= EPXCR_EP_INIT;
+			writel(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
+			udelay(5);
+			epxcr &= ~EPXCR_EP_INIT;
+			writel(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
+
+			epxcr = ((EP0_MAX_PKT_SIZE
+					<< EPXCR_MAX_PACKET_SIZE_SHIFT)
+			      | (1 << EPXCR_MAX_BURST_SIZE_SHIFT)
+			      | (1 << EPXCR_EP_ENABLE_SHIFT)
+			      | EPXCR_EP_TYPE_CONTROL);
+			writel(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
+		}
+	}
+}
+
+static void ep0_stall(struct mv_u3d *u3d)
+{
+	u32 tmp;
+	dev_dbg(&u3d->dev->dev, "%s\n", __func__);
+
+	/* set TX and RX to stall */
+	tmp = readl(&u3d->vuc_regs->epcr[0].epxoutcr0);
+	tmp |= EPXCR_EP_HALT;
+	writel(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+
+	tmp = readl(&u3d->vuc_regs->epcr[0].epxincr0);
+	tmp |= EPXCR_EP_HALT;
+	writel(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+
+	/* update ep0 state */
+	u3d->ep0_state = WAIT_FOR_SETUP;
+	u3d->ep0_dir = EP_DIR_OUT;
+}
+
+static int process_ep_req(struct mv_u3d *u3d, int index,
+	struct mv_req *curr_req)
+{
+	struct trb	*curr_trb;
+	dma_addr_t cur_deq_lo;
+	struct ep_context	*curr_ep_context;
+	int trb_complete, actual, remaining_length = 0;
+	int i, direction, ep_num;
+	int retval = 0;
+	u32 tmp, status, length;
+
+	curr_ep_context = &u3d->ep_context[index];
+	direction = index % 2;
+	ep_num = index / 2;
+
+	curr_trb = curr_req->head;
+	trb_complete = 0;
+	actual = curr_req->req.length;
+
+	for (i = 0; i < curr_req->trb_count; i++) {
+		if (!curr_trb->trb_hw->ctrl.own) {
+			dev_err(&u3d->dev->dev, "%s, TRB own error!\n",
+				u3d->eps[index].name);
+			return 1;
+		}
+
+		curr_trb->trb_hw->ctrl.own = 0;
+		if (direction == EP_DIR_OUT) {
+			tmp = readl(&u3d->vuc_regs->rxst[ep_num].statuslo);
+			cur_deq_lo =
+				readl(&u3d->vuc_regs->rxst[ep_num].curdeqlo);
+		} else {
+			tmp = readl(&u3d->vuc_regs->txst[ep_num].statuslo);
+			cur_deq_lo =
+				readl(&u3d->vuc_regs->txst[ep_num].curdeqlo);
+		}
+
+		status = tmp >> XFERSTATUS_COMPLETE_SHIFT;
+		length = tmp & XFERSTATUS_TRB_LENGTH_MASK;
+
+		if (status == COMPLETE_SUCCESS ||
+			status == COMPLETE_SHORT_PACKET) {
+			remaining_length += length;
+			actual -= remaining_length;
+		} else {
+			dev_err(&u3d->dev->dev,
+				"complete_tr error: ep=%d %s: error = 0x%x\n",
+				index >> 1, direction ? "SEND" : "RECV",
+				status);
+			retval = -EPROTO;
+		}
+		if (i != curr_req->trb_count - 1)
+			curr_trb = (struct trb *)curr_trb->next_trb_virt;
+	}
+	if (retval)
+		return retval;
+
+	curr_req->req.actual = actual;
+	return 0;
+}
+
+/*
+ * done() - retire a request; caller must hold the lock with irqs blocked
+ * @status : request status to be set, only works when
+ * request is still in progress.
+ */
+static void done(struct mv_ep *ep, struct mv_req *req, int status)
+{
+	struct mv_u3d *u3d = NULL;
+
+	u3d = (struct mv_u3d *)ep->u3d;
+
+	dev_dbg(&u3d->dev->dev, "done: remove req->queue\n");
+	/* Removed the req from ep queue */
+	list_del_init(&req->queue);
+
+	/* req.status should be set as -EINPROGRESS in ep_queue() */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* Free trb for the request */
+	if (!req->chain)
+		dma_pool_free(u3d->trb_pool,
+			req->head->trb_hw, req->head->trb_dma);
+	else {
+		dma_unmap_single(ep->u3d->gadget.dev.parent,
+			(dma_addr_t)req->head->trb_dma,
+			req->trb_count * sizeof(struct trb_hw),
+			DMA_BIDIRECTIONAL);
+		kfree(req->head->trb_hw);
+	}
+	kfree(req->head);
+
+	if (req->mapped) {
+		dma_unmap_single(ep->u3d->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(ep->u3d->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+
+	if (status && (status != -ESHUTDOWN)) {
+		dev_dbg(&u3d->dev->dev, "complete %s req %p stat %d len %u/%u",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+	}
+
+	spin_unlock(&ep->u3d->lock);
+	/*
+	 * complete() is from gadget layer,
+	 * eg fsg->bulk_in_complete()
+	 */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&ep->u3d->lock);
+}
+
+static int queue_trb(struct mv_ep *ep, struct mv_req *req)
+{
+	u32 tmp, direction;
+	struct mv_u3d *u3d;
+	struct ep_context *ep_context;
+	int retval = 0;
+
+	u3d = ep->u3d;
+	direction = ep_dir(ep);
+
+	/* ep0 in and out share the same ep context slot 1*/
+	if (ep->ep_num == 0)
+		ep_context = &(u3d->ep_context[1]);
+	else
+		ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
+
+	/* check if the pipe is empty or not */
+	if (!list_empty(&ep->queue)) {
+		dev_err(&u3d->dev->dev, "add trb to non-empty queue!\n");
+		retval = -ENOMEM;
+		WARN_ON(1);
+	} else {
+		ep_context->rsvd0 = 1;
+		ep_context->rsvd1 = 0;
+		/* Configure the trb address and set the DCS bit.
+		 * Both DCS bit and own bit in trb should be set.
+		 */
+		ep_context->trb_addr_lo = req->head->trb_dma | DCS_ENABLE;
+		ep_context->trb_addr_hi = 0;
+
+		/* Ensure that updates to the EP Context will
+		 * occur before ringing the doorbell.
+		 */
+		wmb();
+
+		/* ring bell the ep */
+		if (ep->ep_num == 0)
+			tmp = 0x1;
+		else
+			tmp = ep->ep_num * 2
+				+ ((direction == EP_DIR_OUT) ? 0 : 1);
+
+		writel(tmp, &u3d->op_regs->doorbell);
+	}
+	return retval;
+}
+
+static struct trb *build_trb_one(struct mv_req *req, unsigned *length,
+		dma_addr_t *dma)
+{
+	u32 temp;
+	unsigned int direction;
+	struct trb *trb;
+	struct trb_hw *trb_hw;
+	struct mv_u3d *u3d;
+
+	/* how big will this transfer be? */
+	*length = req->req.length - req->req.actual;
+	BUG_ON(*length > (unsigned)EP_MAX_LENGTH_TRANSFER);
+
+	u3d = req->ep->u3d;
+
+	trb = kzalloc(sizeof(struct trb), GFP_ATOMIC);
+	if (!trb) {
+		dev_err(&u3d->dev->dev, "%s, trb alloc fail\n", __func__);
+		return NULL;
+	}
+
+	/*
+	 * Do not set __GFP_HIGHMEM here, or dma_to_virt cannot be used.
+	 * GFP_KERNEL cannot be used either since a spinlock may be held,
+	 * so allocate with GFP_ATOMIC.
+	 */
+	trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
+	if (!trb_hw) {
+		dev_err(&u3d->dev->dev,
+			"%s, dma_pool_alloc fail\n", __func__);
+		return NULL;
+	}
+	trb->trb_dma = *dma;
+	trb->trb_hw = trb_hw;
+
+	/* initialize buffer page pointers */
+	temp = (u32)(req->req.dma + req->req.actual);
+
+	trb_hw->buf_addr_lo = temp;
+	trb_hw->buf_addr_hi = 0;
+	trb_hw->trb_len = *length;
+	trb_hw->ctrl.own = 1;
+
+	if (req->ep->ep_num == 0)
+		trb_hw->ctrl.type = TYPE_DATA;
+	else
+		trb_hw->ctrl.type = TYPE_NORMAL;
+
+	req->req.actual += *length;
+
+	direction = ep_dir(req->ep);
+	if (direction == EP_DIR_IN)
+		trb_hw->ctrl.dir = 1;
+	else
+		trb_hw->ctrl.dir = 0;
+
+	/* Enable interrupt for the last trb of a request */
+	if (!req->req.no_interrupt)
+		trb_hw->ctrl.ioc = 1;
+
+	trb_hw->ctrl.chain = 0;
+
+	wmb();
+	return trb;
+}
+
+static int build_trb_chain(struct mv_req *req, unsigned *length,
+		struct trb *trb, int *is_last)
+{
+	u32 temp;
+	unsigned int direction;
+	struct mv_u3d *u3d;
+
+	/* how big will this transfer be? */
+	*length = min(req->req.length - req->req.actual,
+			(unsigned)EP_MAX_LENGTH_TRANSFER);
+
+	u3d = req->ep->u3d;
+
+	trb->trb_dma = 0;
+
+	/* initialize buffer page pointers */
+	temp = (u32)(req->req.dma + req->req.actual);
+
+	trb->trb_hw->buf_addr_lo = temp;
+	trb->trb_hw->buf_addr_hi = 0;
+	trb->trb_hw->trb_len = *length;
+	trb->trb_hw->ctrl.own = 1;
+
+	if (req->ep->ep_num == 0)
+		trb->trb_hw->ctrl.type = TYPE_DATA;
+	else
+		trb->trb_hw->ctrl.type = TYPE_NORMAL;
+
+	req->req.actual += *length;
+
+	direction = ep_dir(req->ep);
+	if (direction == EP_DIR_IN)
+		trb->trb_hw->ctrl.dir = 1;
+	else
+		trb->trb_hw->ctrl.dir = 0;
+
+	/* zlp is needed if req->req.zero is set */
+	if (req->req.zero) {
+		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+			*is_last = 1;
+		else
+			*is_last = 0;
+	} else if (req->req.length == req->req.actual)
+		*is_last = 1;
+	else
+		*is_last = 0;
+
+	/* Enable interrupt for the last trb of a request */
+	if (*is_last && !req->req.no_interrupt)
+		trb->trb_hw->ctrl.ioc = 1;
+
+	if (*is_last)
+		trb->trb_hw->ctrl.chain = 0;
+	else {
+		trb->trb_hw->ctrl.chain = 1;
+		dev_dbg(&u3d->dev->dev, "chain trb\n");
+	}
+
+	wmb();
+
+	return 0;
+}
+
+/* generate TRB linked list for a request
+ * the usb controller only supports a contiguous trb chain,
+ * i.e. the trb structures' physical addresses must be contiguous.
+ */
+static int req_to_trb(struct mv_req *req)
+{
+	unsigned count;
+	int is_last, is_first = 1;
+	struct trb *trb, *last_trb = NULL;
+	struct trb_hw *trb_hw;
+	struct mv_u3d *u3d;
+	dma_addr_t dma;
+	unsigned length;
+	unsigned trb_num;
+
+	u3d = req->ep->u3d;
+
+	length = req->req.length - req->req.actual;
+	/* normally the request transfer length is less than 16KB.
+	 * we use build_trb_one() to optimize it.
+	 */
+	if (length <= (unsigned)EP_MAX_LENGTH_TRANSFER) {
+		trb = build_trb_one(req, &count, &dma);
+		req->head = trb;
+		last_trb = trb;
+		req->trb_count = 1;
+		req->chain = 0;
+	} else {
+		trb_num = length / EP_MAX_LENGTH_TRANSFER;
+		if (length % EP_MAX_LENGTH_TRANSFER)
+			trb_num++;
+
+		trb = kzalloc(trb_num
+				* sizeof(struct trb), GFP_ATOMIC);
+		if (!trb) {
+			dev_err(&u3d->dev->dev,
+					"%s, trb alloc fail\n", __func__);
+			return -ENOMEM;
+		}
+
+		trb_hw = kzalloc(trb_num
+				* sizeof(struct trb_hw), GFP_ATOMIC);
+		if (!trb_hw) {
+			dev_err(&u3d->dev->dev,
+					"%s, trb_hw alloc fail\n", __func__);
+			return -ENOMEM;
+		}
+
+		do {
+			trb->trb_hw = trb_hw;
+			if (build_trb_chain(req, &count, trb, &is_last)) {
+				dev_err(&u3d->dev->dev,
+					"%s, build_trb_chain fail\n",
+					__func__);
+				return -EIO;
+			}
+
+			if (is_first) {
+				is_first = 0;
+				req->head = trb;
+			} else {
+				last_trb->next_trb_virt = trb;
+			}
+			last_trb = trb;
+			req->trb_count++;
+			trb++;
+			trb_hw++;
+		} while (!is_last);
+
+		req->head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
+					req->head->trb_hw,
+					trb_num * sizeof(struct trb_hw),
+					DMA_BIDIRECTIONAL);
+
+		req->chain = 1;
+	}
+
+	req->tail = trb;
+
+	return 0;
+}
+
+static int
+start_queue(struct mv_ep *ep)
+{
+	struct mv_u3d *u3d = ep->u3d;
+	struct mv_req *req;
+
+	if (!list_empty(&ep->req_list) && !ep->processing)
+		req = list_entry(ep->req_list.next, struct mv_req, list);
+	else
+		return 0;
+
+	ep->processing = 1;
+
+	/* map virtual address to hardware */
+	if (req->req.dma == DMA_ADDR_INVALID) {
+		req->req.dma = dma_map_single(ep->u3d->gadget.dev.parent,
+					req->req.buf,
+					req->req.length, ep_dir(ep)
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+		req->mapped = 1;
+	} else {
+		dma_sync_single_for_device(ep->u3d->gadget.dev.parent,
+					req->req.dma, req->req.length,
+					ep_dir(ep)
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+		req->mapped = 0;
+	}
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->trb_count = 0;
+
+	/* build trbs and push them to device queue */
+	if (!req_to_trb(req)) {
+		int retval;
+		retval = queue_trb(ep, req);
+		if (retval) {
+			ep->processing = 0;
+			return retval;
+		}
+	} else {
+		ep->processing = 0;
+		dev_err(&u3d->dev->dev, "%s, req_to_trb fail\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* irq handler advances the queue */
+	if (req != NULL)
+		list_add_tail(&req->queue, &ep->queue);
+
+	return 0;
+}
+
+static int mv_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct mv_u3d *u3d;
+	struct mv_ep *ep;
+	struct ep_context *ep_context;
+	u16 max = 0;
+	unsigned maxburst = 0;
+	u32 epxcr, direction;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	u3d = ep->u3d;
+
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	direction = ep_dir(ep);
+	max = le16_to_cpu(desc->wMaxPacketSize);
+
+	if (!_ep->maxburst)
+		_ep->maxburst = 1;
+	maxburst = _ep->maxburst;
+
+	/* Get the endpoint context address */
+	ep_context = (struct ep_context *)ep->ep_context;
+
+	/* Set the max burst size */
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+		if (maxburst > 16) {
+			dev_err(&u3d->dev->dev,
+				"max burst should not be greater "
+				"than 16 on bulk ep\n");
+			_ep->maxburst = maxburst = 1;
+		}
+		dev_dbg(&u3d->dev->dev,
+			"maxburst: %d on bulk %s\n", maxburst, ep->name);
+		break;
+	case USB_ENDPOINT_XFER_CONTROL:
+		if (maxburst != 1) {
+			dev_err(&u3d->dev->dev,
+				"max burst should be 1 on control ep\n");
+			_ep->maxburst = maxburst = 1;
+		}
+	case USB_ENDPOINT_XFER_INT:
+		if (maxburst != 1) {
+			dev_err(&u3d->dev->dev,
+				"max burst should be 1 on int ep "
+				"if transfer size is not 1024\n");
+			_ep->maxburst = maxburst = 1;
+		}
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (maxburst != 1) {
+			dev_err(&u3d->dev->dev,
+				"max burst should be 1 on isoc ep "
+				"if transfer size is not 1024\n");
+			_ep->maxburst = maxburst = 1;
+		}
+		break;
+	default:
+		goto en_done;
+	}
+
+	ep->ep.maxpacket = max;
+	ep->desc = desc;
+	ep->enabled = 1;
+
+	/* Enable the endpoint for Rx or Tx and set the endpoint type */
+	if (direction == EP_DIR_OUT) {
+		epxcr = readl(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+		epxcr |= EPXCR_EP_INIT;
+		writel(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+		udelay(5);
+		epxcr &= ~EPXCR_EP_INIT;
+		writel(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+
+		epxcr = ((max << EPXCR_MAX_PACKET_SIZE_SHIFT)
+		      | ((maxburst - 1) << EPXCR_MAX_BURST_SIZE_SHIFT)
+		      | (1 << EPXCR_EP_ENABLE_SHIFT)
+		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
+		writel(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+	} else {
+		epxcr = readl(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+		epxcr |= EPXCR_EP_INIT;
+		writel(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+		udelay(5);
+		epxcr &= ~EPXCR_EP_INIT;
+		writel(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+
+		epxcr = ((max << EPXCR_MAX_PACKET_SIZE_SHIFT)
+		      | ((maxburst - 1) << EPXCR_MAX_BURST_SIZE_SHIFT)
+		      | (1 << EPXCR_EP_ENABLE_SHIFT)
+		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
+		writel(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+	}
+
+	return 0;
+en_done:
+	return -EINVAL;
+}
+
+static int  mv_ep_disable(struct usb_ep *_ep)
+{
+	struct mv_u3d *u3d;
+	struct mv_ep *ep;
+	struct ep_context *ep_context;
+	u32 epxcr, direction;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	if ((_ep == NULL) || !ep->desc)
+		return -EINVAL;
+
+	u3d = ep->u3d;
+
+	/* Get the endpoint context address */
+	ep_context = ep->ep_context;
+
+	direction = ep_dir(ep);
+
+	/* nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+
+	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
+	if (direction == EP_DIR_OUT) {
+		epxcr = readl(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+		epxcr &= ~((1 << EPXCR_EP_ENABLE_SHIFT)
+		      | USB_ENDPOINT_XFERTYPE_MASK);
+		writel(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+	} else {
+		epxcr = readl(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+		epxcr &= ~((1 << EPXCR_EP_ENABLE_SHIFT)
+		      | USB_ENDPOINT_XFERTYPE_MASK);
+		writel(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+	}
+
+	ep->enabled = 0;
+
+	ep->desc = NULL;
+	return 0;
+}
+
+static struct usb_request *
+mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct mv_req *req = NULL;
+
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct mv_req *req = NULL;
+
+	req = container_of(_req, struct mv_req, req);
+
+	if (_req)
+		kfree(req);
+}
+
+static void mv_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct mv_u3d *u3d;
+	u32 direction;
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	unsigned int loops;
+	u32 tmp;
+
+	/* if endpoint is not enabled, cannot flush endpoint */
+	if (!ep->enabled)
+		return;
+
+	u3d = ep->u3d;
+	direction = ep_dir(ep);
+
+	/* ep0 need clear bit after flushing fifo. */
+	if (!ep->ep_num) {
+		if (direction == EP_DIR_OUT) {
+			tmp = readl(&u3d->vuc_regs->epcr[0].epxoutcr0);
+			tmp |= EPXCR_EP_FLUSH;
+			writel(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+			udelay(10);
+			tmp &= ~EPXCR_EP_FLUSH;
+			writel(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+		} else {
+			tmp = readl(&u3d->vuc_regs->epcr[0].epxincr0);
+			tmp |= EPXCR_EP_FLUSH;
+			writel(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+			udelay(10);
+			tmp &= ~EPXCR_EP_FLUSH;
+			writel(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+		}
+		return;
+	}
+
+	if (direction == EP_DIR_OUT) {
+		tmp = readl(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+		tmp |= EPXCR_EP_FLUSH;
+		writel(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+
+		/* Wait until flushing completed */
+		loops = LOOPS(FLUSH_TIMEOUT);
+		while (readl(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
+			EPXCR_EP_FLUSH) {
+			/*
+			 * EP_FLUSH bit should be cleared to indicate this
+			 * operation is complete
+			 */
+			if (loops == 0) {
+				dev_dbg(&u3d->dev->dev,
+				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
+				    direction ? "in" : "out");
+				return;
+			}
+			loops--;
+			udelay(LOOPS_USEC);
+		}
+	} else {	/* EP_DIR_IN */
+		tmp = readl(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+		tmp |= EPXCR_EP_FLUSH;
+		writel(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+
+		/* Wait until flushing completed */
+		loops = LOOPS(FLUSH_TIMEOUT);
+		while (readl(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
+			EPXCR_EP_FLUSH) {
+			/*
+			 * EP_FLUSH bit should be cleared to indicate this
+			 * operation is complete
+			 */
+			if (loops == 0) {
+				dev_dbg(&u3d->dev->dev,
+				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
+				    direction ? "in" : "out");
+				return;
+			}
+			loops--;
+			udelay(LOOPS_USEC);
+		}
+	}
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int
+mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	struct mv_req *req = container_of(_req, struct mv_req, req);
+	struct mv_u3d *u3d = ep->u3d;
+	unsigned long flags;
+	int is_first_req = 0;
+
+	if (!ep->ep_num && u3d->ep0_state == STATUS_STAGE && !_req->length) {
+		dev_dbg(&u3d->dev->dev, "ep0 status stage\n");
+		u3d->ep0_state = WAIT_FOR_SETUP;
+		return 0;
+	}
+
+	dev_dbg(&u3d->dev->dev, "%s: %s, req: 0x%x\n",
+			__func__, _ep->name, (u32)req);
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		dev_err(&u3d->dev->dev,
+			"%s, bad params, _req: 0x%x,"
+			"req->req.complete: 0x%x, req->req.buf: 0x%x,"
+			"list_empty: 0x%x\n",
+			__func__, (u32)_req,
+			(u32)req->req.complete, (u32)req->req.buf,
+			(u32)list_empty(&req->queue));
+		return -EINVAL;
+	}
+	if (unlikely(!_ep || !ep->desc)) {
+		dev_err(&u3d->dev->dev, "%s, bad ep\n", __func__);
+		return -EINVAL;
+	}
+	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+	}
+
+	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
+		dev_err(&u3d->dev->dev,
+			"%s, bad params of driver/speed\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	req->ep = ep;
+
+	/* Software list handles usb request. */
+	spin_lock_irqsave(&ep->req_lock, flags);
+	is_first_req = list_empty(&ep->req_list);
+	list_add_tail(&req->list, &ep->req_list);
+	spin_unlock_irqrestore(&ep->req_lock, flags);
+	if (!is_first_req) {
+		dev_dbg(&u3d->dev->dev, "list is not empty\n");
+		return 0;
+	}
+
+	dev_dbg(&u3d->dev->dev, "call start_queue from usb_ep_queue\n");
+	spin_lock_irqsave(&u3d->lock, flags);
+	start_queue(ep);
+	spin_unlock_irqrestore(&u3d->lock, flags);
+	return 0;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	struct mv_req *req;
+	struct mv_u3d *u3d = ep->u3d;
+	struct ep_context *ep_context;
+	struct mv_req *next_req;
+
+	unsigned long flags;
+	int ret = 0;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->u3d->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* The request is in progress, or completed but not dequeued */
+	if (ep->queue.next == &req->queue) {
+		_req->status = -ECONNRESET;
+		mv_ep_fifo_flush(_ep);	/* flush current transfer */
+
+		/* The request isn't the last request in this ep queue */
+		if (req->queue.next != &ep->queue) {
+			dev_dbg(&u3d->dev->dev,
+				"not the last request in this ep queue\n");
+			ep_context = ep->ep_context;
+			next_req = list_entry(req->queue.next, struct mv_req,
+					queue);
+
+			/* Point the EP context to the
+			 * first TRB of the next request.
+			 */
+			writel((u32) next_req->head->trb_dma,
+				&ep_context->trb_addr_lo);
+		} else {
+			struct ep_context *ep_context;
+			ep_context = ep->ep_context;
+			ep_context->trb_addr_lo = 0;
+			ep_context->trb_addr_hi = 0;
+		}
+
+	} else
+		WARN_ON(1);
+
+	done(ep, req, -ECONNRESET);
+
+	/* remove the req from the ep req list */
+	if (!list_empty(&ep->req_list)) {
+		struct mv_req *curr_req;
+		curr_req = list_entry(ep->req_list.next, struct mv_req, list);
+		if (curr_req == req) {
+			list_del_init(&req->list);
+			ep->processing = 0;
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&ep->u3d->lock, flags);
+	return ret;
+}
+
+static void
+ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
+{
+	u32 tmp;
+	struct mv_ep *ep = u3d->eps;
+
+	dev_dbg(&u3d->dev->dev, "%s\n", __func__);
+	if (direction == EP_DIR_OUT) {
+		tmp = readl(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+		if (stall)
+			tmp |= EPXCR_EP_HALT;
+		else
+			tmp &= ~EPXCR_EP_HALT;
+		writel(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+	} else {
+		tmp = readl(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+		if (stall)
+			tmp |= EPXCR_EP_HALT;
+		else
+			tmp &= ~EPXCR_EP_HALT;
+		writel(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+	}
+}
+
+static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+{
+	struct mv_ep *ep;
+	unsigned long flags = 0;
+	int status = 0;
+	struct mv_u3d *u3d;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	u3d = ep->u3d;
+	if (!_ep || !ep->desc) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		status = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/*
+	 * Attempting to halt an IN ep will fail if any transfer requests
+	 * are still queued
+	 */
+	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
+		status = -EAGAIN;
+		goto out;
+	}
+
+	spin_lock_irqsave(&ep->u3d->lock, flags);
+	ep_set_stall(u3d, ep->ep_num, ep_dir(ep), halt);
+	if (halt && wedge)
+		ep->wedge = 1;
+	else if (!halt)
+		ep->wedge = 0;
+	spin_unlock_irqrestore(&ep->u3d->lock, flags);
+
+	if (ep->ep_num == 0)
+		u3d->ep0_dir = EP_DIR_OUT;
+out:
+	return status;
+}
+
+static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
+{
+	return mv_ep_set_halt_wedge(_ep, halt, 0);
+}
+
+static int mv_ep_set_wedge(struct usb_ep *_ep)
+{
+	return mv_ep_set_halt_wedge(_ep, 1, 1);
+}
+
+static struct usb_ep_ops mv_ep_ops = {
+	.enable		= mv_ep_enable,
+	.disable	= mv_ep_disable,
+
+	.alloc_request	= mv_alloc_request,
+	.free_request	= mv_free_request,
+
+	.queue		= mv_ep_queue,
+	.dequeue	= mv_ep_dequeue,
+
+	.set_wedge	= mv_ep_set_wedge,
+	.set_halt	= mv_ep_set_halt,
+	.fifo_flush	= mv_ep_fifo_flush,
+};
+
+static void u3d_stop(struct mv_u3d *u3d)
+{
+	u32 tmp;
+
+	if (!u3d->clock_gating && u3d->vbus_valid_detect)
+		writel(INTR_ENABLE_VBUS_VALID, &u3d->vuc_regs->intrenable);
+	else
+		writel(0, &u3d->vuc_regs->intrenable);
+	writel(~0x0, &u3d->vuc_regs->endcomplete);
+	writel(~0x0, &u3d->vuc_regs->trbunderrun);
+	writel(~0x0, &u3d->vuc_regs->trbcomplete);
+	writel(~0x0, &u3d->vuc_regs->linkchange);
+	writel(0x1, &u3d->vuc_regs->setuplock);
+
+	/* Reset the RUN bit in the command register to stop USB */
+	tmp = readl(&u3d->op_regs->usbcmd);
+	tmp &= ~CMD_RUN_STOP;
+	writel(tmp, &u3d->op_regs->usbcmd);
+	dev_dbg(&u3d->dev->dev, "after u3d_stop, USBCMD 0x%x\n",
+		readl(&u3d->op_regs->usbcmd));
+}
+
+static void u3d_start(struct mv_u3d *u3d)
+{
+	u32 usbintr;
+	u32 temp;
+
+	/* enable link LTSSM state machine */
+	temp = readl(&u3d->vuc_regs->ltssm);
+	temp |= LTSSM_PHY_INIT_DONE;
+	writel(temp, &u3d->vuc_regs->ltssm);
+
+	/* Enable interrupts */
+	usbintr = INTR_ENABLE_LINK_CHG | INTR_ENABLE_TXDESC_ERR |
+		INTR_ENABLE_RXDESC_ERR | INTR_ENABLE_TX_COMPLETE |
+		INTR_ENABLE_RX_COMPLETE | INTR_ENABLE_SETUP |
+		(u3d->vbus_valid_detect ? INTR_ENABLE_VBUS_VALID : 0);
+	writel(usbintr, &u3d->vuc_regs->intrenable);
+
+	/* Enable ctrl ep */
+	writel(0x1, &u3d->vuc_regs->ctrlepenable);
+
+	/* Set the Run bit in the command register */
+	writel(CMD_RUN_STOP, &u3d->op_regs->usbcmd);
+	dev_dbg(&u3d->dev->dev, "after u3d_start, USBCMD 0x%x\n",
+		readl(&u3d->op_regs->usbcmd));
+}
+
+static int u3d_reset(struct mv_u3d *u3d)
+{
+	unsigned int loops;
+	u32 tmp;
+
+	/* Stop the controller */
+	tmp = readl(&u3d->op_regs->usbcmd);
+	tmp &= ~CMD_RUN_STOP;
+	writel(tmp, &u3d->op_regs->usbcmd);
+
+	/* Reset the controller to get default values */
+	writel(CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
+
+	/* wait for reset to complete */
+	loops = LOOPS(RESET_TIMEOUT);
+	while (readl(&u3d->op_regs->usbcmd) & CMD_CTRL_RESET) {
+		if (loops == 0) {
+			dev_err(&u3d->dev->dev,
+				"Wait for RESET completed TIMEOUT\n");
+			return -ETIMEDOUT;
+		}
+		loops--;
+		udelay(LOOPS_USEC);
+	}
+
+	/* Configure the Endpoint Context Address */
+	writel(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
+	writel(0, &u3d->op_regs->dcbaaph);
+
+	return 0;
+}
+
+static int mv_u3d_enable(struct mv_u3d *u3d)
+{
+	int retval;
+
+	if (u3d->active)
+		return 0;
+
+	if (u3d->clock_gating == 0) {
+		u3d->active = 1;
+		return 0;
+	}
+
+	dev_dbg(&u3d->dev->dev, "enable u3d\n");
+	clk_enable(u3d->clk);
+	if (u3d->pdata->phy_init) {
+		retval = u3d->pdata->phy_init(u3d->phy_regs);
+		if (retval) {
+			dev_err(&u3d->dev->dev,
+				"init phy error %d\n", retval);
+			clk_disable(u3d->clk);
+			return retval;
+		}
+	}
+	u3d->active = 1;
+
+	return 0;
+}
+
+static void mv_u3d_disable(struct mv_u3d *u3d)
+{
+	if (u3d->clock_gating && u3d->active) {
+		dev_dbg(&u3d->dev->dev, "disable u3d\n");
+		if (u3d->pdata->phy_deinit)
+			u3d->pdata->phy_deinit(u3d->phy_regs);
+		clk_disable(u3d->clk);
+		u3d->active = 0;
+	}
+}
+
+static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct mv_u3d *u3d;
+	unsigned long flags;
+	int retval = 0;
+
+	u3d = container_of(gadget, struct mv_u3d, gadget);
+
+	spin_lock_irqsave(&u3d->lock, flags);
+
+	u3d->vbus_active = (is_active != 0);
+	dev_dbg(&u3d->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+		__func__, u3d->softconnect, u3d->vbus_active);
+	/*
+	 * 1. external VBUS detect: we can disable/enable clock on demand.
+	 * 2. UDC VBUS detect: we have to enable clock all the time.
+	 * 3. No VBUS detect: we have to enable clock all the time.
+	 */
+	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
+		retval = mv_u3d_enable(u3d);
+		if (retval == 0) {
+			/*
+			 * after the clock is disabled, all register context
+			 * is lost, so the registers must be re-initialized
+			 */
+			u3d_reset(u3d);
+			ep0_reset(u3d);
+			u3d_start(u3d);
+		}
+	} else if (u3d->driver && u3d->softconnect) {
+		if (!u3d->active)
+			goto out;
+
+		/* stop all the transfer in queue*/
+		stop_activity(u3d, u3d->driver);
+		u3d_stop(u3d);
+		mv_u3d_disable(u3d);
+	}
+
+out:
+	spin_unlock_irqrestore(&u3d->lock, flags);
+	return retval;
+}
+
+/* constrain controller's VBUS power usage
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume.  For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	struct mv_u3d *u3d;
+
+	u3d = container_of(gadget, struct mv_u3d, gadget);
+	u3d->power = mA;
+
+	return 0;
+}
+
+static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct mv_u3d *u3d;
+	unsigned long flags;
+	int retval = 0;
+
+	u3d = container_of(gadget, struct mv_u3d, gadget);
+	spin_lock_irqsave(&u3d->lock, flags);
+
+	dev_dbg(&u3d->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+		__func__, u3d->softconnect, u3d->vbus_active);
+	u3d->softconnect = (is_on != 0);
+	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
+		retval = mv_u3d_enable(u3d);
+		if (retval == 0) {
+			/*
+			 * after the clock is disabled, all register context
+			 * is lost, so the registers must be re-initialized
+			 */
+			u3d_reset(u3d);
+			ep0_reset(u3d);
+			u3d_start(u3d);
+		}
+	} else if (u3d->driver && u3d->vbus_active) {
+		/* stop all the transfer in queue*/
+		stop_activity(u3d, u3d->driver);
+		u3d_stop(u3d);
+		mv_u3d_disable(u3d);
+	}
+
+	spin_unlock_irqrestore(&u3d->lock, flags);
+
+	return retval;
+}
+
+static int mv_u3d_start(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct mv_u3d *u3d;
+	unsigned long flags;
+
+	u3d = container_of(g, struct mv_u3d, gadget);
+	if (!u3d)
+		return -ENODEV;
+
+	if (u3d->driver)
+		return -EBUSY;
+
+	spin_lock_irqsave(&u3d->lock, flags);
+
+	/* hook up the driver ... */
+	driver->driver.bus = NULL;
+	u3d->driver = driver;
+	u3d->gadget.dev.driver = &driver->driver;
+
+	u3d->usb_state = USB_STATE_ATTACHED;
+	u3d->ep0_dir = USB_DIR_OUT;
+
+	spin_unlock_irqrestore(&u3d->lock, flags);
+
+	u3d->vbus_valid_detect = 1;
+
+	return 0;
+}
+
+static int mv_u3d_stop(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct mv_u3d *u3d;
+	unsigned long flags;
+
+	u3d = container_of(g, struct mv_u3d, gadget);
+
+	if (!u3d)
+		return -ENODEV;
+
+	u3d->vbus_valid_detect = 0;
+	spin_lock_irqsave(&u3d->lock, flags);
+
+	mv_u3d_enable(u3d);
+	u3d_stop(u3d);
+	/* stop all usb activities */
+	u3d->gadget.speed = USB_SPEED_UNKNOWN;
+	stop_activity(u3d, driver);
+	mv_u3d_disable(u3d);
+
+	spin_unlock_irqrestore(&u3d->lock, flags);
+
+	u3d->gadget.dev.driver = NULL;
+	u3d->driver = NULL;
+
+	return 0;
+}
+
+
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops mv_ops = {
+	/* notify controller that VBUS is powered or not */
+	.vbus_session	= mv_u3d_vbus_session,
+
+	/* constrain controller's VBUS power usage */
+	.vbus_draw	= mv_u3d_vbus_draw,
+
+	.pullup		= mv_u3d_pullup,
+	.udc_start	= mv_u3d_start,
+	.udc_stop	= mv_u3d_stop,
+};
+
+static int eps_init(struct mv_u3d *u3d)
+{
+	struct mv_ep	*ep;
+	char name[14];
+	int i;
+
+	/* initialize ep0, ep0 in/out use eps[1] */
+	ep = &u3d->eps[1];
+	ep->u3d = u3d;
+	strncpy(ep->name, "ep0", sizeof(ep->name));
+	ep->ep.name = ep->name;
+	ep->ep.ops = &mv_ep_ops;
+	ep->wedge = 0;
+	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+	ep->ep_num = 0;
+	ep->desc = &mv_ep0_desc;
+	INIT_LIST_HEAD(&ep->queue);
+	INIT_LIST_HEAD(&ep->req_list);
+	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+	/* add ep0 ep_context */
+	ep->ep_context = &u3d->ep_context[1];
+
+	/* initialize other endpoints */
+	for (i = 2; i < u3d->max_eps * 2; i++) {
+		ep = &u3d->eps[i];
+		if (i % 2) {
+			snprintf(name, sizeof(name), "ep%din", i / 2);
+			ep->direction = EP_DIR_IN;
+		} else {
+			snprintf(name, sizeof(name), "ep%dout", i / 2);
+			ep->direction = EP_DIR_OUT;
+		}
+		ep->u3d = u3d;
+		strncpy(ep->name, name, sizeof(ep->name));
+		ep->ep.name = ep->name;
+
+		ep->ep.ops = &mv_ep_ops;
+		ep->ep.maxpacket = (unsigned short) ~0;
+		ep->ep_num = i / 2;
+
+		INIT_LIST_HEAD(&ep->queue);
+		list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
+
+		INIT_LIST_HEAD(&ep->req_list);
+		spin_lock_init(&ep->req_lock);
+		ep->ep_context = &u3d->ep_context[i];
+	}
+
+	return 0;
+}
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct mv_ep *ep, int status)
+{
+	/* endpoint fifo flush */
+	mv_ep_fifo_flush(&ep->ep);
+
+	while (!list_empty(&ep->queue)) {
+		struct mv_req *req = NULL;
+		req = list_entry(ep->queue.next, struct mv_req, queue);
+		done(ep, req, status);
+	}
+}
+
+/* stop all USB activities */
+static void stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
+{
+	struct mv_ep	*ep;
+
+	nuke(&u3d->eps[1], -ESHUTDOWN);
+
+	list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
+		nuke(ep, -ESHUTDOWN);
+	}
+
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock(&u3d->lock);
+		driver->disconnect(&u3d->gadget);
+		spin_lock(&u3d->lock);
+	}
+}
+
+static void irq_process_error(struct mv_u3d *u3d)
+{
+	/* Increment the error count */
+	u3d->errors++;
+	dev_err(&u3d->dev->dev, "%s\n", __func__);
+}
+
+static void irq_process_link_change(struct mv_u3d *u3d)
+{
+	u32 linkchange;
+
+	linkchange = readl(&u3d->vuc_regs->linkchange);
+	writel(linkchange, &u3d->vuc_regs->linkchange);
+
+	dev_dbg(&u3d->dev->dev, "linkchange: 0x%x\n", linkchange);
+
+	if (linkchange & LINK_CHANGE_LINK_UP) {
+		dev_dbg(&u3d->dev->dev, "link up: ltssm state: 0x%x\n",
+			readl(&u3d->vuc_regs->ltssmstate));
+
+		u3d->ep0_dir = EP_DIR_OUT;
+		u3d->ep0_state = WAIT_FOR_SETUP;
+
+		/* set speed */
+		u3d->gadget.speed = USB_SPEED_SUPER;
+	}
+
+	if (linkchange & LINK_CHANGE_SUSPEND)
+		dev_dbg(&u3d->dev->dev, "link suspend\n");
+
+	if (linkchange & LINK_CHANGE_RESUME)
+		dev_dbg(&u3d->dev->dev, "link resume\n");
+
+	if (linkchange & LINK_CHANGE_WRESET)
+		dev_dbg(&u3d->dev->dev, "warm reset\n");
+
+	if (linkchange & LINK_CHANGE_HRESET)
+		dev_dbg(&u3d->dev->dev, "hot reset\n");
+
+	if (linkchange & LINK_CHANGE_INACT)
+		dev_dbg(&u3d->dev->dev, "inactive\n");
+
+	if (linkchange & LINK_CHANGE_DISABLE_AFTER_U0)
+		dev_dbg(&u3d->dev->dev, "ss.disabled\n");
+
+	if (linkchange & LINK_CHANGE_VBUS_INVALID) {
+		dev_dbg(&u3d->dev->dev, "vbus invalid\n");
+		u3d->vbus_valid_detect = 1;
+		/* if external vbus detect is not supported,
+		 * we handle it here.
+		 */
+		if (!u3d->pdata->vbus) {
+			spin_unlock(&u3d->lock);
+			mv_u3d_vbus_session(&u3d->gadget, 0);
+			spin_lock(&u3d->lock);
+		}
+	}
+}
+
+static void ch9setaddress(struct mv_u3d *u3d, struct usb_ctrlrequest *setup)
+{
+	u32 tmp;
+	u3d->dev_addr = (u8)setup->wValue;
+
+	/* add a delay here, or hot reset will occur */
+	dev_dbg(&u3d->dev->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
+
+	if (u3d->dev_addr > 127) {
+		dev_err(&u3d->dev->dev,
+			"%s, u3d address is wrong (out of range)\n", __func__);
+		u3d->dev_addr = 0;
+		return;
+	}
+
+	/* update usb state */
+	u3d->usb_state = USB_STATE_ADDRESS;
+
+	/* set the new address */
+	tmp = readl(&u3d->vuc_regs->devaddrtiebrkr);
+	tmp &= ~0x7F;
+	tmp |= (u32)u3d->dev_addr;
+	writel(tmp, &u3d->vuc_regs->devaddrtiebrkr);
+}
+
+static void handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
+	struct usb_ctrlrequest *setup)
+{
+	bool delegate = false;
+
+	nuke(&u3d->eps[ep_num * 2 + EP_DIR_IN], -ESHUTDOWN);
+
+	dev_dbg(&u3d->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+			setup->bRequestType, setup->bRequest,
+			setup->wValue, setup->wIndex, setup->wLength);
+
+	/* We process some standard setup requests here */
+	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (setup->bRequest) {
+		case USB_REQ_GET_STATUS:
+			delegate = true;
+			break;
+
+		case USB_REQ_SET_ADDRESS:
+			ch9setaddress(u3d, setup);
+			break;
+
+		case USB_REQ_CLEAR_FEATURE:
+			delegate = true;
+			break;
+
+		case USB_REQ_SET_FEATURE:
+			delegate = true;
+			break;
+
+		default:
+			delegate = true;
+		}
+	} else
+		delegate = true;
+
+	/* delegate USB standard requests to the gadget driver */
+	if (delegate == true) {
+		/* USB requests handled by gadget */
+		if (setup->wLength) {
+			/* DATA phase from gadget, STATUS phase from u3d */
+			u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+					? EP_DIR_IN : EP_DIR_OUT;
+			spin_unlock(&u3d->lock);
+			if (u3d->driver->setup(&u3d->gadget,
+				&u3d->local_setup_buff) < 0) {
+				dev_err(&u3d->dev->dev, "setup error!\n");
+				ep0_stall(u3d);
+			}
+			spin_lock(&u3d->lock);
+		} else {
+			/* no DATA phase, STATUS phase from gadget */
+			u3d->ep0_dir = EP_DIR_IN;
+			u3d->ep0_state = STATUS_STAGE;
+			spin_unlock(&u3d->lock);
+			if (u3d->driver->setup(&u3d->gadget,
+				&u3d->local_setup_buff) < 0)
+				ep0_stall(u3d);
+			spin_lock(&u3d->lock);
+		}
+	}
+}
+
+static void get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
+{
+	struct ep_context *epcontext;
+
+	epcontext = &u3d->ep_context[ep_num * 2 + EP_DIR_IN];
+
+	/* Copy the setup packet to local buffer */
+	memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
+}
+
+static void irq_process_setup(struct mv_u3d *u3d)
+{
+	u32 tmp, i;
+	/* Process all Setup packet received interrupts */
+	tmp = readl(&u3d->vuc_regs->setuplock);
+	if (tmp) {
+		for (i = 0; i < u3d->max_eps; i++) {
+			if (tmp & (1 << i)) {
+				get_setup_data(u3d, i,
+					(u8 *)(&u3d->local_setup_buff));
+				handle_setup_packet(u3d, i,
+					&u3d->local_setup_buff);
+			}
+		}
+	}
+
+	writel(tmp, &u3d->vuc_regs->setuplock);
+}
+
+static void irq_process_tr_complete(struct mv_u3d *u3d)
+{
+	u32 tmp, bit_pos;
+	int i, ep_num = 0, direction = 0;
+	struct mv_ep	*curr_ep;
+	struct mv_req *curr_req, *temp_req;
+	int status;
+
+	tmp = readl(&u3d->vuc_regs->endcomplete);
+
+	dev_dbg(&u3d->dev->dev, "tr_complete: ep: 0x%x\n", tmp);
+	if (!tmp)
+		return;
+	writel(tmp, &u3d->vuc_regs->endcomplete);
+
+	for (i = 0; i < u3d->max_eps * 2; i++) {
+		ep_num = i >> 1;
+		direction = i % 2;
+
+		bit_pos = 1 << (ep_num + 16 * direction);
+
+		if (!(bit_pos & tmp))
+			continue;
+
+		if (i == 0)
+			curr_ep = &u3d->eps[1];
+		else
+			curr_ep = &u3d->eps[i];
+
+		/* remove req out of ep request list after completion */
+		dev_dbg(&u3d->dev->dev, "tr comp: check req_list\n");
+		spin_lock(&curr_ep->req_lock);
+		if (!list_empty(&curr_ep->req_list)) {
+			struct mv_req *req;
+			req = list_entry(curr_ep->req_list.next,
+						struct mv_req, list);
+			list_del_init(&req->list);
+			curr_ep->processing = 0;
+		}
+		spin_unlock(&curr_ep->req_lock);
+
+		/* process the req queue until an incomplete request is found */
+		list_for_each_entry_safe(curr_req, temp_req,
+			&curr_ep->queue, queue) {
+			status = process_ep_req(u3d, i, curr_req);
+			if (status)
+				break;
+			/* write back status to req */
+			curr_req->req.status = status;
+
+			/* ep0 request completion */
+			if (ep_num == 0) {
+				done(curr_ep, curr_req, 0);
+				break;
+			} else {
+				done(curr_ep, curr_req, status);
+			}
+		}
+
+		dev_dbg(&u3d->dev->dev, "call start_queue from ep complete\n");
+		start_queue(curr_ep);
+	}
+}
+
+static irqreturn_t mv_u3d_irq(int irq, void *dev)
+{
+	struct mv_u3d *u3d = (struct mv_u3d *)dev;
+	u32 status, intr;
+	u32 bridgesetting;
+	u32 trbunderrun;
+
+	spin_lock(&u3d->lock);
+
+	status = readl(&u3d->vuc_regs->intrcause);
+	intr = readl(&u3d->vuc_regs->intrenable);
+	status &= intr;
+
+	if (status == 0) {
+		spin_unlock(&u3d->lock);
+		dev_err(&u3d->dev->dev, "irq error!\n");
+		return IRQ_NONE;
+	}
+
+	if (status & USBINT_VBUS_VALID) {
+		bridgesetting = readl(&u3d->vuc_regs->bridgesetting);
+		if (bridgesetting & BRIDGE_SETTING_VBUS_VALID) {
+			/* write vbus valid bit of bridge setting to clear */
+			bridgesetting = BRIDGE_SETTING_VBUS_VALID;
+			writel(bridgesetting, &u3d->vuc_regs->bridgesetting);
+			dev_dbg(&u3d->dev->dev, "vbus valid\n");
+
+			u3d->vbus_valid_detect = 0;
+			/* if external vbus detect is not supported,
+			 * we handle it here.
+			 */
+			if (!u3d->pdata->vbus) {
+				spin_unlock(&u3d->lock);
+				mv_u3d_vbus_session(&u3d->gadget, 1);
+				spin_lock(&u3d->lock);
+			}
+		} else
+			dev_err(&u3d->dev->dev, "vbus bit is not set\n");
+	}
+
+	/* RX data is already in the 16KB FIFO.*/
+	if (status & USBINT_UNDER_RUN) {
+		trbunderrun = readl(&u3d->vuc_regs->trbunderrun);
+		dev_err(&u3d->dev->dev, "under run, ep%d\n", trbunderrun);
+		writel(trbunderrun, &u3d->vuc_regs->trbunderrun);
+		irq_process_error(u3d);
+	}
+
+	if (status & (USBINT_RXDESC_ERR | USBINT_TXDESC_ERR)) {
+		/* write one to clear */
+		writel(status & (USBINT_RXDESC_ERR | USBINT_TXDESC_ERR),
+			&u3d->vuc_regs->intrcause);
+		dev_err(&u3d->dev->dev, "desc err 0x%x\n", status);
+		irq_process_error(u3d);
+	}
+
+	if (status & USBINT_LINK_CHG)
+		irq_process_link_change(u3d);
+
+	if (status & USBINT_TX_COMPLETE)
+		irq_process_tr_complete(u3d);
+
+	if (status & USBINT_RX_COMPLETE)
+		irq_process_tr_complete(u3d);
+
+	if (status & USBINT_SETUP)
+		irq_process_setup(u3d);
+
+	spin_unlock(&u3d->lock);
+	return IRQ_HANDLED;
+}
+
+/* release device structure */
+static void gadget_release(struct device *dev)
+{
+	struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);
+	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
+
+	complete(u3d->done);
+}
+
+static int __devexit mv_u3d_remove(struct platform_device *dev)
+{
+	struct mv_u3d *u3d = platform_get_drvdata(dev);
+
+	BUG_ON(u3d == NULL);
+
+	usb_del_gadget_udc(&u3d->gadget);
+
+	/* free memory allocated in probe */
+	if (u3d->trb_pool)
+		dma_pool_destroy(u3d->trb_pool);
+
+	if (u3d->ep_context)
+		dma_free_coherent(&dev->dev, u3d->ep_context_size,
+			u3d->ep_context, u3d->ep_context_dma);
+
+	kfree(u3d->eps);
+
+	if (u3d->irq)
+		free_irq(u3d->irq, u3d);
+
+	mv_u3d_disable(u3d);
+
+	if (u3d->cap_regs)
+		iounmap(u3d->cap_regs);
+	u3d->cap_regs = NULL;
+
+	if (u3d->phy_regs)
+		iounmap((void *)u3d->phy_regs);
+	u3d->phy_regs = 0;
+
+	kfree(u3d->status_req);
+
+	clk_put(u3d->clk);
+
+	device_unregister(&u3d->gadget.dev);
+
+	platform_set_drvdata(dev, NULL);
+
+	/* free dev, wait for the release() finished */
+	wait_for_completion(u3d->done);
+	kfree(u3d);
+
+	return 0;
+}
+
+static int mv_u3d_probe(struct platform_device *dev)
+{
+	struct mv_u3d *u3d = NULL;
+	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
+	int retval = 0;
+	struct resource *r;
+	size_t size;
+
+	if (pdata == NULL) {
+		dev_err(&dev->dev, "missing platform_data\n");
+		retval = -ENODEV;
+		goto err_pdata;
+	}
+
+	size = sizeof(*u3d);
+	u3d = kzalloc(size, GFP_KERNEL);
+	if (u3d == NULL) {
+		dev_err(&dev->dev, "failed to allocate memory for u3d\n");
+		retval = -ENOMEM;
+		goto err_alloc_private;
+	}
+
+	spin_lock_init(&u3d->lock);
+
+	platform_set_drvdata(dev, u3d);
+
+	u3d->dev = dev;
+	u3d->pdata = dev->dev.platform_data;
+
+	u3d->clk = clk_get(&dev->dev, pdata->clkname[0]);
+	if (IS_ERR(u3d->clk)) {
+		retval = PTR_ERR(u3d->clk);
+		goto err_get_clk;
+	}
+
+	r = platform_get_resource_byname(u3d->dev, IORESOURCE_MEM, "capregs");
+	if (r == NULL) {
+		dev_err(&dev->dev, "no I/O memory resource defined\n");
+		retval = -ENODEV;
+		goto err_get_cap_regs;
+	}
+
+	u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
+		ioremap(r->start, resource_size(r));
+	if (u3d->cap_regs == NULL) {
+		dev_err(&dev->dev, "failed to map I/O memory\n");
+		retval = -EBUSY;
+		goto err_map_cap_regs;
+	} else {
+		dev_dbg(&dev->dev, "cap_regs address: 0x%x/0x%x\n",
+			(unsigned int)r->start, (unsigned int)u3d->cap_regs);
+	}
+
+	u3d->phy_regs = (u32)u3d->cap_regs + USB3_PHY_OFFSET;
+
+	/* we will access controller register, so enable the u3d controller */
+	clk_enable(u3d->clk);
+
+	if (pdata->phy_init) {
+		retval = pdata->phy_init(u3d->phy_regs);
+		if (retval) {
+			dev_err(&dev->dev, "init phy error %d\n", retval);
+			goto err_u3d_enable;
+		}
+	}
+
+	u3d->op_regs = (struct mv_u3d_op_regs __iomem *)((u32)u3d->cap_regs
+		+ USB3_OP_REGS_OFFSET);
+
+	u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)((u32)u3d->cap_regs
+		+ readl(&u3d->cap_regs->vuoff));
+
+	u3d->max_eps = 16;
+
+	/*
+	 * Some platforms use USB to download the boot image and may not
+	 * disconnect the gadget before loading the kernel, so stop u3d first.
+	 */
+	u3d_stop(u3d);
+	writel(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
+
+	size = u3d->max_eps * sizeof(struct ep_context) * 2;
+	size = (size + EP_CONTEXT_ALIGNMENT - 1) & ~(EP_CONTEXT_ALIGNMENT - 1);
+	u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
+					&u3d->ep_context_dma, GFP_KERNEL);
+	if (u3d->ep_context == NULL) {
+		dev_err(&dev->dev, "allocate ep context memory failed\n");
+		retval = -ENOMEM;
+		goto err_alloc_ep_context;
+	}
+	u3d->ep_context_size = size;
+
+	/* create TRB dma_pool resource */
+	u3d->trb_pool = dma_pool_create("u3d_trb",
+			&dev->dev,
+			sizeof(struct trb_hw),
+			TRB_ALIGNMENT,
+			DMA_BOUNDARY);
+
+	if (!u3d->trb_pool) {
+		retval = -ENOMEM;
+		goto err_alloc_trb_pool;
+	}
+
+	size = u3d->max_eps * sizeof(struct mv_ep) * 2;
+	u3d->eps = kzalloc(size, GFP_KERNEL);
+	if (u3d->eps == NULL) {
+		dev_err(&dev->dev, "allocate ep memory failed\n");
+		retval = -ENOMEM;
+		goto err_alloc_eps;
+	}
+
+	/* initialize ep0 status request structure */
+	u3d->status_req = kzalloc(sizeof(struct mv_req) + 8, GFP_KERNEL);
+	if (!u3d->status_req) {
+		dev_err(&dev->dev, "allocate status_req memory failed\n");
+		retval = -ENOMEM;
+		goto err_alloc_status_req;
+	}
+	INIT_LIST_HEAD(&u3d->status_req->queue);
+
+	/* allocate a small amount of memory to get valid address */
+	u3d->status_req->req.buf = (char *)u3d->status_req
+					+ sizeof(struct mv_req);
+	u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
+
+	u3d->resume_state = USB_STATE_NOTATTACHED;
+	u3d->usb_state = USB_STATE_POWERED;
+	u3d->ep0_dir = EP_DIR_OUT;
+	u3d->remote_wakeup = 0;
+
+	r = platform_get_resource(u3d->dev, IORESOURCE_IRQ, 0);
+	if (r == NULL) {
+		dev_err(&dev->dev, "no IRQ resource defined\n");
+		retval = -ENODEV;
+		goto err_get_irq;
+	}
+	u3d->irq = r->start;
+	if (request_irq(u3d->irq, mv_u3d_irq,
+		IRQF_DISABLED | IRQF_SHARED, driver_name, u3d)) {
+		dev_err(&dev->dev, "Request irq %d for u3d failed\n",
+			u3d->irq);
+		u3d->irq = 0;
+		retval = -ENODEV;
+		goto err_request_irq;
+	}
+
+	/* initialize gadget structure */
+	u3d->gadget.ops = &mv_ops;	/* usb_gadget_ops */
+	u3d->gadget.ep0 = &u3d->eps[1].ep;	/* gadget ep0 */
+	INIT_LIST_HEAD(&u3d->gadget.ep_list);	/* ep_list */
+	u3d->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&u3d->gadget.dev, "gadget");
+	u3d->gadget.dev.parent = &dev->dev;
+	u3d->gadget.dev.dma_mask = dev->dev.dma_mask;
+	u3d->gadget.dev.release = gadget_release;
+	u3d->gadget.name = driver_name;		/* gadget name */
+
+	retval = device_register(&u3d->gadget.dev);
+	if (retval)
+		goto err_register_gadget_device;
+
+	eps_init(u3d);
+
+	/* external vbus detection */
+	if (pdata->vbus) {
+		u3d->clock_gating = 1;
+		dev_err(&dev->dev, "external vbus detection\n");
+	}
+
+	/*
+	 * To save power, disable the clock; when the clock is disabled,
+	 * the phy is de-initialized too.
+	 * When the clock is always on, VBUS cannot be detected, so set
+	 * vbus_active to keep the controller running all the time.
+	 */
+	if (u3d->clock_gating) {
+		if (u3d->pdata->phy_deinit)
+			u3d->pdata->phy_deinit(u3d->phy_regs);
+		clk_disable(u3d->clk);
+	} else
+		u3d->vbus_active = 1;
+
+	/* enable usb3 controller vbus detection */
+	u3d->vbus_valid_detect = 1;
+
+	retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
+	if (retval)
+		goto err_unregister;
+
+	dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n",
+		u3d->clock_gating ? "with" : "without");
+
+	return 0;
+
+err_unregister:
+	device_unregister(&u3d->gadget.dev);
+err_register_gadget_device:
+	free_irq(u3d->irq, u3d);
+err_request_irq:
+err_get_irq:
+	kfree(u3d->status_req);
+err_alloc_status_req:
+	kfree(u3d->eps);
+err_alloc_eps:
+	dma_pool_destroy(u3d->trb_pool);
+err_alloc_trb_pool:
+	dma_free_coherent(&dev->dev, u3d->ep_context_size,
+		u3d->ep_context, u3d->ep_context_dma);
+err_alloc_ep_context:
+	if (u3d->pdata->phy_deinit)
+		u3d->pdata->phy_deinit(u3d->phy_regs);
+	clk_disable(u3d->clk);
+err_u3d_enable:
+	iounmap(u3d->cap_regs);
+err_map_cap_regs:
+err_get_cap_regs:
+err_get_clk:
+	clk_put(u3d->clk);
+	platform_set_drvdata(dev, NULL);
+	kfree(u3d);
+err_alloc_private:
+err_pdata:
+	return retval;
+}
+
+#ifdef CONFIG_PM
+static int mv_u3d_suspend(struct device *dev)
+{
+	struct mv_u3d *u3d = dev_get_drvdata(dev);
+
+	/*
+	 * USB can only suspend when the cable is unplugged.
+	 * The clock_gating == 1 case is handled by the vbus session,
+	 * so only handle the non-clock-gating case here.
+	 */
+	if (!u3d->clock_gating) {
+		u3d_stop(u3d);
+
+		spin_lock_irq(&u3d->lock);
+		/* stop all usb activities */
+		stop_activity(u3d, u3d->driver);
+		spin_unlock_irq(&u3d->lock);
+
+		mv_u3d_disable(u3d);
+	}
+
+	return 0;
+}
+
+static int mv_u3d_resume(struct device *dev)
+{
+	struct mv_u3d *u3d = dev_get_drvdata(dev);
+	int retval;
+
+	if (!u3d->clock_gating) {
+		retval = mv_u3d_enable(u3d);
+		if (retval)
+			return retval;
+
+		if (u3d->driver && u3d->softconnect) {
+			u3d_reset(u3d);
+			ep0_reset(u3d);
+			u3d_start(u3d);
+		}
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops mv_u3d_pm_ops = {
+	.suspend	= mv_u3d_suspend,
+	.resume		= mv_u3d_resume,
+};
+#endif
+
+static void mv_u3d_shutdown(struct platform_device *dev)
+{
+	struct mv_u3d *u3d = dev_get_drvdata(&dev->dev);
+	u32 tmp;
+
+	tmp = readl(&u3d->op_regs->usbcmd);
+	tmp &= ~CMD_RUN_STOP;
+	writel(tmp, &u3d->op_regs->usbcmd);
+}
+
+static struct platform_driver u3d_driver = {
+	.probe		= mv_u3d_probe,
+	.remove		= __devexit_p(mv_u3d_remove),
+	.shutdown	= mv_u3d_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "mv-u3d",
+#ifdef CONFIG_PM
+		.pm	= &mv_u3d_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(u3d_driver);
+MODULE_ALIAS("platform:mv-u3d");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Yu Xu <yuxu@xxxxxxxxxxx>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
-- 
1.7.9.5
