Diffstat (limited to 'drivers/usb/gadget/net2272.c')
-rw-r--r--  drivers/usb/gadget/net2272.c  2752
1 files changed, 2752 insertions, 0 deletions
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
new file mode 100644
index 000000000000..7c7b0e120d88
--- /dev/null
+++ b/drivers/usb/gadget/net2272.c
@@ -0,0 +1,2752 @@
+/*
+ * Driver for PLX NET2272 USB device controller
+ *
+ * Copyright (C) 2005-2006 PLX Technology, Inc.
+ * Copyright (C) 2006-2011 Analog Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include "net2272.h"
+
+#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
+
+static const char driver_name[] = "net2272";
+static const char driver_vers[] = "2006 October 17/mainline";
+static const char driver_desc[] = DRIVER_DESC;
+
+static const char ep0name[] = "ep0";
+static const char * const ep_name[] = {
+ ep0name,
+ "ep-a", "ep-b", "ep-c",
+};
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#ifdef CONFIG_USB_GADGET_NET2272_DMA
+/*
+ * use_dma: the NET2272 can use an external DMA controller.
+ * Note that since there is no generic DMA api, some functions,
+ * notably request_dma, start_dma, and cancel_dma will need to be
+ * modified for your platform's particular dma controller.
+ *
+ * If use_dma is disabled, pio will be used instead.
+ */
+static bool use_dma;
+module_param(use_dma, bool, 0644);
+
+/*
+ * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
+ * The NET2272 can only use dma for a single endpoint at a time.
+ * At some point this could be modified to allow either endpoint
+ * to take control of dma as it becomes available.
+ *
+ * Note that DMA should not be used on OUT endpoints unless it can
+ * be guaranteed that no short packets will arrive on an IN endpoint
+ * while the DMA operation is pending. Otherwise the OUT DMA will
+ * terminate prematurely (See NET2272 Errata 630-0213-0101)
+ */
+static ushort dma_ep = 1;
+module_param(dma_ep, ushort, 0644);
+
+/*
+ * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
+ * mode 0 == Slow DREQ mode
+ * mode 1 == Fast DREQ mode
+ * mode 2 == Burst mode
+ */
+static ushort dma_mode = 2;
+module_param(dma_mode, ushort, 0644);
+#else
+#define use_dma 0
+#define dma_ep 1
+#define dma_mode 2
+#endif
+
+/*
+ * fifo_mode: net2272 buffer configuration:
+ * mode 0 == ep-{a,b,c} 512db each
+ * mode 1 == ep-a 1k, ep-{b,c} 512db
+ * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
+ * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
+ */
+static ushort fifo_mode = 0;
+module_param(fifo_mode, ushort, 0644);
+
+/*
+ * enable_suspend: When enabled, the driver will respond to
+ * USB suspend requests by powering down the NET2272. Otherwise,
+ * USB suspend requests will be ignored. This is acceptable for
+ * self-powered devices. For bus-powered devices set this to 1.
+ */
+static ushort enable_suspend = 0;
+module_param(enable_suspend, ushort, 0644);
+
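+/*
+ * Usage sketch, assuming the driver is built as a module named net2272.ko:
+ * the parameters above can be set at load time, e.g.
+ *
+ *   modprobe net2272 fifo_mode=1 enable_suspend=1
+ *
+ * or, when built with CONFIG_USB_GADGET_NET2272_DMA:
+ *
+ *   modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2
+ *
+ * Since each module_param() above uses mode 0644, the current values should
+ * also be visible under /sys/module/net2272/parameters/ once loaded.
+ */
+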
+static void assert_out_naking(struct net2272_ep *ep, const char *where)
+{
+ u8 tmp;
+
+#ifndef DEBUG
+ return;
+#endif
+
+ tmp = net2272_ep_read(ep, EP_STAT0);
+ if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
+ dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
+ ep->ep.name, where, tmp);
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+ }
+}
+#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
+
+static void stop_out_naking(struct net2272_ep *ep)
+{
+ u8 tmp = net2272_ep_read(ep, EP_STAT0);
+
+ if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+}
+
+#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
+
+static char *type_string(u8 bmAttributes)
+{
+ switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK: return "bulk";
+ case USB_ENDPOINT_XFER_ISOC: return "iso";
+ case USB_ENDPOINT_XFER_INT: return "intr";
+ default: return "control";
+ }
+}
+
+static char *buf_state_string(unsigned state)
+{
+ switch (state) {
+ case BUFF_FREE: return "free";
+ case BUFF_VALID: return "valid";
+ case BUFF_LCL: return "local";
+ case BUFF_USB: return "usb";
+ default: return "unknown";
+ }
+}
+
+static char *dma_mode_string(void)
+{
+ if (!use_dma)
+ return "PIO";
+ switch (dma_mode) {
+ case 0: return "SLOW DREQ";
+ case 1: return "FAST DREQ";
+ case 2: return "BURST";
+ default: return "invalid";
+ }
+}
+
+static void net2272_dequeue_all(struct net2272_ep *);
+static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
+static int net2272_fifo_status(struct usb_ep *);
+
+static struct usb_ep_ops net2272_ep_ops;
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+ struct net2272 *dev;
+ struct net2272_ep *ep;
+ u32 max;
+ u8 tmp;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || !desc || ep->desc || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ _ep->maxpacket = max & 0x7fff;
+ ep->desc = desc;
+
+ /* net2272_ep_reset() has already been called */
+ ep->stopped = 0;
+ ep->wedged = 0;
+
+ /* set speed-dependent max packet */
+ net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
+ net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
+
+ /* set type, direction, address; reset fifo counters */
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+ tmp = usb_endpoint_type(desc);
+ if (usb_endpoint_xfer_bulk(desc)) {
+ /* catch some particularly blatant driver bugs */
+ if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
+ (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ERANGE;
+ }
+ }
+ ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
+ tmp <<= ENDPOINT_TYPE;
+ tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
+ tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
+ tmp |= (1 << ENDPOINT_ENABLE);
+
+ /* for OUT transfers, block the rx fifo until a read is posted */
+ ep->is_in = usb_endpoint_dir_in(desc);
+ if (!ep->is_in)
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+
+ net2272_ep_write(ep, EP_CFG, tmp);
+
+ /* enable irqs */
+ tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
+ net2272_write(dev, IRQENB0, tmp);
+
+ tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+ | net2272_ep_read(ep, EP_IRQENB);
+ net2272_ep_write(ep, EP_IRQENB, tmp);
+
+ tmp = desc->bEndpointAddress;
+ dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
+ _ep->name, tmp & 0x0f, PIPEDIR(tmp),
+ type_string(desc->bmAttributes), max,
+ net2272_ep_read(ep, EP_CFG));
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+static void net2272_ep_reset(struct net2272_ep *ep)
+{
+ u8 tmp;
+
+ ep->desc = NULL;
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->ep.maxpacket = ~0;
+ ep->ep.ops = &net2272_ep_ops;
+
+ /* disable irqs, endpoint */
+ net2272_ep_write(ep, EP_IRQENB, 0);
+
+ /* init to our chosen defaults, notably so that we NAK OUT
+ * packets until the driver queues a read.
+ */
+ tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
+ net2272_ep_write(ep, EP_RSPSET, tmp);
+
+ tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
+ if (ep->num != 0)
+ tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
+
+ net2272_ep_write(ep, EP_RSPCLR, tmp);
+
+ /* scrub most status bits, and flush any fifo state */
+ net2272_ep_write(ep, EP_STAT0,
+ (1 << DATA_IN_TOKEN_INTERRUPT)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+ | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
+
+ net2272_ep_write(ep, EP_STAT1,
+ (1 << TIMEOUT)
+ | (1 << USB_OUT_ACK_SENT)
+ | (1 << USB_OUT_NAK_SENT)
+ | (1 << USB_IN_ACK_RCVD)
+ | (1 << USB_IN_NAK_SENT)
+ | (1 << USB_STALL_SENT)
+ | (1 << LOCAL_OUT_ZLP)
+ | (1 << BUFFER_FLUSH));
+
+ /* fifo size is handled separately */
+}
+
+static int net2272_disable(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || !ep->desc || _ep->name == ep0name)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ net2272_dequeue_all(ep);
+ net2272_ep_reset(ep);
+
+ dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
+
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static struct usb_request *
+net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct net2272_ep *ep;
+ struct net2272_request *req;
+
+ if (!_ep)
+ return NULL;
+ ep = container_of(_ep, struct net2272_ep, ep);
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void
+net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct net2272_ep *ep;
+ struct net2272_request *req;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || !_req)
+ return;
+
+ req = container_of(_req, struct net2272_request, req);
+ WARN_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+static void
+net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
+{
+ struct net2272 *dev;
+ unsigned stopped = ep->stopped;
+
+ if (ep->num == 0) {
+ if (ep->dev->protocol_stall) {
+ ep->stopped = 1;
+ set_halt(ep);
+ }
+ allow_status(ep);
+ }
+
+ list_del_init(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ dev = ep->dev;
+ if (use_dma && req->mapped) {
+ dma_unmap_single(dev->dev, req->req.dma, req->req.length,
+ ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->req.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ }
+
+ if (status && status != -ESHUTDOWN)
+ dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length, req->req.buf);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ spin_unlock(&dev->lock);
+ req->req.complete(&ep->ep, &req->req);
+ spin_lock(&dev->lock);
+ ep->stopped = stopped;
+}
+
+static int
+net2272_write_packet(struct net2272_ep *ep, u8 *buf,
+ struct net2272_request *req, unsigned max)
+{
+ u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
+ u16 *bufp;
+ unsigned length, count;
+ u8 tmp;
+
+ length = min(req->req.length - req->req.actual, max);
+ req->req.actual += length;
+
+ dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
+ ep->ep.name, req, max, length,
+ (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
+
+ count = length;
+ bufp = (u16 *)buf;
+
+ while (likely(count >= 2)) {
+ /* no byte-swap required; chip endian set during init */
+ writew(*bufp++, ep_data);
+ count -= 2;
+ }
+ buf = (u8 *)bufp;
+
+ /* write final byte by placing the NET2272 into 8-bit mode */
+ if (unlikely(count)) {
+ tmp = net2272_read(ep->dev, LOCCTL);
+ net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
+ writeb(*buf, ep_data);
+ net2272_write(ep->dev, LOCCTL, tmp);
+ }
+ return length;
+}
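+
+/*
+ * For example, a 7-byte packet is pushed as three 16-bit writew() transfers
+ * covering bytes 0-5; the odd trailing byte is then written with writeb()
+ * after clearing DATA_WIDTH in LOCCTL (dropping the chip into 8-bit mode),
+ * and the saved LOCCTL value is restored afterwards.
+ */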
+
+/* returns: 0: still running, 1: completed, negative: errno */
+static int
+net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
+{
+ u8 *buf;
+ unsigned count, max;
+ int status;
+
+ dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
+ ep->ep.name, req->req.actual, req->req.length);
+
+ /*
+ * Keep loading the endpoint until the final packet is loaded,
+ * or the endpoint buffer is full.
+ */
+ top:
+ /*
+ * Clear interrupt status
+ * - Packet Transmitted interrupt will become set again when the
+ * host successfully takes another packet
+ */
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
+ while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
+ buf = req->req.buf + req->req.actual;
+ prefetch(buf);
+
+ /* force pagesel */
+ net2272_ep_read(ep, EP_STAT0);
+
+ max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
+ (net2272_ep_read(ep, EP_AVAIL0));
+
+ if (max < ep->ep.maxpacket)
+ max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
+ | (net2272_ep_read(ep, EP_AVAIL0));
+
+ count = net2272_write_packet(ep, buf, req, max);
+ /* see if we are done */
+ if (req->req.length == req->req.actual) {
+ /* validate short or zlp packet */
+ if (count < ep->ep.maxpacket)
+ set_fifo_bytecount(ep, 0);
+ net2272_done(ep, req, 0);
+
+ if (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request,
+ queue);
+ status = net2272_kick_dma(ep, req);
+
+ if (status < 0)
+ if ((net2272_ep_read(ep, EP_STAT0)
+ & (1 << BUFFER_EMPTY)))
+ goto top;
+ }
+ return 1;
+ }
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
+ }
+ return 0;
+}
+
+static void
+net2272_out_flush(struct net2272_ep *ep)
+{
+ ASSERT_OUT_NAKING(ep);
+
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
+ | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+}
+
+static int
+net2272_read_packet(struct net2272_ep *ep, u8 *buf,
+ struct net2272_request *req, unsigned avail)
+{
+ u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
+ unsigned is_short;
+ u16 *bufp;
+
+ req->req.actual += avail;
+
+ dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
+ ep->ep.name, req, avail,
+ (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
+
+ is_short = (avail < ep->ep.maxpacket);
+
+ if (unlikely(avail == 0)) {
+ /* remove any zlp from the buffer */
+ (void)readw(ep_data);
+ return is_short;
+ }
+
+ /* Ensure we get the final byte */
+ if (unlikely(avail % 2))
+ avail++;
+ bufp = (u16 *)buf;
+
+ do {
+ *bufp++ = readw(ep_data);
+ avail -= 2;
+ } while (avail);
+
+ /*
+ * To avoid a false endpoint-available race condition, EP_STAT0 must be
+ * read twice in the case of a short transfer
+ */
+ if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
+ net2272_ep_read(ep, EP_STAT0);
+
+ return is_short;
+}
+
+static int
+net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
+{
+ u8 *buf;
+ unsigned is_short;
+ int count;
+ int tmp;
+ int cleanup = 0;
+ int status = -1;
+
+ dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
+ ep->ep.name, req->req.actual, req->req.length);
+
+ top:
+ do {
+ buf = req->req.buf + req->req.actual;
+ prefetchw(buf);
+
+ count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
+ | net2272_ep_read(ep, EP_AVAIL0);
+
+ net2272_ep_write(ep, EP_STAT0,
+ (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
+ (1 << DATA_PACKET_RECEIVED_INTERRUPT));
+
+ tmp = req->req.length - req->req.actual;
+
+ if (count > tmp) {
+ if ((tmp % ep->ep.maxpacket) != 0) {
+ dev_err(ep->dev->dev,
+ "%s out fifo %d bytes, expected %d\n",
+ ep->ep.name, count, tmp);
+ cleanup = 1;
+ }
+ count = (tmp > 0) ? tmp : 0;
+ }
+
+ is_short = net2272_read_packet(ep, buf, req, count);
+
+ /* completion */
+ if (unlikely(cleanup || is_short ||
+ ((req->req.actual == req->req.length)
+ && !req->req.zero))) {
+
+ if (cleanup) {
+ net2272_out_flush(ep);
+ net2272_done(ep, req, -EOVERFLOW);
+ } else
+ net2272_done(ep, req, 0);
+
+ /* re-initialize endpoint transfer registers
+ * otherwise they may result in erroneous pre-validation
+ * for subsequent control reads
+ */
+ if (unlikely(ep->num == 0)) {
+ net2272_ep_write(ep, EP_TRANSFER2, 0);
+ net2272_ep_write(ep, EP_TRANSFER1, 0);
+ net2272_ep_write(ep, EP_TRANSFER0, 0);
+ }
+
+ if (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ status = net2272_kick_dma(ep, req);
+ if ((status < 0) &&
+ !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
+ goto top;
+ }
+ return 1;
+ }
+ } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
+
+ return 0;
+}
+
+static void
+net2272_pio_advance(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+
+ if (unlikely(list_empty(&ep->queue)))
+ return;
+
+ req = list_entry(ep->queue.next, struct net2272_request, queue);
+ (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
+}
+
+/* returns 0 on success, else negative errno */
+static int
+net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
+ unsigned len, unsigned dir)
+{
+ dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
+ ep, buf, len, dir);
+
+ /* The NET2272 only supports a single dma channel */
+ if (dev->dma_busy)
+ return -EBUSY;
+ /*
+ * EP_TRANSFER (used to determine the number of bytes received
+ * in an OUT transfer) is 24 bits wide; don't ask for more than that.
+ */
+ if ((dir == 1) && (len > 0x1000000))
+ return -EINVAL;
+
+ dev->dma_busy = 1;
+
+ /* initialize platform's dma */
+#ifdef CONFIG_PCI
+ /* NET2272 addr, buffer addr, length, etc. */
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ /* Setup PLX 9054 DMA mode */
+ writel((1 << LOCAL_BUS_WIDTH) |
+ (1 << TA_READY_INPUT_ENABLE) |
+ (0 << LOCAL_BURST_ENABLE) |
+ (1 << DONE_INTERRUPT_ENABLE) |
+ (1 << LOCAL_ADDRESSING_MODE) |
+ (1 << DEMAND_MODE) |
+ (1 << DMA_EOT_ENABLE) |
+ (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
+ (1 << DMA_CHANNEL_INTERRUPT_SELECT),
+ dev->rdk1.plx9054_base_addr + DMAMODE0);
+
+ writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
+ writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
+ writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
+ writel((dir << DIRECTION_OF_TRANSFER) |
+ (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
+ dev->rdk1.plx9054_base_addr + DMADPR0);
+ writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
+ readl(dev->rdk1.plx9054_base_addr + INTCSR),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+
+ break;
+ }
+#endif
+
+ net2272_write(dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID) |
+ (1 << DMA_REQUEST_ENABLE) |
+ (1 << DMA_CONTROL_DACK) |
+ (dev->dma_eot_polarity << EOT_POLARITY) |
+ (dev->dma_dack_polarity << DACK_POLARITY) |
+ (dev->dma_dreq_polarity << DREQ_POLARITY) |
+ ((ep >> 1) << DMA_ENDPOINT_SELECT));
+
+ (void) net2272_read(dev, SCRATCH);
+
+ return 0;
+}
+
+static void
+net2272_start_dma(struct net2272 *dev)
+{
+ /* start platform's dma controller */
+#ifdef CONFIG_PCI
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+ break;
+ }
+#endif
+}
+
+/* returns 0 on success, else negative errno */
+static int
+net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
+{
+ unsigned size;
+ u8 tmp;
+
+ if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
+ return -EINVAL;
+
+ /* don't use dma for odd-length transfers;
+ * otherwise, we'd need to deal with the last byte with pio
+ */
+ if (req->req.length & 1)
+ return -EINVAL;
+
+ dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
+ ep->ep.name, req, (unsigned long long) req->req.dma);
+
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+
+ /* The NET2272 can only use DMA on one endpoint at a time */
+ if (ep->dev->dma_busy)
+ return -EBUSY;
+
+ /* Make sure we only DMA an even number of bytes (we'll use
+ * pio to complete the transfer)
+ */
+ size = req->req.length;
+ size &= ~1;
+
+ /* device-to-host transfer */
+ if (ep->is_in) {
+ /* initialize platform's dma controller */
+ if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
+ /* unable to obtain DMA channel; return error and use pio mode */
+ return -EBUSY;
+ req->req.actual += size;
+
+ /* host-to-device transfer */
+ } else {
+ tmp = net2272_ep_read(ep, EP_STAT0);
+
+ /* initialize platform's dma controller */
+ if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
+ /* unable to obtain DMA channel; return error and use pio mode */
+ return -EBUSY;
+
+ if (!(tmp & (1 << BUFFER_EMPTY)))
+ ep->not_empty = 1;
+ else
+ ep->not_empty = 0;
+
+
+ /* allow the endpoint's buffer to fill */
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+
+ /* this transfer completed and data's already in the fifo;
+ * return error so pio gets used.
+ */
+ if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
+
+ /* deassert dreq */
+ net2272_write(ep->dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID) |
+ (0 << DMA_REQUEST_ENABLE) |
+ (1 << DMA_CONTROL_DACK) |
+ (ep->dev->dma_eot_polarity << EOT_POLARITY) |
+ (ep->dev->dma_dack_polarity << DACK_POLARITY) |
+ (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
+ ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
+
+ return -EBUSY;
+ }
+ }
+
+ /* Don't use per-packet interrupts: use dma interrupts only */
+ net2272_ep_write(ep, EP_IRQENB, 0);
+
+ net2272_start_dma(ep->dev);
+
+ return 0;
+}
+
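+/*
+ * For example, a 513-byte OUT request is rejected by net2272_kick_dma()
+ * above (odd length), as is any request while the single DMA channel is
+ * busy; net2272_queue() then falls back to PIO through
+ * net2272_read_fifo()/net2272_write_fifo().
+ */
+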
+static void net2272_cancel_dma(struct net2272 *dev)
+{
+#ifdef CONFIG_PCI
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
+ writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
+ while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
+ (1 << CHANNEL_DONE)))
+ continue; /* wait for dma to stabilize */
+
+ /* dma abort generates an interrupt */
+ writeb(1 << CHANNEL_CLEAR_INTERRUPT,
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+ break;
+ }
+#endif
+
+ dev->dma_busy = 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct net2272_request *req;
+ struct net2272_ep *ep;
+ struct net2272 *dev;
+ unsigned long flags;
+ int status = -1;
+ u8 s;
+
+ req = container_of(_req, struct net2272_request, req);
+ if (!_req || !_req->complete || !_req->buf
+ || !list_empty(&req->queue))
+ return -EINVAL;
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ /* set up dma mapping in case the caller didn't */
+ if (use_dma && ep->dma && _req->dma == DMA_ADDR_INVALID) {
+ _req->dma = dma_map_single(dev->dev, _req->buf, _req->length,
+ ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+ }
+
+ dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
+ _ep->name, _req, _req->length, _req->buf,
+ (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ /* kickstart this i/o queue? */
+ if (list_empty(&ep->queue) && !ep->stopped) {
+ /* maybe there's no control data, just status ack */
+ if (ep->num == 0 && _req->length == 0) {
+ net2272_done(ep, req, 0);
+ dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
+ goto done;
+ }
+
+ /* Return zlp, don't let it block subsequent packets */
+ s = net2272_ep_read(ep, EP_STAT0);
+ if (s & (1 << BUFFER_EMPTY)) {
+ /* Buffer is empty; check for a blocking zlp and handle it */
+ if ((s & (1 << NAK_OUT_PACKETS)) &&
+ net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
+ dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
+ /*
+ * Request is going to terminate with a short packet ...
+ * hope the client is ready for it!
+ */
+ status = net2272_read_fifo(ep, req);
+ /* clear short packet naking */
+ net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
+ goto done;
+ }
+ }
+
+ /* try dma first */
+ status = net2272_kick_dma(ep, req);
+
+ if (status < 0) {
+ /* dma failed (most likely in use by another endpoint)
+ * fall back to pio
+ */
+ status = 0;
+
+ if (ep->is_in)
+ status = net2272_write_fifo(ep, req);
+ else {
+ s = net2272_ep_read(ep, EP_STAT0);
+ if ((s & (1 << BUFFER_EMPTY)) == 0)
+ status = net2272_read_fifo(ep, req);
+ }
+
+ if (unlikely(status != 0)) {
+ if (status > 0)
+ status = 0;
+ req = NULL;
+ }
+ }
+ }
+ if (likely(req))
+ list_add_tail(&req->queue, &ep->queue);
+
+ if (likely(!list_empty(&ep->queue)))
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+ done:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/* dequeue ALL requests */
+static void
+net2272_dequeue_all(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+
+ /* called with spinlock held */
+ ep->stopped = 1;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request,
+ queue);
+ net2272_done(ep, req, -ESHUTDOWN);
+ }
+}
+
+/* dequeue JUST ONE request */
+static int
+net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct net2272_ep *ep;
+ struct net2272_request *req;
+ unsigned long flags;
+ int stopped;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0) || !_req)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ stopped = ep->stopped;
+ ep->stopped = 1;
+
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req)
+ break;
+ }
+ if (&req->req != _req) {
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return -EINVAL;
+ }
+
+ /* queue head may be partially complete */
+ if (ep->queue.next == &req->queue) {
+ dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
+ net2272_done(ep, req, -ECONNRESET);
+ }
+ req = NULL;
+ ep->stopped = stopped;
+
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+ struct net2272_ep *ep;
+ unsigned long flags;
+ int ret = 0;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -EINVAL;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+ if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ if (!list_empty(&ep->queue))
+ ret = -EAGAIN;
+ else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
+ ret = -EAGAIN;
+ else {
+ dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
+ value ? "set" : "clear",
+ wedged ? "wedge" : "halt");
+ /* set/clear */
+ if (value) {
+ if (ep->num == 0)
+ ep->dev->protocol_stall = 1;
+ else
+ set_halt(ep);
+ if (wedged)
+ ep->wedged = 1;
+ } else {
+ clear_halt(ep);
+ ep->wedged = 0;
+ }
+ }
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+ return ret;
+}
+
+static int
+net2272_set_halt(struct usb_ep *_ep, int value)
+{
+ return net2272_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int
+net2272_set_wedge(struct usb_ep *_ep)
+{
+ if (!_ep || _ep->name == ep0name)
+ return -EINVAL;
+ return net2272_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static int
+net2272_fifo_status(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+ u16 avail;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -ENODEV;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
+ avail |= net2272_ep_read(ep, EP_AVAIL0);
+ if (avail > ep->fifo_size)
+ return -EOVERFLOW;
+ if (ep->is_in)
+ avail = ep->fifo_size - avail;
+ return avail;
+}
+
+static void
+net2272_fifo_flush(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return;
+
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+}
+
+static struct usb_ep_ops net2272_ep_ops = {
+ .enable = net2272_enable,
+ .disable = net2272_disable,
+
+ .alloc_request = net2272_alloc_request,
+ .free_request = net2272_free_request,
+
+ .queue = net2272_queue,
+ .dequeue = net2272_dequeue,
+
+ .set_halt = net2272_set_halt,
+ .set_wedge = net2272_set_wedge,
+ .fifo_status = net2272_fifo_status,
+ .fifo_flush = net2272_fifo_flush,
+};
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_get_frame(struct usb_gadget *_gadget)
+{
+ struct net2272 *dev;
+ unsigned long flags;
+ u16 ret;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2272, gadget);
+ spin_lock_irqsave(&dev->lock, flags);
+
+ ret = net2272_read(dev, FRAME1) << 8;
+ ret |= net2272_read(dev, FRAME0);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int
+net2272_wakeup(struct usb_gadget *_gadget)
+{
+ struct net2272 *dev;
+ u8 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return 0;
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tmp = net2272_read(dev, USBCTL0);
+ if (tmp & (1 << IO_WAKEUP_ENABLE))
+ net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+static int
+net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
+{
+ struct net2272 *dev;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ dev->is_selfpowered = value;
+
+ return 0;
+}
+
+static int
+net2272_pullup(struct usb_gadget *_gadget, int is_on)
+{
+ struct net2272 *dev;
+ u8 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return -ENODEV;