2 * BRIEF MODULE DESCRIPTION
3 * Au1000 USB Device-Side (device layer)
5 * Copyright 2001-2002 MontaVista Software Inc.
6 * Author: MontaVista Software, Inc.
7 * stevel@mvista.com or source@mvista.com
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
20 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA.
29 #include <linux/kernel.h>
30 #include <linux/ioport.h>
31 #include <linux/sched.h>
32 #include <linux/signal.h>
33 #include <linux/errno.h>
34 #include <linux/poll.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/fcntl.h>
38 #include <linux/module.h>
39 #include <linux/spinlock.h>
40 #include <linux/list.h>
41 #include <linux/smp_lock.h>
43 #include <linux/usb.h>
46 #include <asm/uaccess.h>
48 #include <asm/mipsregs.h>
49 #include <asm/au1000.h>
50 #include <asm/au1000_dma.h>
51 #include <asm/au1000_usbdev.h>
/*
 * Debug/utility macros.
 * NOTE(review): three vdbg definitions appear back to back here; in the
 * original they are selected by preprocessor conditionals (debug level)
 * not visible in this extract.
 */
#define vdbg(fmt, arg...) printk(KERN_DEBUG __FILE__ ": " fmt "\n" , ## arg)
#define vdbg(fmt, arg...) do {} while (0)
#define vdbg(fmt, arg...) do {} while (0)
/* NOTE: MAX evaluates its arguments more than once; do not pass
 * expressions with side effects. */
#define MAX(a,b) (((a)>(b))?(a):(b))
/* kmalloc flags: must not sleep when called from interrupt context */
#define ALLOC_FLAGS (in_interrupt () ? GFP_ATOMIC : GFP_KERNEL)
/* Au1000 USBD endpoint FIFOs hold 8 bytes */
#define EP_FIFO_DEPTH 8
/* NOTE(review): the following are fields of the endpoint register/state
 * structures; the enclosing struct declarations are not visible in this
 * extract. */
int write_fifo_status;
struct usb_endpoint_descriptor *desc;
/* Only one of these are used, unless this is the control ep */
unsigned int indma, outdma; /* DMA channel numbers for IN, OUT */
/* following are extracted from endpoint descriptor for easy access */
/* WE assign endpoint addresses! */
/* Single global device-controller state (one controller per SoC). */
static struct usb_dev {
	ep0_stage_t ep0_stage;	/* current stage of the EP0 control transfer */
	struct usb_device_descriptor * dev_desc;
	struct usb_interface_descriptor* if_desc;
	struct usb_config_descriptor * conf_desc;
	struct usb_string_descriptor * str_desc[6];
	/* callback to function layer */
	void (*func_cb)(usbdev_cb_type_t type, unsigned long arg,
	usbdev_state_t state; // device state
	int suspended; // suspended flag
	int address; // device address
	u8 alternate_setting;
	u8 configuration; // configuration value
	int remote_wakeup_en;
/* Per-FIFO register addresses; -1 marks a register the FIFO lacks
 * (IN-only FIFOs have no read side, OUT-only no write side). */
static endpoint_reg_t ep_reg[] = {
	// FIFO's 0 and 1 are EP0 default control
	{USBD_EP0RD, USBD_EP0WR, USBD_EP0CS, USBD_EP0RDSTAT, USBD_EP0WRSTAT },
	{ -1, USBD_EP2WR, USBD_EP2CS, -1, USBD_EP2WRSTAT },
	{ -1, USBD_EP3WR, USBD_EP3CS, -1, USBD_EP3WRSTAT },
	// FIFO 4 is EP4, OUT
	{USBD_EP4RD, -1, USBD_EP4CS, USBD_EP4RDSTAT, -1 },
	// FIFO 5 is EP5, OUT
	{USBD_EP5RD, -1, USBD_EP5CS, USBD_EP5RDSTAT, -1 }
	/* DMA request IDs and human-readable names, indexed to match the
	 * endpoint/FIFO numbering above. */
	{ DMA_ID_USBDEV_EP0_TX, "USBDev EP0 IN" },
	{ DMA_ID_USBDEV_EP0_RX, "USBDev EP0 OUT" },
	{ DMA_ID_USBDEV_EP2_TX, "USBDev EP2 IN" },
	{ DMA_ID_USBDEV_EP3_TX, "USBDev EP3 IN" },
	{ DMA_ID_USBDEV_EP4_RX, "USBDev EP4 OUT" },
	{ DMA_ID_USBDEV_EP5_RX, "USBDev EP5 OUT" }
/* Direction bit as used in the controller's CONFIG write sequence */
#define DIR_IN (1<<3)
/* Endpoint transfer-type shorthands (USB ch9 attribute values) */
#define CONTROL_EP USB_ENDPOINT_XFER_CONTROL
#define BULK_EP USB_ENDPOINT_XFER_BULK
/*
 * Map a USB endpoint address to its endpoint_t. Addresses 0 and 1
 * both refer to the default control endpoint.
 */
static inline endpoint_t *
epaddr_to_ep(struct usb_dev* dev, int ep_addr)
	if (ep_addr >= 0 && ep_addr < 2)
	return &dev->ep[ep_addr];
/* Names of the 13 standard ch9 device requests, indexed by bRequest. */
static const char* std_req_name[] = {
/* Return a printable name for a standard request code (for logging). */
static inline const char*
get_std_req_name(int req)
	return (req >= 0 && req <= 12) ? std_req_name[req] : "UNKNOWN";
200 dump_setup(struct usb_ctrlrequest* s)
202 dbg("%s: requesttype=%d", __FUNCTION__, s->requesttype);
203 dbg("%s: request=%d %s", __FUNCTION__, s->request,
204 get_std_req_name(s->request));
205 dbg("%s: value=0x%04x", __FUNCTION__, s->wValue);
206 dbg("%s: index=%d", __FUNCTION__, s->index);
207 dbg("%s: length=%d", __FUNCTION__, s->length);
/*
 * Allocate a usbdev_pkt_t with room for data_size payload bytes,
 * stamped with the endpoint's address; optionally copies initial
 * data into the payload. Caller owns (and must free) the packet.
 */
static inline usbdev_pkt_t *
alloc_packet(endpoint_t * ep, int data_size, void* data)
	(usbdev_pkt_t *)kmalloc(sizeof(usbdev_pkt_t) + data_size,
	pkt->ep_addr = ep->address;
	pkt->size = data_size;
	/* only copied when the caller supplied initial data */
	memcpy(pkt->payload, data, data_size);
/*
 * Link a packet to the tail of the endpoint's packet list.
 * EP spinlock must be held when calling.
 */
link_tail(endpoint_t * ep, pkt_list_t * list, usbdev_pkt_t * pkt)
	/* empty list: packet becomes both head and tail */
	list->head = list->tail = pkt;
	/* non-empty: append after current tail */
	list->tail->next = pkt;
/*
 * Unlink and return a packet from the head of the given packet
 * list. It is the responsibility of the caller to free the packet.
 * EP spinlock must be held when calling.
 */
static usbdev_pkt_t *
unlink_head(pkt_list_t * list)
	/* nothing queued (or inconsistent count) */
	if (!pkt || !list->count) {
	list->head = pkt->next;
	/* list just became empty: clear tail as well */
	list->head = list->tail = NULL;
/*
 * Create and attach a new (empty) packet to the tail of the endpoint's
 * packet list. EP spinlock must be held when calling.
 */
static usbdev_pkt_t *
add_packet(endpoint_t * ep, pkt_list_t * list, int size)
	usbdev_pkt_t *pkt = alloc_packet(ep, size, NULL);
	link_tail(ep, list, pkt);
/*
 * Unlink and free a packet from the head of the endpoint's
 * packet list. EP spinlock must be held when calling.
 */
free_packet(pkt_list_t * list)
	kfree(unlink_head(list));
/* Free every packet on the list. EP spinlock must be held when calling. */
flush_pkt_list(pkt_list_t * list)
306 /* EP spinlock must be held when calling */
308 flush_write_fifo(endpoint_t * ep)
310 if (ep->reg->write_fifo_status >= 0) {
311 au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
313 ep->reg->write_fifo_status);
315 //au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
316 // ep->reg->write_fifo_status);
320 /* EP spinlock must be held when calling */
322 flush_read_fifo(endpoint_t * ep)
324 if (ep->reg->read_fifo_status >= 0) {
325 au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
327 ep->reg->read_fifo_status);
329 //au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
330 // ep->reg->read_fifo_status);
335 /* EP spinlock must be held when calling. */
337 endpoint_flush(endpoint_t * ep)
339 // First, flush all packets
340 flush_pkt_list(&ep->inlist);
341 flush_pkt_list(&ep->outlist);
343 // Now flush the endpoint's h/w FIFO(s)
344 flush_write_fifo(ep);
348 /* EP spinlock must be held when calling. */
350 endpoint_stall(endpoint_t * ep)
356 cs = au_readl(ep->reg->ctrl_stat) | USBDEV_CS_STALL;
357 au_writel(cs, ep->reg->ctrl_stat);
360 /* EP spinlock must be held when calling. */
362 endpoint_unstall(endpoint_t * ep)
368 cs = au_readl(ep->reg->ctrl_stat) & ~USBDEV_CS_STALL;
369 au_writel(cs, ep->reg->ctrl_stat);
/* Reset the endpoint's DATA0/1 toggle; currently a stub — the hardware
 * may not expose a way to do this (see FIXME below). */
endpoint_reset_datatoggle(endpoint_t * ep)
	// FIXME: is this possible?
379 /* EP spinlock must be held when calling. */
381 endpoint_fifo_read(endpoint_t * ep)
385 usbdev_pkt_t *pkt = ep->outlist.tail;
390 bufptr = &pkt->payload[pkt->size];
391 while (au_readl(ep->reg->read_fifo_status) & USBDEV_FSTAT_FCNT_MASK) {
392 *bufptr++ = au_readl(ep->reg->read_fifo) & 0xff;
401 /* EP spinlock must be held when calling. */
403 endpoint_fifo_write(endpoint_t * ep, int index)
407 usbdev_pkt_t *pkt = ep->inlist.head;
412 bufptr = &pkt->payload[index];
413 while ((au_readl(ep->reg->write_fifo_status) &
414 USBDEV_FSTAT_FCNT_MASK) < EP_FIFO_DEPTH) {
415 if (bufptr < pkt->payload + pkt->size) {
416 au_writel(*bufptr++, ep->reg->write_fifo);
/*
 * This routine is called to restart transmission of a packet.
 * The endpoint's TSIZE must be set to the new packet's size,
 * and DMA to the write FIFO needs to be restarted.
 * EP spinlock must be held when calling.
 */
kickstart_send_packet(endpoint_t * ep)
	usbdev_pkt_t *pkt = ep->inlist.head;
	vdbg("%s: ep%d, pkt=%p", __FUNCTION__, ep->address, pkt);
	err("%s: head=NULL! list->count=%d", __FUNCTION__,
	/* write payload back to memory so the DMA engine sees it */
	dma_cache_wback_inv((unsigned long)pkt->payload, pkt->size);
	/* make sure FIFO is empty */
	flush_write_fifo(ep);
	/* preserve only the STALL bit, then program the transfer size */
	cs = au_readl(ep->reg->ctrl_stat) & USBDEV_CS_STALL;
	cs |= (pkt->size << USBDEV_CS_TSIZE_BIT);
	au_writel(cs, ep->reg->ctrl_stat);
	/* program whichever ping-pong DMA buffer is currently active */
	if (get_dma_active_buffer(ep->indma) == 1) {
	set_dma_count1(ep->indma, pkt->size);
	set_dma_addr1(ep->indma, virt_to_phys(pkt->payload));
	enable_dma_buffer1(ep->indma); // reenable
	set_dma_count0(ep->indma, pkt->size);
	set_dma_addr0(ep->indma, virt_to_phys(pkt->payload));
	enable_dma_buffer0(ep->indma); // reenable
	if (dma_halted(ep->indma))
	start_dma(ep->indma);
/*
 * This routine is called when a packet in the inlist has been
 * completed. Frees the completed packet and starts sending the
 * next. EP spinlock must be held when calling.
 */
static usbdev_pkt_t *
send_packet_complete(endpoint_t * ep)
	usbdev_pkt_t *pkt = unlink_head(&ep->inlist);
	/* NAK bit in ctrl/status tells us whether the host accepted it */
	(au_readl(ep->reg->ctrl_stat) & USBDEV_CS_NAK) ?
	PKT_STATUS_NAK : PKT_STATUS_ACK;
	vdbg("%s: ep%d, %s pkt=%p, list count=%d", __FUNCTION__,
	     ep->address, (pkt->status & PKT_STATUS_NAK) ?
	     "NAK" : "ACK", pkt, ep->inlist.count);
	/*
	 * The write fifo should already be drained if things are
	 * working right, but flush it anyway just in case.
	 */
	flush_write_fifo(ep);
	// begin transmitting next packet in the inlist
	if (ep->inlist.count) {
	kickstart_send_packet(ep);
/*
 * Add a new packet to the tail of the given ep's packet
 * inlist. The transmit complete interrupt frees packets from
 * the head of this list. EP spinlock must be held when calling.
 */
send_packet(struct usb_dev* dev, usbdev_pkt_t *pkt, int async)
	if (!pkt || !(ep = epaddr_to_ep(dev, pkt->ep_addr)))
	/* synchronous send preempts anything already queued */
	if (!async && list->count) {
	flush_pkt_list(list);
	link_tail(ep, list, pkt);
	vdbg("%s: ep%d, pkt=%p, size=%d, list count=%d", __FUNCTION__,
	     ep->address, pkt, pkt->size, list->count);
	if (list->count == 1) {
	/*
	 * if the packet count is one, it means the list was empty,
	 * and no more data will go out this ep until we kick-start
	 */
	kickstart_send_packet(ep);
/*
 * This routine is called to restart reception of a packet.
 * EP spinlock must be held when calling.
 */
kickstart_receive_packet(endpoint_t * ep)
	// get and link a new packet for next reception
	if (!(pkt = add_packet(ep, &ep->outlist, ep->max_pkt_size))) {
	err("%s: could not alloc new packet", __FUNCTION__);
	/* aim DMA at whichever ping-pong buffer is currently active,
	 * zeroing the other buffer's count */
	if (get_dma_active_buffer(ep->outdma) == 1) {
	clear_dma_done1(ep->outdma);
	set_dma_count1(ep->outdma, ep->max_pkt_size);
	set_dma_count0(ep->outdma, 0);
	set_dma_addr1(ep->outdma, virt_to_phys(pkt->payload));
	enable_dma_buffer1(ep->outdma); // reenable
	clear_dma_done0(ep->outdma);
	set_dma_count0(ep->outdma, ep->max_pkt_size);
	set_dma_count1(ep->outdma, 0);
	set_dma_addr0(ep->outdma, virt_to_phys(pkt->payload));
	enable_dma_buffer0(ep->outdma); // reenable
	if (dma_halted(ep->outdma))
	start_dma(ep->outdma);
/*
 * This routine is called when a packet in the outlist has been
 * completed (received) and we need to prepare for a new packet
 * to be received. Halts DMA and computes the packet size from the
 * remaining DMA counter. Then prepares a new packet for reception
 * and restarts DMA. FIXME: what if another packet comes in
 * on top of the completed packet? Counter would be wrong.
 * EP spinlock must be held when calling.
 */
static usbdev_pkt_t *
receive_packet_complete(endpoint_t * ep)
	usbdev_pkt_t *pkt = ep->outlist.tail;
	halt_dma(ep->outdma);
	cs = au_readl(ep->reg->ctrl_stat);
	/* actual bytes received = max size minus what DMA did not use */
	pkt->size = ep->max_pkt_size - get_dma_residue(ep->outdma);
	/* invalidate cache so CPU reads the DMA'd data, not stale lines */
	dma_cache_inv((unsigned long)pkt->payload, pkt->size);
	/* need to pull out any remaining bytes in the FIFO. */
	endpoint_fifo_read(ep);
	/* should be drained now, but flush anyway just in case. */
	pkt->status = (cs & USBDEV_CS_NAK) ? PKT_STATUS_NAK : PKT_STATUS_ACK;
	/* on EP0 the SU bit flags a SETUP (vs. DATA) packet */
	if (ep->address == 0 && (cs & USBDEV_CS_SU))
	pkt->status |= PKT_STATUS_SU;
	vdbg("%s: ep%d, %s pkt=%p, size=%d", __FUNCTION__,
	     ep->address, (pkt->status & PKT_STATUS_NAK) ?
	     "NAK" : "ACK", pkt, pkt->size);
	kickstart_receive_packet(ep);
629 ****************************************************************************
630 * Here starts the standard device request handlers. They are
631 * all called by do_setup() via a table of function pointers.
632 ****************************************************************************
/* GET_STATUS handler: all three recipients are still unimplemented;
 * anything else stalls EP0. Returns the next EP0 stage. */
do_get_status(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	switch (setup->bRequestType) {
	// FIXME: send device status
	case 0x81: // Interface
	// FIXME: send interface status
	case 0x82: // End Point
	// FIXME: send endpoint status
	endpoint_stall(&dev->ep[0]); // Stall End Point 0
/* CLEAR_FEATURE handler: clears DEVICE_REMOTE_WAKEUP (wValue==1) on the
 * device, or ENDPOINT_HALT (wValue==0) on an endpoint; anything else
 * stalls EP0. */
do_clear_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	switch (setup->bRequestType) {
	if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
	dev->remote_wakeup_en = 0;
	endpoint_stall(&dev->ep[0]);
	case 0x02: // End Point
	if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
	le16_to_cpu(setup->wIndex) & 0xff);
	/* un-halt the endpoint and reset its DATA0/1 toggle */
	endpoint_unstall(ep);
	endpoint_reset_datatoggle(ep);
	endpoint_stall(&dev->ep[0]);
/* Handler for reserved/unsupported request codes: always stall EP0. */
do_reserved(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	// Invalid request, stall End Point 0
	endpoint_stall(&dev->ep[0]);
/* SET_FEATURE handler: sets DEVICE_REMOTE_WAKEUP (wValue==1) on the
 * device, or ENDPOINT_HALT (wValue==0) on an endpoint; anything else
 * stalls EP0. */
do_set_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	switch (setup->bRequestType) {
	if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
	dev->remote_wakeup_en = 1;
	endpoint_stall(&dev->ep[0]);
	case 0x02: // End Point
	if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
	le16_to_cpu(setup->wIndex) & 0xff);
	endpoint_stall(&dev->ep[0]);
/* SET_ADDRESS handler: records the new device address and moves the
 * device state machine accordingly, notifying the function layer on a
 * state change. */
do_set_address(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	int new_state = dev->state;
	int new_addr = le16_to_cpu(setup->wValue);
	dbg("%s: our address=%d", __FUNCTION__, new_addr);
	if (new_addr > 127) {
	// usb spec doesn't tell us what to do, so just go to
	} else if (dev->address != new_addr) {
	dev->address = new_addr;
	if (dev->state != new_state) {
	dev->state = new_state;
	/* inform function layer of usbdev state change */
	dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
/* GET_DESCRIPTOR handler: sends device, config (partial or full), or
 * string descriptors on EP0, clamping the reply to the host-requested
 * wLength; invalid indices/types stall EP0. */
do_get_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	int strnum, desc_len = le16_to_cpu(setup->wLength);
	/* descriptor type is in the high byte of wValue */
	switch (le16_to_cpu(setup->wValue) >> 8) {
	// send device descriptor!
	desc_len = desc_len > dev->dev_desc->bLength ?
	dev->dev_desc->bLength : desc_len;
	dbg("sending device desc, size=%d", desc_len);
	send_packet(dev, alloc_packet(&dev->ep[0], desc_len,
	// If the config descr index in low-byte of
	// setup->wValue is valid, send config descr,
	// otherwise stall ep0.
	if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
	// send config descriptor!
	if (desc_len <= USB_DT_CONFIG_SIZE) {
	dbg("sending partial config desc, size=%d",
	alloc_packet(&dev->ep[0],
	/* host wants more than the bare config header: send the
	 * pre-built full (config+interface+endpoint) descriptor */
	int len = dev->conf_desc->wTotalLength;
	dbg("sending whole config desc,"
	    " size=%d, our size=%d", desc_len, len);
	desc_len = desc_len > len ? len : desc_len;
	alloc_packet(&dev->ep[0],
	dev->full_conf_desc),
	endpoint_stall(&dev->ep[0]);
	// If the string descr index in low-byte of setup->wValue
	// is valid, send string descr, otherwise stall ep0.
	strnum = le16_to_cpu(setup->wValue) & 0xff;
	if (strnum >= 0 && strnum < 6) {
	struct usb_string_descriptor *desc =
	dev->str_desc[strnum];
	desc_len = desc_len > desc->bLength ?
	desc->bLength : desc_len;
	dbg("sending string desc %d", strnum);
	alloc_packet(&dev->ep[0], desc_len,
	endpoint_stall(&dev->ep[0]);
	err("invalid get desc=%d, stalled",
	    le16_to_cpu(setup->wValue) >> 8);
	endpoint_stall(&dev->ep[0]); // Stall endpoint 0
/* SET_DESCRIPTOR handler: not implemented; only advances the EP0 stage
 * since the descriptor arrives in a following OUT data stage. */
do_set_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	// there will be an OUT data stage (the descriptor to set)
/* GET_CONFIGURATION handler: reply with the 1-byte current config value. */
do_get_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	// send dev->configuration
	dbg("sending config");
	send_packet(dev, alloc_packet(&dev->ep[0], 1, &dev->configuration),
/* SET_CONFIGURATION handler: config 0 drops back to ADDRESS state,
 * config 1 enters CONFIGURED; the function layer is told of either
 * state change. Only one configuration is supported. */
do_set_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	// set active config to low-byte of setup->wValue
	dev->configuration = le16_to_cpu(setup->wValue) & 0xff;
	dbg("set config, config=%d", dev->configuration);
	if (!dev->configuration && dev->state > DEFAULT) {
	dev->state = ADDRESS;
	/* inform function layer of usbdev state change */
	dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
	} else if (dev->configuration == 1) {
	dev->state = CONFIGURED;
	/* inform function layer of usbdev state change */
	dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
	// FIXME: "respond with request error" - how?
/* GET_INTERFACE handler: only interface 0 exists; when configured,
 * reply with the 1-byte alternate setting. */
do_get_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	// interface must be zero.
	if ((le16_to_cpu(setup->wIndex) & 0xff) || dev->state == ADDRESS) {
	// FIXME: respond with "request error". how?
	} else if (dev->state == CONFIGURED) {
	// send dev->alternate_setting
	dbg("sending alt setting");
	send_packet(dev, alloc_packet(&dev->ep[0], 1,
	            &dev->alternate_setting), 0);
/* SET_INTERFACE handler: records interface/alt-setting; both must be
 * zero since only one interface with no alternates is supported. */
do_set_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
	if (dev->state == ADDRESS) {
	// FIXME: respond with "request error". how?
	} else if (dev->state == CONFIGURED) {
	dev->interface = le16_to_cpu(setup->wIndex) & 0xff;
	dev->alternate_setting =
	le16_to_cpu(setup->wValue) & 0xff;
	// interface and alternate_setting must be zero
	if (dev->interface || dev->alternate_setting) {
	// FIXME: respond with "request error". how?
/* SYNCH_FRAME handler (isochronous only): not implemented. */
do_synch_frame(struct usb_dev* dev, struct usb_ctrlrequest* setup)
/* A standard-request handler: consumes the SETUP packet and returns
 * the next EP0 stage. */
typedef ep0_stage_t (*req_method_t)(struct usb_dev* dev,
                                    struct usb_ctrlrequest* setup);

/* Table of the standard device request handlers, indexed by the ch9
 * bRequest code (do_reserved fills the reserved slots). */
static const req_method_t req_method[] = {
	do_get_configuration,
	do_set_configuration,
915 // SETUP packet request dispatcher
917 do_setup (struct usb_dev* dev, struct usb_ctrlrequest* setup)
921 dbg("%s: req %d %s", __FUNCTION__, setup->bRequestType,
922 get_std_req_name(setup->bRequestType));
924 if ((setup->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD ||
925 (setup->bRequestType & USB_RECIP_MASK) != USB_RECIP_DEVICE) {
926 err("%s: invalid requesttype 0x%02x", __FUNCTION__,
927 setup->bRequestType);
931 if ((setup->bRequestType & 0x80) == USB_DIR_OUT && setup->wLength)
932 dbg("%s: OUT phase! length=%d", __FUNCTION__, setup->wLength);
934 if (setup->bRequestType < sizeof(req_method)/sizeof(req_method_t))
935 m = req_method[setup->bRequestType];
939 dev->ep0_stage = (*m)(dev, setup);
/*
 * A SETUP, DATA0, or DATA1 packet has been received
 * on the default control endpoint's fifo.
 */
process_ep0_receive (struct usb_dev* dev)
	endpoint_t *ep0 = &dev->ep[0];
	spin_lock(&ep0->lock);
	// complete packet and prepare a new packet
	pkt = receive_packet_complete(ep0);
	// FIXME: should put a warn/err here.
	spin_unlock(&ep0->lock);
	// unlink immediately from endpoint.
	unlink_head(&ep0->outlist);
	// override current stage if h/w says it's a setup packet
	if (pkt->status & PKT_STATUS_SU)
	dev->ep0_stage = SETUP_STAGE;
	switch (dev->ep0_stage) {
	vdbg("SU bit is %s in setup stage",
	     (pkt->status & PKT_STATUS_SU) ? "set" : "not set");
	/* a SETUP packet must be exactly 8 bytes */
	if (pkt->size == sizeof(struct usb_ctrlrequest)) {
	if (pkt->status & PKT_STATUS_ACK)
	vdbg("received SETUP");
	vdbg("received NAK SETUP");
	do_setup(dev, (struct usb_ctrlrequest*)pkt->payload);
	err("%s: wrong size SETUP received", __FUNCTION__);
	/*
	 * this setup has an OUT data stage. Of the standard
	 * device requests, only set_descriptor has this stage,
	 * so this packet is that descriptor. TODO: drop it for
	 * now, set_descriptor not implemented.
	 *
	 * Need to place a byte in the write FIFO here, to prepare
	 * to send a zero-length DATA ack packet to the host in the
	 */
	au_writel(0, ep0->reg->write_fifo);
	dbg("received OUT stage DATAx on EP0, size=%d", pkt->size);
	dev->ep0_stage = SETUP_STAGE;
	// this setup had an IN data stage, and host is ACK'ing
	// the packet we sent during that stage.
	warn("received non-zero ACK on EP0??");
	vdbg("received ACK on EP0");
	dev->ep0_stage = SETUP_STAGE;
	spin_unlock(&ep0->lock);
	// we're done processing the packet, free it
/*
 * A DATA0/1 packet has been received on one of the OUT endpoints (4 or 5).
 * Completes the packet and hands it to the function layer.
 */
process_ep_receive (struct usb_dev* dev, endpoint_t *ep)
	spin_lock(&ep->lock);
	pkt = receive_packet_complete(ep);
	spin_unlock(&ep->lock);
	/* deliver outside the EP lock */
	dev->func_cb(CB_PKT_COMPLETE, (unsigned long)pkt, dev->cb_data);
/* This ISR handles the receive complete and suspend events */
req_sus_intr (int irq, void *dev_id, struct pt_regs *regs)
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	status = au_readl(USBD_INTSTAT);
	au_writel(status, USBD_INTSTAT); // ack'em
	/* status bits 0/4/5 = rx complete on EP0/EP4/EP5 */
	if (status & (1<<0))
	process_ep0_receive(dev);
	if (status & (1<<4))
	process_ep_receive(dev, &dev->ep[4]);
	if (status & (1<<5))
	process_ep_receive(dev, &dev->ep[5]);
/* This ISR handles the DMA done events on EP0 */
dma_done_ep0_intr(int irq, void *dev_id, struct pt_regs *regs)
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	endpoint_t *ep0 = &dev->ep[0];
	spin_lock(&ep0->lock);
	cs0 = au_readl(ep0->reg->ctrl_stat);
	// first check packet transmit done
	if ((buff_done = get_dma_buffer_done(ep0->indma)) != 0) {
	// transmitted a DATAx packet during DATA stage
	// on control endpoint 0
	// clear DMA done bit
	if (buff_done & DMA_D0)
	clear_dma_done0(ep0->indma);
	if (buff_done & DMA_D1)
	clear_dma_done1(ep0->indma);
	pkt = send_packet_complete(ep0);
	/*
	 * Now check packet receive done. Shouldn't get these,
	 * the receive packet complete intr should happen
	 * before the DMA done intr occurs.
	 */
	if ((buff_done = get_dma_buffer_done(ep0->outdma)) != 0) {
	// clear DMA done bit
	if (buff_done & DMA_D0)
	clear_dma_done0(ep0->outdma);
	if (buff_done & DMA_D1)
	clear_dma_done1(ep0->outdma);
	//process_ep0_receive(dev);
	spin_unlock(&ep0->lock);
/* This ISR handles the DMA done events on endpoints 2,3,4,5 */
dma_done_ep_intr(int irq, void *dev_id, struct pt_regs *regs)
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	for (i = 2; i < 6; i++) {
	endpoint_t *ep = &dev->ep[i];
	if (!ep->active) continue;
	spin_lock(&ep->lock);
	if (ep->direction == USB_DIR_IN) {
	buff_done = get_dma_buffer_done(ep->indma);
	if (buff_done != 0) {
	// transmitted a DATAx pkt on the IN ep
	// clear DMA done bit
	if (buff_done & DMA_D0)
	clear_dma_done0(ep->indma);
	if (buff_done & DMA_D1)
	clear_dma_done1(ep->indma);
	pkt = send_packet_complete(ep);
	/* drop the EP lock around the function-layer callback */
	spin_unlock(&ep->lock);
	dev->func_cb(CB_PKT_COMPLETE,
	spin_lock(&ep->lock);
	/*
	 * Check packet receive done (OUT ep). Shouldn't get
	 * these, the rx packet complete intr should happen
	 * before the DMA done intr occurs.
	 */
	buff_done = get_dma_buffer_done(ep->outdma);
	if (buff_done != 0) {
	// received a DATAx pkt on the OUT ep
	// clear DMA done bit
	if (buff_done & DMA_D0)
	clear_dma_done0(ep->outdma);
	if (buff_done & DMA_D1)
	clear_dma_done1(ep->outdma);
	//process_ep_receive(dev, ep);
	spin_unlock(&ep->lock);
1158 /***************************************************************************
1159 * Here begins the external interface functions
1160 ***************************************************************************
/*
 * Allocate a new packet for the given (non-control, active) endpoint.
 * Part of the external interface used by the function layer.
 */
usbdev_alloc_packet(int ep_addr, int data_size, usbdev_pkt_t** pkt)
	endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
	usbdev_pkt_t* lpkt = NULL;
	/* addresses 0/1 are the control endpoint — not allowed here */
	if (!ep || !ep->active || ep->address < 2)
	if (data_size > ep->max_pkt_size)
	lpkt = *pkt = alloc_packet(ep, data_size, NULL);
/* Queue a packet for transmission on an active IN endpoint (external
 * interface; takes the EP lock itself). */
usbdev_send_packet(int ep_addr, usbdev_pkt_t * pkt)
	unsigned long flags;
	if (!pkt || !(ep = epaddr_to_ep(&usbdev, pkt->ep_addr)) ||
	    !ep->active || ep->address < 2)
	if (ep->direction != USB_DIR_IN)
	spin_lock_irqsave(&ep->lock, flags);
	count = send_packet(&usbdev, pkt, 1);
	spin_unlock_irqrestore(&ep->lock, flags);
/* Dequeue a completed received packet from an active OUT endpoint
 * (external interface). The outlist tail is the in-flight receive
 * buffer, so a packet is only handed out when count > 1. */
usbdev_receive_packet(int ep_addr, usbdev_pkt_t** pkt)
	unsigned long flags;
	usbdev_pkt_t* lpkt = NULL;
	endpoint_t *ep = epaddr_to_ep(&usbdev, ep_addr);
	if (!ep || !ep->active || ep->address < 2)
	if (ep->direction != USB_DIR_OUT)
	spin_lock_irqsave(&ep->lock, flags);
	if (ep->outlist.count > 1)
	lpkt = unlink_head(&ep->outlist);
	spin_unlock_irqrestore(&ep->lock, flags);
	/* no packet available */
/*
 * Return total queued byte count on the endpoint (external interface).
 * For OUT endpoints the in-flight tail packet is excluded from the sum.
 */
usbdev_get_byte_count(int ep_addr)
	unsigned long flags;
	endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
	if (!ep || !ep->active || ep->address < 2)
	if (ep->direction == USB_DIR_IN) {
	spin_lock_irqsave(&ep->lock, flags);
	for (scan = list->head; scan; scan = scan->next)
	count += scan->size;
	spin_unlock_irqrestore(&ep->lock, flags);
	list = &ep->outlist;
	spin_lock_irqsave(&ep->lock, flags);
	if (list->count > 1) {
	/* walk up to, but not including, the tail (still receiving) */
	for (scan = list->head; scan != list->tail;
	count += scan->size;
	spin_unlock_irqrestore(&ep->lock, flags);
	/* NOTE(review): body of usbdev_exit(); the function opening is not
	 * visible in this extract. Disables the controller, releases IRQs,
	 * DMA channels, and the full config descriptor. */
	au_writel(0, USBD_INTEN); // disable usb dev ints
	au_writel(0, USBD_ENABLE); // disable usb dev
	free_irq(AU1000_USB_DEV_REQ_INT, &usbdev);
	free_irq(AU1000_USB_DEV_SUS_INT, &usbdev);
	// free all control endpoint resources
	free_au1000_dma(ep->indma);
	free_au1000_dma(ep->outdma);
	// free ep resources
	for (i = 2; i < 6; i++) {
	if (!ep->active) continue;
	if (ep->direction == USB_DIR_IN) {
	free_au1000_dma(ep->indma);
	free_au1000_dma(ep->outdma);
	if (usbdev.full_conf_desc)
	kfree(usbdev.full_conf_desc);
/*
 * Initialize the USB device controller from the function layer's
 * descriptors: validates them, sets up endpoint state, builds the full
 * config descriptor, programs the controller's config table, and
 * requests the IRQs and DMA channels. 'cb' is the function-layer
 * callback invoked on state changes and packet completion.
 */
usbdev_init(struct usb_device_descriptor* dev_desc,
            struct usb_config_descriptor* config_desc,
            struct usb_interface_descriptor* if_desc,
            struct usb_endpoint_descriptor* ep_desc,
            struct usb_string_descriptor* str_desc[],
            void (*cb)(usbdev_cb_type_t, unsigned long, void *),
	/* hardware supports one config, one interface, up to 4 extra EPs */
	if (dev_desc->bNumConfigurations > 1 ||
	    config_desc->bNumInterfaces > 1 ||
	    if_desc->bNumEndpoints > 4) {
	err("Only one config, one i/f, and no more "
	    "than 4 ep's allowed");
	err("Function-layer callback required");
	/* EP0 packet size is fixed by the controller; force it */
	if (dev_desc->bMaxPacketSize0 != USBDEV_EP0_MAX_PACKET_SIZE) {
	warn("EP0 Max Packet size must be %d",
	     USBDEV_EP0_MAX_PACKET_SIZE);
	dev_desc->bMaxPacketSize0 = USBDEV_EP0_MAX_PACKET_SIZE;
	memset(&usbdev, 0, sizeof(struct usb_dev));
	usbdev.state = DEFAULT;
	usbdev.dev_desc = dev_desc;
	usbdev.if_desc = if_desc;
	usbdev.conf_desc = config_desc;
	usbdev.str_desc[i] = str_desc[i];
	usbdev.func_cb = cb;
	usbdev.cb_data = cb_data;
	/* Initialize default control endpoint */
	ep0 = &usbdev.ep[0];
	ep0->type = CONTROL_EP;
	ep0->max_pkt_size = USBDEV_EP0_MAX_PACKET_SIZE;
	spin_lock_init(&ep0->lock);
	ep0->desc = NULL; // ep0 has no descriptor
	ep0->reg = &ep_reg[0];
	/* Initialize the other requested endpoints */
	for (i = 0; i < if_desc->bNumEndpoints; i++) {
	struct usb_endpoint_descriptor* epd = &ep_desc[i];
	if ((epd->bEndpointAddress & 0x80) == USB_DIR_IN) {
	err("too many IN ep's requested");
	err("too many OUT ep's requested");
	/* WE assign the endpoint number; rewrite the descriptor */
	epd->bEndpointAddress &= ~0x0f;
	epd->bEndpointAddress |= (u8)ep->address;
	ep->direction = epd->bEndpointAddress & 0x80;
	ep->type = epd->bmAttributes & 0x03;
	ep->max_pkt_size = epd->wMaxPacketSize;
	spin_lock_init(&ep->lock);
	ep->reg = &ep_reg[ep->address];
	/*
	 * initialize the full config descriptor
	 * (config + interface + endpoint descriptors, contiguous)
	 */
	usbdev.full_conf_desc = fcd = kmalloc(config_desc->wTotalLength,
	err("failed to alloc full config descriptor");
	memcpy(fcd, config_desc, USB_DT_CONFIG_SIZE);
	fcd += USB_DT_CONFIG_SIZE;
	memcpy(fcd, if_desc, USB_DT_INTERFACE_SIZE);
	fcd += USB_DT_INTERFACE_SIZE;
	for (i = 0; i < if_desc->bNumEndpoints; i++) {
	memcpy(fcd, &ep_desc[i], USB_DT_ENDPOINT_SIZE);
	fcd += USB_DT_ENDPOINT_SIZE;
	/* Now we're ready to enable the controller */
	au_writel(0x0002, USBD_ENABLE);
	au_writel(0x0003, USBD_ENABLE);
	/* build and send config table based on ep descriptors */
	for (i = 0; i < 6; i++) {
	continue; // skip dummy ep
	/* 5-write CONFIG sequence per endpoint: fifo, attrs/size,
	 * size low bits, reserved, address */
	au_writel((ep->address << 4) | 0x04, USBD_CONFIG);
	au_writel(((ep->max_pkt_size & 0x380) >> 7) |
	          (ep->direction >> 4) | (ep->type << 4),
	au_writel((ep->max_pkt_size & 0x7f) << 1, USBD_CONFIG);
	au_writel(0x00, USBD_CONFIG);
	au_writel(ep->address, USBD_CONFIG);
	/* unused fifo: program a 16-byte bulk dummy endpoint */
	u8 dir = (i==2 || i==3) ? DIR_IN : DIR_OUT;
	au_writel((i << 4) | 0x04, USBD_CONFIG);
	au_writel(((16 & 0x380) >> 7) | dir |
	          (BULK_EP << 4), USBD_CONFIG);
	au_writel((16 & 0x7f) << 1, USBD_CONFIG);
	au_writel(0x00, USBD_CONFIG);
	au_writel(i, USBD_CONFIG);
	/*
	 * Enable Receive FIFO Complete interrupts only. Transmit
	 * complete is being handled by the DMA done interrupts.
	 */
	au_writel(0x31, USBD_INTEN);
	/*
	 * Controller is now enabled, request DMA and IRQ
	 */
	/* request the USB device transfer complete interrupt */
	if (request_irq(AU1000_USB_DEV_REQ_INT, req_sus_intr, SA_INTERRUPT,
	                "USBdev req", &usbdev)) {
	err("Can't get device request intr");
	/* request the USB device suspend interrupt */
	if (request_irq(AU1000_USB_DEV_SUS_INT, req_sus_intr, SA_INTERRUPT,
	                "USBdev sus", &usbdev)) {
	err("Can't get device suspend intr");
	/* Request EP0 DMA and IRQ */
	if ((ep0->indma = request_au1000_dma(ep_dma_id[0].id,
	err("Can't get %s DMA", ep_dma_id[0].str);
	if ((ep0->outdma = request_au1000_dma(ep_dma_id[1].id,
	                                      NULL, 0, NULL)) < 0) {
	err("Can't get %s DMA", ep_dma_id[1].str);
	// Flush the ep0 buffers and FIFOs
	endpoint_flush(ep0);
	// start packet reception on ep0
	kickstart_receive_packet(ep0);
	/* Request DMA and IRQ for the other endpoints */
	for (i = 2; i < 6; i++) {
	endpoint_t *ep = &usbdev.ep[i];
	// Flush the endpoint buffers and FIFOs
	if (ep->direction == USB_DIR_IN) {
	request_au1000_dma(ep_dma_id[ep->address].id,
	                   ep_dma_id[ep->address].str,
	if (ep->indma < 0) {
	err("Can't get %s DMA",
	    ep_dma_id[ep->address].str);
	request_au1000_dma(ep_dma_id[ep->address].id,
	                   ep_dma_id[ep->address].str,
	if (ep->outdma < 0) {
	err("Can't get %s DMA",
	    ep_dma_id[ep->address].str);
	// start packet reception on OUT endpoint
	kickstart_receive_packet(ep);
/* Public module interface for function-layer drivers. */
EXPORT_SYMBOL(usbdev_init);
EXPORT_SYMBOL(usbdev_exit);
EXPORT_SYMBOL(usbdev_alloc_packet);
EXPORT_SYMBOL(usbdev_receive_packet);
EXPORT_SYMBOL(usbdev_send_packet);
EXPORT_SYMBOL(usbdev_get_byte_count);