2 * Universal Host Controller Interface driver for USB.
4 * Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
17 * Intel documents this fairly well, and as far as I know there
18 * are no royalties or anything like that, but even so there are
19 * people who decided that they want to do the same thing in a
20 * completely different way.
22 * WARNING! The USB documentation is downright evil. Most of it
23 * is just crap, written by a committee. You're better off ignoring
24 * most of it, the important stuff is:
25 * - the low-level protocol (fairly simple but lots of small details)
26 * - working around the horridness of the rest
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/kernel.h>
33 #include <linux/init.h>
34 #include <linux/delay.h>
35 #include <linux/ioport.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/smp_lock.h>
39 #include <linux/errno.h>
40 #include <linux/unistd.h>
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/proc_fs.h>
44 #ifdef CONFIG_USB_DEBUG
49 #include <linux/usb.h>
51 #include <asm/uaccess.h>
54 #include <asm/system.h>
65 #define DRIVER_VERSION "v1.1"
66 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber"
67 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
70 * debug = 0, no debugging messages
71 * debug = 1, dump failed URB's except for stalls
72 * debug = 2, dump all failed URB's (including stalls)
73 * show all queues in /proc/uhci/hc*
74 * debug = 3, show all TD's in URB's when dumping
81 MODULE_PARM(debug, "i");
82 MODULE_PARM_DESC(debug, "Debug level");
84 #define ERRBUF_LEN (PAGE_SIZE * 8)
86 #include "uhci-debug.h"
88 static kmem_cache_t *uhci_up_cachep; /* urb_priv */
90 static int rh_submit_urb(struct urb *urb);
91 static int rh_unlink_urb(struct urb *urb);
92 static int uhci_get_current_frame_number(struct usb_device *dev);
93 static int uhci_unlink_urb(struct urb *urb);
94 static void uhci_unlink_generic(struct uhci *uhci, struct urb *urb);
95 static void uhci_call_completion(struct urb *urb);
97 static int ports_active(struct uhci *uhci);
98 static void suspend_hc(struct uhci *uhci);
99 static void wakeup_hc(struct uhci *uhci);
101 /* If a transfer is still active after this much time, turn off FSBR */
102 #define IDLE_TIMEOUT (HZ / 20) /* 50 ms */
103 #define FSBR_DELAY (HZ / 20) /* 50 ms */
105 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
106 /* depth first traversal. We'll do it in groups of this number of TD's */
107 /* to make sure it doesn't hog all of the bandwidth */
108 #define DEPTH_INTERVAL 5
110 #define MAX_URB_LOOP 2048 /* Maximum number of linked URB's */
113 * Only the USB core should call uhci_alloc_dev and uhci_free_dev
115 static int uhci_alloc_dev(struct usb_device *dev)
120 static int uhci_free_dev(struct usb_device *dev)
126 * Technically, updating td->status here is a race, but it's not really a
127 * problem. The worst that can happen is that we set the IOC bit again
128 generating a spurious interrupt. We could fix this by creating another
129 * QH and leaving the IOC bit always set, but then we would have to play
130 * games with the FSBR code to make sure we get the correct order in all
131 * the cases. I don't think it's worth the effort
133 static inline void uhci_set_next_interrupt(struct uhci *uhci)
137 spin_lock_irqsave(&uhci->frame_list_lock, flags);
138 uhci->skel_term_td->status |= TD_CTRL_IOC;
139 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
142 static inline void uhci_clear_next_interrupt(struct uhci *uhci)
146 spin_lock_irqsave(&uhci->frame_list_lock, flags);
147 uhci->skel_term_td->status &= ~TD_CTRL_IOC;
148 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
151 static inline void uhci_add_complete(struct urb *urb)
153 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
154 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
157 spin_lock_irqsave(&uhci->complete_list_lock, flags);
158 list_add_tail(&urbp->complete_list, &uhci->complete_list);
159 spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
162 static struct uhci_td *uhci_alloc_td(struct uhci *uhci, struct usb_device *dev)
164 dma_addr_t dma_handle;
167 td = pci_pool_alloc(uhci->td_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
171 td->dma_handle = dma_handle;
173 td->link = UHCI_PTR_TERM;
179 INIT_LIST_HEAD(&td->list);
180 INIT_LIST_HEAD(&td->fl_list);
182 usb_inc_dev_use(dev);
/*
 * Fill in a TD's hardware-visible words: status, token (info) and buffer
 * pointer.
 * NOTE(review): the body of this helper has been elided from this listing
 * (embedded line numbers jump from 188 to 195) and stray listing numbers
 * remain in the text; restore from the upstream source before compiling.
 */
187 static void inline uhci_fill_td(struct uhci_td *td, __u32 status,
188 __u32 info, __u32 buffer)
195 static void uhci_insert_td(struct uhci *uhci, struct uhci_td *skeltd, struct uhci_td *td)
200 spin_lock_irqsave(&uhci->frame_list_lock, flags);
202 ltd = list_entry(skeltd->fl_list.prev, struct uhci_td, fl_list);
204 td->link = ltd->link;
206 ltd->link = td->dma_handle;
208 list_add_tail(&td->fl_list, &skeltd->fl_list);
210 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
214 * We insert Isochronous transfers directly into the frame list at the
216 * The layout looks as follows:
217 * frame list pointer -> iso td's (if any) ->
218 * periodic interrupt td (if frame 0) -> irq td's -> control qh -> bulk qh
220 static void uhci_insert_td_frame_list(struct uhci *uhci, struct uhci_td *td, unsigned framenum)
224 framenum %= UHCI_NUMFRAMES;
226 spin_lock_irqsave(&uhci->frame_list_lock, flags);
228 td->frame = framenum;
230 /* Is there a TD already mapped there? */
231 if (uhci->fl->frame_cpu[framenum]) {
232 struct uhci_td *ftd, *ltd;
234 ftd = uhci->fl->frame_cpu[framenum];
235 ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
237 list_add_tail(&td->fl_list, &ftd->fl_list);
239 td->link = ltd->link;
241 ltd->link = td->dma_handle;
243 td->link = uhci->fl->frame[framenum];
245 uhci->fl->frame[framenum] = td->dma_handle;
246 uhci->fl->frame_cpu[framenum] = td;
249 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
252 static void uhci_remove_td(struct uhci *uhci, struct uhci_td *td)
256 /* If it's not inserted, don't remove it */
257 spin_lock_irqsave(&uhci->frame_list_lock, flags);
258 if (td->frame == -1 && list_empty(&td->fl_list))
261 if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
262 if (list_empty(&td->fl_list)) {
263 uhci->fl->frame[td->frame] = td->link;
264 uhci->fl->frame_cpu[td->frame] = NULL;
268 ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
269 uhci->fl->frame[td->frame] = ntd->dma_handle;
270 uhci->fl->frame_cpu[td->frame] = ntd;
275 ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
276 ptd->link = td->link;
280 td->link = UHCI_PTR_TERM;
282 list_del_init(&td->fl_list);
286 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
290 * Inserts a td into qh list at the top.
/*
 * Chain all of a URB's TDs into @qh: the first TD becomes the QH's
 * element pointer, subsequent TDs are linked through their predecessors'
 * link fields, and the final TD is terminated.  @breadth selects
 * breadth-first traversal (no UHCI_PTR_DEPTH bit) vs depth-first.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: loop-iterator advancement, early return, braces) and stray
 * listing-number prefixes; restore from upstream 2.4 uhci.c before use.
 */
292 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, int breadth)
294 struct list_head *tmp, *head;
295 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
296 struct uhci_td *td, *ptd;
/* nothing to do for a URB with no TDs */
298 if (list_empty(&urbp->td_list))
301 head = &urbp->td_list;
304 /* Ordering isn't important here yet since the QH hasn't been */
305 /* inserted into the schedule yet */
306 td = list_entry(tmp, struct uhci_td, list);
308 /* Add the first TD to the QH element pointer */
309 qh->element = td->dma_handle | (breadth ? 0 : UHCI_PTR_DEPTH);
313 /* Then link the rest of the TD's */
315 while (tmp != head) {
316 td = list_entry(tmp, struct uhci_td, list);
/* ptd is presumably the previously visited TD — confirm against upstream */
320 ptd->link = td->dma_handle | (breadth ? 0 : UHCI_PTR_DEPTH);
/* terminate the chain at the last TD */
325 ptd->link = UHCI_PTR_TERM;
328 static void uhci_free_td(struct uhci *uhci, struct uhci_td *td)
330 if (!list_empty(&td->list) || !list_empty(&td->fl_list))
331 dbg("td is still in URB list!");
334 usb_dec_dev_use(td->dev);
336 pci_pool_free(uhci->td_pool, td, td->dma_handle);
339 static struct uhci_qh *uhci_alloc_qh(struct uhci *uhci, struct usb_device *dev)
341 dma_addr_t dma_handle;
344 qh = pci_pool_alloc(uhci->qh_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
348 qh->dma_handle = dma_handle;
350 qh->element = UHCI_PTR_TERM;
351 qh->link = UHCI_PTR_TERM;
356 INIT_LIST_HEAD(&qh->list);
357 INIT_LIST_HEAD(&qh->remove_list);
359 usb_inc_dev_use(dev);
364 static void uhci_free_qh(struct uhci *uhci, struct uhci_qh *qh)
366 if (!list_empty(&qh->list))
367 dbg("qh list not empty!");
368 if (!list_empty(&qh->remove_list))
369 dbg("qh still in remove_list!");
372 usb_dec_dev_use(qh->dev);
374 pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
378 * MUST be called with uhci->frame_list_lock acquired
/*
 * Splice a URB's QH (and its queued siblings) after the last QH hanging
 * off skeleton QH @skelqh.  Caller MUST hold uhci->frame_list_lock
 * (see the comment above in the file); uhci_insert_qh() is the locking
 * wrapper.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: 'lqh' declaration, loop-iterator advancement, braces) and
 * stray listing-number prefixes; restore from upstream 2.4 uhci.c.
 */
380 static void _uhci_insert_qh(struct uhci *uhci, struct uhci_qh *skelqh, struct urb *urb)
382 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
383 struct list_head *head, *tmp;
386 /* Grab the last QH */
387 lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
/* point every QH queued behind the old last QH at our QH */
390 head = &lqh->urbp->queue_list;
392 while (head != tmp) {
393 struct urb_priv *turbp =
394 list_entry(tmp, struct urb_priv, queue_list);
398 turbp->qh->link = urbp->qh->dma_handle | UHCI_PTR_QH;
/* our own queued URBs inherit the old last QH's onward link */
402 head = &urbp->queue_list;
404 while (head != tmp) {
405 struct urb_priv *turbp =
406 list_entry(tmp, struct urb_priv, queue_list);
410 turbp->qh->link = lqh->link;
413 urbp->qh->link = lqh->link;
414 mb(); /* Ordering is important */
415 lqh->link = urbp->qh->dma_handle | UHCI_PTR_QH;
417 list_add_tail(&urbp->qh->list, &skelqh->list);
420 static void uhci_insert_qh(struct uhci *uhci, struct uhci_qh *skelqh, struct urb *urb)
424 spin_lock_irqsave(&uhci->frame_list_lock, flags);
425 _uhci_insert_qh(uhci, skelqh, urb);
426 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
/*
 * Unlink a QH from the hardware schedule and park it on the controller's
 * qh_remove_list; the actual freeing happens later, after the HC has had
 * an interrupt's worth of time to stop referencing it.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: local declarations, loop advancement, braces) and stray
 * listing-number prefixes; restore from upstream 2.4 uhci.c before use.
 */
429 static void uhci_remove_qh(struct uhci *uhci, struct uhci_qh *qh)
439 /* Only go through the hoops if it's actually linked in */
440 spin_lock_irqsave(&uhci->frame_list_lock, flags);
441 if (!list_empty(&qh->list)) {
442 pqh = list_entry(qh->list.prev, struct uhci_qh, list);
445 struct list_head *head, *tmp;
/* repoint every URB queued behind the previous QH past us */
447 head = &pqh->urbp->queue_list;
449 while (head != tmp) {
450 struct urb_priv *turbp =
451 list_entry(tmp, struct urb_priv, queue_list);
455 turbp->qh->link = qh->link;
459 pqh->link = qh->link;
/* neutralize both hardware pointers before the HC can revisit us */
461 qh->element = qh->link = UHCI_PTR_TERM;
463 list_del_init(&qh->list);
465 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
467 spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
469 /* Check to see if the remove list is empty. Set the IOC bit */
470 /* to force an interrupt so we can remove the QH */
471 if (list_empty(&uhci->qh_remove_list))
472 uhci_set_next_interrupt(uhci);
474 list_add(&qh->remove_list, &uhci->qh_remove_list);
476 spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
479 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
481 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
482 struct list_head *head, *tmp;
484 head = &urbp->td_list;
486 while (head != tmp) {
487 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
492 td->info |= TD_TOKEN_TOGGLE;
494 td->info &= ~TD_TOKEN_TOGGLE;
502 /* This function will append one URB's QH to another URB's QH. This is for */
503 /* USB_QUEUE_BULK support for bulk transfers and soon implicitly for */
504 /* control transfers */
/*
 * Append @urb's QH to the queue already anchored by @eurb (USB_QUEUE_BULK
 * support): find the first URB in the queue, fix up our data toggles to
 * continue from the last queued TD, then hardware-link our QH after the
 * queue's last TD.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: 'flags' declaration, loop bodies/advancement, braces) and
 * stray listing-number prefixes; restore from upstream 2.4 uhci.c.
 */
505 static void uhci_append_queued_urb(struct uhci *uhci, struct urb *eurb, struct urb *urb)
507 struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
508 struct list_head *tmp;
509 struct uhci_td *lltd;
512 eurbp = eurb->hcpriv;
515 spin_lock_irqsave(&uhci->frame_list_lock, flags);
517 /* Find the first URB in the queue */
519 struct list_head *head = &eurbp->queue_list;
522 while (tmp != head) {
523 struct urb_priv *turbp =
524 list_entry(tmp, struct urb_priv, queue_list);
532 tmp = &eurbp->queue_list;
/* furbp/lurbp: first and last URBs currently in the queue */
534 furbp = list_entry(tmp, struct urb_priv, queue_list);
535 lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
/* lltd: last TD of the last queued URB — our attach point */
537 lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
539 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe),
540 uhci_fixup_toggle(urb, uhci_toggle(lltd->info) ^ 1));
542 /* All qh's in the queue need to link to the next queue */
543 urbp->qh->link = eurbp->qh->link;
545 mb(); /* Make sure we flush everything */
546 /* Only support bulk right now, so no depth */
547 lltd->link = urbp->qh->dma_handle | UHCI_PTR_QH;
549 list_add_tail(&urbp->queue_list, &furbp->queue_list);
553 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
/*
 * Remove @urb from a bulk queue (inverse of uhci_append_queued_urb).
 * Repairs the data-toggle sequence for the URBs that remain, then
 * re-links the hardware chain around the departing URB: if we were the
 * queue head, the next URB's QH is re-inserted on the bulk skeleton;
 * otherwise the previous URB's last TD is pointed at our successor (or
 * terminated if we were last).
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: 'flags'/'toggle' declarations, if/else scaffolding, loop
 * advancement) and stray listing-number prefixes; restore from upstream
 * 2.4 uhci.c before use.
 */
556 static void uhci_delete_queued_urb(struct uhci *uhci, struct urb *urb)
558 struct urb_priv *urbp, *nurbp;
559 struct list_head *head, *tmp;
560 struct urb_priv *purbp;
561 struct uhci_td *pltd;
567 spin_lock_irqsave(&uhci->frame_list_lock, flags);
569 if (list_empty(&urbp->queue_list))
572 nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
574 /* Fix up the toggle for the next URB's */
576 /* We set the toggle when we unlink */
577 toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
579 /* If we're in the middle of the queue, grab the toggle */
580 /* from the TD previous to us */
581 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
584 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
586 toggle = uhci_toggle(pltd->info) ^ 1;
589 head = &urbp->queue_list;
591 while (head != tmp) {
592 struct urb_priv *turbp;
594 turbp = list_entry(tmp, struct urb_priv, queue_list);
601 toggle = uhci_fixup_toggle(turbp->urb, toggle);
604 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
605 usb_pipeout(urb->pipe), toggle);
/* we were the queue head: promote the next URB onto the bulk skeleton */
610 _uhci_insert_qh(uhci, uhci->skel_bulk_qh, nurbp->urb);
612 /* We're somewhere in the middle (or end). A bit trickier */
613 /* than the head scenario */
614 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
617 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
619 pltd->link = nurbp->qh->dma_handle | UHCI_PTR_QH;
621 /* The next URB happens to be the beginning, so */
622 /* we're the last, end the chain */
623 pltd->link = UHCI_PTR_TERM;
626 list_del_init(&urbp->queue_list);
629 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
/*
 * Allocate and initialize the per-URB private bookkeeping structure, and
 * (for non-root-hub URBs) set up the PCI DMA mappings for the transfer
 * buffer and, for control transfers, the setup packet.  Returns the new
 * urb_priv, or (presumably) NULL on failure — the error paths are elided
 * in this listing.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: NULL checks, urb->hcpriv assignment, return statements, error
 * unwinding) and stray listing-number prefixes; restore from upstream
 * 2.4 uhci.c before use.
 */
632 static struct urb_priv *uhci_alloc_urb_priv(struct uhci *uhci, struct urb *urb)
634 struct urb_priv *urbp;
636 urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
638 err("uhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n");
642 memset((void *)urbp, 0, sizeof(*urbp));
644 urbp->inserttime = jiffies;
645 urbp->fsbrtime = jiffies;
647 urbp->dev = urb->dev;
649 INIT_LIST_HEAD(&urbp->td_list);
650 INIT_LIST_HEAD(&urbp->queue_list);
651 INIT_LIST_HEAD(&urbp->complete_list);
/* root-hub URBs are handled in software and need no DMA mappings */
655 if (urb->dev != uhci->rh.dev) {
656 if (urb->transfer_buffer_length) {
657 urbp->transfer_buffer_dma_handle = pci_map_single(uhci->dev,
658 urb->transfer_buffer, urb->transfer_buffer_length,
659 usb_pipein(urb->pipe) ? PCI_DMA_FROMDEVICE :
661 if (!urbp->transfer_buffer_dma_handle)
665 if (usb_pipetype(urb->pipe) == PIPE_CONTROL && urb->setup_packet) {
666 urbp->setup_packet_dma_handle = pci_map_single(uhci->dev,
667 urb->setup_packet, sizeof(struct usb_ctrlrequest),
669 if (!urbp->setup_packet_dma_handle)
678 * MUST be called with urb->lock acquired
680 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
682 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
686 list_add_tail(&td->list, &urbp->td_list);
690 * MUST be called with urb->lock acquired
692 static void uhci_remove_td_from_urb(struct uhci_td *td)
694 if (list_empty(&td->list))
697 list_del_init(&td->list);
703 * MUST be called with urb->lock acquired
/*
 * Tear down a URB's private state: free every TD (unlinking each from
 * both the URB list and the frame list), release the PCI DMA mappings
 * for the setup packet and transfer buffer, and return the urb_priv to
 * the slab cache.
 * MUST be called with urb->lock acquired.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: early returns, loop advancement, urb->hcpriv clearing) and
 * stray listing-number prefixes; restore from upstream 2.4 uhci.c.
 */
705 static void uhci_destroy_urb_priv(struct urb *urb)
707 struct list_head *head, *tmp;
708 struct urb_priv *urbp;
711 urbp = (struct urb_priv *)urb->hcpriv;
715 if (!urbp->dev || !urbp->dev->bus || !urbp->dev->bus->hcpriv) {
716 warn("uhci_destroy_urb_priv: urb %p belongs to disconnected device or bus?", urb);
720 if (!list_empty(&urb->urb_list))
721 warn("uhci_destroy_urb_priv: urb %p still on uhci->urb_list or uhci->remove_list", urb);
723 if (!list_empty(&urbp->complete_list))
724 warn("uhci_destroy_urb_priv: urb %p still on uhci->complete_list", urb);
726 uhci = urbp->dev->bus->hcpriv;
728 head = &urbp->td_list;
730 while (tmp != head) {
731 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
735 uhci_remove_td_from_urb(td);
736 uhci_remove_td(uhci, td);
737 uhci_free_td(uhci, td);
740 if (urbp->setup_packet_dma_handle) {
741 pci_unmap_single(uhci->dev, urbp->setup_packet_dma_handle,
742 sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
743 urbp->setup_packet_dma_handle = 0;
746 if (urbp->transfer_buffer_dma_handle) {
747 pci_unmap_single(uhci->dev, urbp->transfer_buffer_dma_handle,
748 urb->transfer_buffer_length, usb_pipein(urb->pipe) ?
749 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
750 urbp->transfer_buffer_dma_handle = 0;
754 kmem_cache_free(uhci_up_cachep, urbp);
757 static void uhci_inc_fsbr(struct uhci *uhci, struct urb *urb)
760 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
762 spin_lock_irqsave(&uhci->frame_list_lock, flags);
764 if ((!(urb->transfer_flags & USB_NO_FSBR)) && !urbp->fsbr) {
766 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
767 uhci->skel_term_qh->link = uhci->skel_hs_control_qh->dma_handle | UHCI_PTR_QH;
770 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
773 static void uhci_dec_fsbr(struct uhci *uhci, struct urb *urb)
776 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
778 spin_lock_irqsave(&uhci->frame_list_lock, flags);
780 if ((!(urb->transfer_flags & USB_NO_FSBR)) && urbp->fsbr) {
783 uhci->fsbrtimeout = jiffies + FSBR_DELAY;
786 spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
790 * Map status to standard result codes
792 * <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)]
793 * <dir_out> is True for output TDs and False for input TDs.
795 static int uhci_map_status(int status, int dir_out)
799 if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
801 if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
807 if (status & TD_CTRL_NAK) /* NAK */
809 if (status & TD_CTRL_BABBLE) /* Babble */
811 if (status & TD_CTRL_DBUFERR) /* Buffer error */
813 if (status & TD_CTRL_STALLED) /* Stalled */
815 if (status & TD_CTRL_ACTIVE) /* Active */
/*
 * Build and schedule the TD chain for a control transfer: one SETUP TD,
 * zero or more DATA TDs (alternating DATA1/DATA0), and a final status TD
 * (opposite direction, always DATA1, IOC set).  Low-speed devices go on
 * the low-speed control skeleton; full-speed ones on the high-speed
 * skeleton with FSBR enabled.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: local declarations, allocation-failure checks, the DATA loop
 * scaffolding, return statements) and stray listing-number prefixes;
 * restore from upstream 2.4 uhci.c before use.
 */
824 static int uhci_submit_control(struct urb *urb)
826 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
827 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
830 unsigned long destination, status;
831 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
832 int len = urb->transfer_buffer_length;
833 dma_addr_t data = urbp->transfer_buffer_dma_handle;
835 /* The "pipe" thing contains the destination in bits 8--18 */
836 destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
/* 3 error retries; low-speed bit copied straight from the pipe encoding */
839 status = (urb->pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE | (3 << 27);
842 * Build the TD for the control request
844 td = uhci_alloc_td(uhci, urb->dev);
848 uhci_add_td_to_urb(urb, td);
/* (7 << 21): SETUP packets are always 8 bytes (length field is len-1) */
849 uhci_fill_td(td, status, destination | (7 << 21),
850 urbp->setup_packet_dma_handle);
853 * If direction is "send", change the frame from SETUP (0x2D)
854 * to OUT (0xE1). Else change it from SETUP to IN (0x69).
856 destination ^= (USB_PID_SETUP ^ usb_packetid(urb->pipe));
858 if (!(urb->transfer_flags & USB_DISABLE_SPD))
859 status |= TD_CTRL_SPD;
862 * Build the DATA TD's
870 td = uhci_alloc_td(uhci, urb->dev);
874 /* Alternate Data0/1 (start with Data1) */
875 destination ^= TD_TOKEN_TOGGLE;
877 uhci_add_td_to_urb(urb, td);
878 uhci_fill_td(td, status, destination | ((pktsze - 1) << 21),
886 * Build the final TD for control status
888 td = uhci_alloc_td(uhci, urb->dev);
893 * It's IN if the pipe is an output pipe or we're not expecting
896 destination &= ~TD_TOKEN_PID_MASK;
897 if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
898 destination |= USB_PID_IN;
900 destination |= USB_PID_OUT;
902 destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
/* status stage must not short-packet-detect */
904 status &= ~TD_CTRL_SPD;
906 uhci_add_td_to_urb(urb, td);
907 uhci_fill_td(td, status | TD_CTRL_IOC,
908 destination | (UHCI_NULL_DATA_SIZE << 21), 0);
910 qh = uhci_alloc_qh(uhci, urb->dev);
917 /* Low speed or small transfers gets a different queue and treatment */
918 if (urb->pipe & TD_CTRL_LS) {
919 uhci_insert_tds_in_qh(qh, urb, 0);
920 uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
922 uhci_insert_tds_in_qh(qh, urb, 1);
923 uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);
924 uhci_inc_fsbr(uhci, urb);
930 static int usb_control_retrigger_status(struct urb *urb);
/*
 * Examine a control URB's TD chain and convert hardware status into a
 * result code: -EINPROGRESS while any stage is still active, 0 on
 * success, a mapped error otherwise.  A short IN data stage either
 * errors out (USB_DISABLE_SPD) or retriggers just the status stage via
 * usb_control_retrigger_status().
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: goto labels for the short/err paths, loop advancement,
 * returns) and stray listing-number prefixes; restore from upstream 2.4
 * uhci.c before use.
 */
932 static int uhci_result_control(struct urb *urb)
934 struct list_head *tmp, *head;
935 struct urb_priv *urbp = urb->hcpriv;
940 if (list_empty(&urbp->td_list))
943 head = &urbp->td_list;
/* after a retrigger, only the status TD remains — skip straight to it */
945 if (urbp->short_control_packet) {
951 td = list_entry(tmp, struct uhci_td, list);
953 /* The first TD is the SETUP phase, check the status, but skip */
955 status = uhci_status_bits(td->status);
956 if (status & TD_CTRL_ACTIVE)
962 urb->actual_length = 0;
964 /* The rest of the TD's (but the last) are data */
966 while (tmp != head && tmp->next != head) {
967 td = list_entry(tmp, struct uhci_td, list);
971 status = uhci_status_bits(td->status);
972 if (status & TD_CTRL_ACTIVE)
975 urb->actual_length += uhci_actual_length(td->status);
980 /* Check to see if we received a short packet */
981 if (uhci_actual_length(td->status) < uhci_expected_length(td->info)) {
982 if (urb->transfer_flags & USB_DISABLE_SPD) {
987 if (uhci_packetid(td->info) == USB_PID_IN)
988 return usb_control_retrigger_status(urb);
995 td = list_entry(tmp, struct uhci_td, list);
997 /* Control status phase */
998 status = uhci_status_bits(td->status);
1000 #ifdef I_HAVE_BUGGY_APC_BACKUPS
1001 /* APC BackUPS Pro kludge */
1002 /* It tries to send all of the descriptor instead of the amount */
1004 if (td->status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */
1005 status & TD_CTRL_ACTIVE &&
1006 status & TD_CTRL_NAK)
1010 if (status & TD_CTRL_ACTIVE)
1011 return -EINPROGRESS;
1019 ret = uhci_map_status(status, uhci_packetout(td->info));
1021 /* endpoint has stalled - mark it halted */
1022 usb_endpoint_halt(urb->dev, uhci_endpoint(td->info),
1023 uhci_packetout(td->info));
1026 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1027 /* Some debugging code */
1028 dbg("uhci_result_control() failed with status %x", status);
1031 /* Print the chain for debugging purposes */
1032 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
/*
 * After a short IN data stage, free every TD except the final status TD,
 * build a fresh QH around it, and reschedule so the status stage still
 * completes.  Returns -EINPROGRESS (or an error if the QH allocation
 * fails — that path is elided here).
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: loop advancement, error return) and stray listing-number
 * prefixes; restore from upstream 2.4 uhci.c before use.
 */
1041 static int usb_control_retrigger_status(struct urb *urb)
1043 struct list_head *tmp, *head;
1044 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1045 struct uhci *uhci = urb->dev->bus->hcpriv;
1047 urbp->short_control_packet = 1;
1049 /* Create a new QH to avoid pointer overwriting problems */
1050 uhci_remove_qh(uhci, urbp->qh);
1052 /* Delete all of the TD's except for the status TD at the end */
1053 head = &urbp->td_list;
1055 while (tmp != head && tmp->next != head) {
1056 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1060 uhci_remove_td_from_urb(td);
1061 uhci_remove_td(uhci, td);
1062 uhci_free_td(uhci, td);
1065 urbp->qh = uhci_alloc_qh(uhci, urb->dev);
1067 err("unable to allocate new QH for control retrigger");
1071 urbp->qh->urbp = urbp;
1073 /* One TD, who cares about Breadth first? */
1074 uhci_insert_tds_in_qh(urbp->qh, urb, 0);
1076 /* Low speed or small transfers gets a different queue and treatment */
1077 if (urb->pipe & TD_CTRL_LS)
1078 uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
1080 uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);
1082 return -EINPROGRESS;
1086 * Interrupt transfers
/*
 * Build a single-TD interrupt transfer (interrupt URBs fit in one packet)
 * and hang it off the skeleton TD matching the requested polling interval.
 * Returns -EINPROGRESS on success.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: TD declaration, -E return codes for the size check and alloc
 * failure) and stray listing-number prefixes; restore from upstream 2.4
 * uhci.c before use.
 */
1088 static int uhci_submit_interrupt(struct urb *urb)
1091 unsigned long destination, status;
1092 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
1093 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
/* interrupt transfers must fit in a single max-size packet */
1095 if (urb->transfer_buffer_length > usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))
1098 /* The "pipe" thing contains the destination in bits 8--18 */
1099 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1101 status = (urb->pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE | TD_CTRL_IOC;
1103 td = uhci_alloc_td(uhci, urb->dev);
1107 destination |= (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT);
1108 destination |= ((urb->transfer_buffer_length - 1) << 21);
1110 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
1112 uhci_add_td_to_urb(urb, td);
1113 uhci_fill_td(td, status, destination, urbp->transfer_buffer_dma_handle);
/* pick the skeleton whose period best matches urb->interval */
1115 uhci_insert_td(uhci, uhci->skeltd[__interval_to_skel(urb->interval)], td);
1117 return -EINPROGRESS;
/*
 * Walk an interrupt (or, via the #define below, bulk) URB's TDs,
 * accumulating actual_length; -EINPROGRESS while any TD is active,
 * 0 on success, a mapped error on failure.  Short packets either error
 * out (USB_DISABLE_SPD) or end the transfer early.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: TD declaration, loop advancement, goto labels, returns) and
 * stray listing-number prefixes; restore from upstream 2.4 uhci.c.
 */
1120 static int uhci_result_interrupt(struct urb *urb)
1122 struct list_head *tmp, *head;
1123 struct urb_priv *urbp = urb->hcpriv;
1125 unsigned int status;
1128 urb->actual_length = 0;
1130 head = &urbp->td_list;
1132 while (tmp != head) {
1133 td = list_entry(tmp, struct uhci_td, list);
1137 status = uhci_status_bits(td->status);
1138 if (status & TD_CTRL_ACTIVE)
1139 return -EINPROGRESS;
1141 urb->actual_length += uhci_actual_length(td->status);
1146 if (uhci_actual_length(td->status) < uhci_expected_length(td->info)) {
1147 if (urb->transfer_flags & USB_DISABLE_SPD) {
1158 ret = uhci_map_status(status, uhci_packetout(td->info));
1160 /* endpoint has stalled - mark it halted */
1161 usb_endpoint_halt(urb->dev, uhci_endpoint(td->info),
1162 uhci_packetout(td->info));
1165 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1166 /* Some debugging code */
1167 dbg("uhci_result_interrupt/bulk() failed with status %x",
1171 /* Print the chain for debugging purposes */
1173 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
1175 uhci_show_td(td, errbuf, ERRBUF_LEN, 0);
/*
 * Re-arm a completed interrupt URB for its next period: reactivate its
 * single TD, refresh the data toggle, and mark the URB in progress
 * again.  Root-hub URBs are handled elsewhere and skipped.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: an unlock/goto for the root-hub case, braces) and stray
 * listing-number prefixes; restore from upstream 2.4 uhci.c before use.
 */
1184 static void uhci_reset_interrupt(struct urb *urb)
1186 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
1187 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1189 unsigned long flags;
1191 spin_lock_irqsave(&urb->lock, flags);
1193 /* Root hub is special */
1194 if (urb->dev == uhci->rh.dev)
1197 td = list_entry(urbp->td_list.next, struct uhci_td, list);
/* 0x2F000000 keeps the reserved/config bits; everything else re-armed */
1199 td->status = (td->status & 0x2F000000) | TD_CTRL_ACTIVE | TD_CTRL_IOC;
1200 td->info &= ~TD_TOKEN_TOGGLE;
1201 td->info |= (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT);
1202 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
1205 urb->status = -EINPROGRESS;
1207 spin_unlock_irqrestore(&urb->lock, flags);
/*
 * Build and schedule the TD chain for a bulk transfer: one TD per
 * max-packet-size chunk (zero-length allowed), an optional trailing
 * zero-length TD for USB_ZERO_PACKET on exact-multiple OUT transfers,
 * IOC on the last TD.  Supports queueing behind an existing URB on the
 * same endpoint (USB_QUEUE_BULK) and enables FSBR.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: 'pktsze' computation, -E return codes, alloc-failure checks,
 * loop scaffolding) and stray listing-number prefixes; restore from
 * upstream 2.4 uhci.c before use.
 */
1213 static int uhci_submit_bulk(struct urb *urb, struct urb *eurb)
1217 unsigned long destination, status;
1218 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
1219 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
1220 int len = urb->transfer_buffer_length;
1221 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1222 dma_addr_t data = urbp->transfer_buffer_dma_handle;
1224 if (len < 0 || maxsze <= 0)
1227 /* Can't have low speed bulk transfers */
1228 if (urb->pipe & TD_CTRL_LS)
1231 /* The "pipe" thing contains the destination in bits 8--18 */
1232 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
/* full speed, 3 error retries */
1235 status = TD_CTRL_ACTIVE | (3 << TD_CTRL_C_ERR_SHIFT);
1237 if (!(urb->transfer_flags & USB_DISABLE_SPD))
1238 status |= TD_CTRL_SPD;
1241 * Build the DATA TD's
1243 do { /* Allow zero length packets */
1246 if (pktsze > maxsze)
1249 td = uhci_alloc_td(uhci, urb->dev);
1253 uhci_add_td_to_urb(urb, td);
1254 uhci_fill_td(td, status, destination |
1255 (((pktsze - 1) & UHCI_NULL_DATA_SIZE) << 21) |
1256 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1257 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1263 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1264 usb_pipeout(urb->pipe));
1268 * USB_ZERO_PACKET means adding a 0-length packet, if
1269 * direction is OUT and the transfer_length was an
1270 * exact multiple of maxsze, hence
1271 * (len = transfer_length - N * maxsze) == 0
1272 * however, if transfer_length == 0, the zero packet
1273 * was already prepared above.
1275 if (usb_pipeout(urb->pipe) && (urb->transfer_flags & USB_ZERO_PACKET) &&
1276 !len && urb->transfer_buffer_length) {
1277 td = uhci_alloc_td(uhci, urb->dev);
1281 uhci_add_td_to_urb(urb, td);
1282 uhci_fill_td(td, status, destination |
1283 (UHCI_NULL_DATA_SIZE << 21) |
1284 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1285 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1288 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1289 usb_pipeout(urb->pipe));
1292 /* Set the flag on the last packet */
1293 td->status |= TD_CTRL_IOC;
1295 qh = uhci_alloc_qh(uhci, urb->dev);
1302 /* Always assume breadth first */
1303 uhci_insert_tds_in_qh(qh, urb, 1);
1305 if (urb->transfer_flags & USB_QUEUE_BULK && eurb)
1306 uhci_append_queued_urb(uhci, eurb, urb);
1308 uhci_insert_qh(uhci, uhci->skel_bulk_qh, urb);
1310 uhci_inc_fsbr(uhci, urb);
1312 return -EINPROGRESS;
1315 /* We can use the result interrupt since they're identical */
1316 #define uhci_result_bulk uhci_result_interrupt
1319 * Isochronous transfers
/*
 * Scan the controller's pending URB list for in-progress isochronous
 * URBs on the same device/pipe; report the first one's start frame and
 * the frame just past the last one's end (mod 1024).  Returns -1 when
 * no matching URB exists (ret's success value is elided in this listing).
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: loop advancement, locking, returns) and stray listing-number
 * prefixes; restore from upstream 2.4 uhci.c before use.
 */
1321 static int isochronous_find_limits(struct urb *urb, unsigned int *start, unsigned int *end)
1323 struct urb *last_urb = NULL;
1324 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
1325 struct list_head *tmp, *head;
1328 head = &uhci->urb_list;
1330 while (tmp != head) {
1331 struct urb *u = list_entry(tmp, struct urb, urb_list);
1335 /* look for pending URB's with identical pipe handle */
1336 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1337 (u->status == -EINPROGRESS) && (u != urb)) {
1339 *start = u->start_frame;
1345 *end = (last_urb->start_frame + last_urb->number_of_packets) & 1023;
1348 ret = -1; /* no previous urb found */
/*
 * Choose urb->start_frame for an isochronous URB: continue right after
 * any existing queue on the same pipe, or — with USB_ISO_ASAP and no
 * predecessor — schedule 10 frames past the current frame counter.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: 'limits'/'curframe' declarations, if/else scaffolding,
 * return) and stray listing-number prefixes; restore from upstream 2.4
 * uhci.c before use.
 */
1353 static int isochronous_find_start(struct urb *urb)
1356 unsigned int start = 0, end = 0;
1358 if (urb->number_of_packets > 900) /* 900? Why? */
1361 limits = isochronous_find_limits(urb, &start, &end);
1363 if (urb->transfer_flags & USB_ISO_ASAP) {
/* no queue to join: start a comfortable 10 frames in the future */
1367 curframe = uhci_get_current_frame_number(urb->dev) % UHCI_NUMFRAMES;
1368 urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
1370 urb->start_frame = end;
1372 urb->start_frame %= UHCI_NUMFRAMES;
1373 /* FIXME: Sanity check */
1380 * Isochronous transfers
/*
 * Build an isochronous transfer: one TD per iso_frame_desc entry,
 * inserted directly into consecutive frame-list slots starting at the
 * frame chosen by isochronous_find_start(); IOC on the last TD.
 * Returns -EINPROGRESS on success.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: TD declaration, error propagation for find_start and alloc
 * failure, the zero-length 'continue') and stray listing-number
 * prefixes; restore from upstream 2.4 uhci.c before use.
 */
1382 static int uhci_submit_isochronous(struct urb *urb)
1385 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
1386 int i, ret, framenum;
1387 int status, destination;
1388 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1390 status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1391 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1393 ret = isochronous_find_start(urb);
1397 framenum = urb->start_frame;
1398 for (i = 0; i < urb->number_of_packets; i++, framenum++) {
/* empty frames get no TD — presumably skipped via continue; confirm */
1399 if (!urb->iso_frame_desc[i].length)
1402 td = uhci_alloc_td(uhci, urb->dev);
1406 uhci_add_td_to_urb(urb, td);
1407 uhci_fill_td(td, status, destination | ((urb->iso_frame_desc[i].length - 1) << 21),
1408 urbp->transfer_buffer_dma_handle + urb->iso_frame_desc[i].offset);
1410 if (i + 1 >= urb->number_of_packets)
1411 td->status |= TD_CTRL_IOC;
1413 uhci_insert_td_frame_list(uhci, td, framenum);
1416 return -EINPROGRESS;
/*
 * Collect per-frame results for an isochronous URB: -EINPROGRESS while
 * any TD is still active; otherwise fill each iso_frame_desc's
 * actual_length and status and total up urb->actual_length.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers: 'i'/'ret' declarations, loop advancement, overall-status
 * accumulation, return) and stray listing-number prefixes; restore from
 * upstream 2.4 uhci.c before use.
 */
1419 static int uhci_result_isochronous(struct urb *urb)
1421 struct list_head *tmp, *head;
1422 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1426 urb->actual_length = 0;
1429 head = &urbp->td_list;
1431 while (tmp != head) {
1432 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1437 if (td->status & TD_CTRL_ACTIVE)
1438 return -EINPROGRESS;
1440 actlength = uhci_actual_length(td->status);
1441 urb->iso_frame_desc[i].actual_length = actlength;
1442 urb->actual_length += actlength;
1444 status = uhci_map_status(uhci_status_bits(td->status), usb_pipeout(urb->pipe));
1445 urb->iso_frame_desc[i].status = status;
1458 * MUST be called with uhci->urb_list_lock acquired
/*
 * Look for another in-flight URB on the same device/endpoint (same dev and
 * pipe, status -EINPROGRESS), used for bulk queueing decisions in
 * uhci_submit_urb(). Isochronous URBs are never matched.
 * MUST be called with uhci->urb_list_lock held (per the comment above).
 */
1460 static struct urb *uhci_find_urb_ep(struct uhci *uhci, struct urb *urb)
1462 struct list_head *tmp, *head;
1464 /* We don't match Isoc transfers since they are special */
1465 if (usb_pipeisoc(urb->pipe))
1468 head = &uhci->urb_list;
1470 while (tmp != head) {
1471 struct urb *u = list_entry(tmp, struct urb, urb_list);
1475 if (u->dev == urb->dev && u->pipe == urb->pipe &&
1476 u->status == -EINPROGRESS)
/*
 * Top-level URB submission entry point. Validates the URB, pins the device
 * (usb_inc_dev_use), then under urb_list_lock + urb->lock dispatches on
 * pipe type to the per-type submit helper. Interrupt and isochronous pipes
 * additionally check/claim bus bandwidth before submission. The root hub
 * is short-circuited to rh_submit_urb(). On -EINPROGRESS the URB is added
 * to uhci->urb_list for the IRQ handler to poll; on immediate failure it
 * is unlinked and its completion is called.
 * Lock order throughout the driver: urb_list_lock, then urb->lock.
 */
1483 static int uhci_submit_urb(struct urb *urb)
1487 unsigned long flags;
1494 if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) {
1495 warn("uhci_submit_urb: urb %p belongs to disconnected device or bus?", urb);
1499 uhci = (struct uhci *)urb->dev->bus->hcpriv;
/* Hold a device reference for the lifetime of the transfer. */
1501 usb_inc_dev_use(urb->dev);
1503 spin_lock_irqsave(&uhci->urb_list_lock, flags);
1504 spin_lock(&urb->lock);
/* Reject URBs that are already in flight or mid-unlink. */
1506 if (urb->status == -EINPROGRESS || urb->status == -ECONNRESET ||
1507 urb->status == -ECONNABORTED) {
1508 dbg("uhci_submit_urb: urb not available to submit (status = %d)", urb->status);
1509 /* Since we can have problems on the out path */
1510 spin_unlock(&urb->lock);
1511 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1512 usb_dec_dev_use(urb->dev);
1517 INIT_LIST_HEAD(&urb->urb_list);
1518 if (!uhci_alloc_urb_priv(uhci, urb)) {
/* Existing URB on the same endpoint? (needed for bulk queueing) */
1524 eurb = uhci_find_urb_ep(uhci, urb);
1525 if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) {
1531 /* Short circuit the virtual root hub */
1532 if (urb->dev == uhci->rh.dev) {
1533 ret = rh_submit_urb(urb);
1538 switch (usb_pipetype(urb->pipe)) {
1540 ret = uhci_submit_control(urb);
1542 case PIPE_INTERRUPT:
1543 if (urb->bandwidth == 0) { /* not yet checked/allocated */
1544 bustime = usb_check_bandwidth(urb->dev, urb);
1548 ret = uhci_submit_interrupt(urb);
/* Claim bandwidth only once the submit actually succeeded. */
1549 if (ret == -EINPROGRESS)
1550 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1552 } else /* bandwidth is already set */
1553 ret = uhci_submit_interrupt(urb);
1556 ret = uhci_submit_bulk(urb, eurb);
1558 case PIPE_ISOCHRONOUS:
1559 if (urb->bandwidth == 0) { /* not yet checked/allocated */
1560 if (urb->number_of_packets <= 0) {
1564 bustime = usb_check_bandwidth(urb->dev, urb);
1570 ret = uhci_submit_isochronous(urb);
1571 if (ret == -EINPROGRESS)
1572 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1573 } else /* bandwidth is already set */
1574 ret = uhci_submit_isochronous(urb);
1581 if (ret == -EINPROGRESS) {
1582 /* We use _tail to make find_urb_ep more efficient */
1583 list_add_tail(&urb->urb_list, &uhci->urb_list);
1585 spin_unlock(&urb->lock);
1586 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
/* Failure path: tear down whatever the submit helper built. */
1591 uhci_unlink_generic(uhci, urb);
1593 spin_unlock(&urb->lock);
1594 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1596 /* Only call completion if it was successful */
1598 uhci_call_completion(urb);
1604 * Return the result of a transfer
1606 * MUST be called with urb_list_lock acquired
/*
 * Check a pending URB for completion (called from the IRQ handler for each
 * URB on uhci->urb_list). Dispatches to the per-pipe-type result routine;
 * -EINPROGRESS means "not done yet" and the URB stays queued. Otherwise
 * the URB is unlinked (releasing iso/interrupt bandwidth where held),
 * removed from urb_list, and queued for completion via uhci_add_complete().
 * MUST be called with urb_list_lock acquired (per the comment above);
 * takes urb->lock itself.
 */
1608 static void uhci_transfer_result(struct uhci *uhci, struct urb *urb)
1611 unsigned long flags;
1612 struct urb_priv *urbp;
1614 /* The root hub is special */
1615 if (urb->dev == uhci->rh.dev)
1618 spin_lock_irqsave(&urb->lock, flags);
1620 urbp = (struct urb_priv *)urb->hcpriv;
1622 if (urb->status != -EINPROGRESS) {
1623 info("uhci_transfer_result: called for URB %p not in flight?", urb);
1627 switch (usb_pipetype(urb->pipe)) {
1629 ret = uhci_result_control(urb);
1631 case PIPE_INTERRUPT:
1632 ret = uhci_result_interrupt(urb);
1635 ret = uhci_result_bulk(urb);
1637 case PIPE_ISOCHRONOUS:
1638 ret = uhci_result_isochronous(urb);
/* Still running on the hardware: leave it on the list for next time. */
1644 if (ret == -EINPROGRESS)
1647 switch (usb_pipetype(urb->pipe)) {
1650 case PIPE_ISOCHRONOUS:
1651 /* Release bandwidth for Interrupt or Isoc. transfers */
1652 /* Spinlock needed ? */
1654 usb_release_bandwidth(urb->dev, urb, 1);
1655 uhci_unlink_generic(uhci, urb);
1657 case PIPE_INTERRUPT:
1658 /* Interrupts are an exception */
1662 /* Release bandwidth for Interrupt or Isoc. transfers */
1663 /* Spinlock needed ? */
1665 usb_release_bandwidth(urb->dev, urb, 0);
1666 uhci_unlink_generic(uhci, urb);
1669 info("uhci_transfer_result: unknown pipe type %d for urb %p\n",
1670 usb_pipetype(urb->pipe), urb);
1673 /* Remove it from uhci->urb_list */
1674 list_del_init(&urb->urb_list);
/* Defer the actual completion callback to uhci_finish_completion(). */
1677 uhci_add_complete(urb);
1680 spin_unlock_irqrestore(&urb->lock, flags);
1684 * MUST be called with urb->lock acquired
/*
 * Common teardown for a URB being removed from the schedule: drop FSBR,
 * recover the correct data toggle for the endpoint from the TD chain so
 * the next transfer starts with the right toggle, unhook the URB from any
 * bulk queue, and schedule its QH for removal (freed later by the IRQ
 * path via uhci_free_pending_qhs()).
 * MUST be called with urb->lock acquired (per the comment above).
 */
1686 static void uhci_unlink_generic(struct uhci *uhci, struct urb *urb)
1688 struct list_head *head, *tmp;
1689 struct urb_priv *urbp = urb->hcpriv;
1692 /* We can get called when urbp allocation fails, so check */
1696 uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
1699 * Now we need to find out what the last successful toggle was
1700 * so we can update the local data toggle for the next transfer
1702 * There's 3 way's the last successful completed TD is found:
1704 * 1) The TD is NOT active and the actual length < expected length
1705 * 2) The TD is NOT active and it's the last TD in the chain
1706 * 3) The TD is active and the previous TD is NOT active
1708 * Control and Isochronous ignore the toggle, so this is safe
1711 head = &urbp->td_list;
1713 while (tmp != head) {
1714 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
/* Cases 1/2: completed TD -> next toggle is this TD's toggle flipped. */
1718 if (!(td->status & TD_CTRL_ACTIVE) &&
1719 (uhci_actual_length(td->status) < uhci_expected_length(td->info) ||
1721 usb_settoggle(urb->dev, uhci_endpoint(td->info),
1722 uhci_packetout(td->info),
1723 uhci_toggle(td->info) ^ 1);
/* Case 3: first active TD after an inactive one -> reuse its toggle. */
1724 else if ((td->status & TD_CTRL_ACTIVE) && !prevactive)
1725 usb_settoggle(urb->dev, uhci_endpoint(td->info),
1726 uhci_packetout(td->info),
1727 uhci_toggle(td->info));
1729 prevactive = td->status & TD_CTRL_ACTIVE;
1732 uhci_delete_queued_urb(uhci, urb);
1734 /* The interrupt loop will reclaim the QH's */
1735 uhci_remove_qh(uhci, urbp->qh);
/*
 * Cancel a URB. Releases any claimed interrupt/iso bandwidth, removes the
 * URB from the schedule, then completes it in one of three ways:
 *  - root-hub URBs complete immediately;
 *  - USB_ASYNC_UNLINK: status set to -ECONNABORTED and the URB is parked
 *    on urb_remove_list for the IRQ handler to finish (the "next
 *    interrupt" bit is set if the list was empty);
 *  - synchronous unlink: status -ENOENT, and if not in interrupt context
 *    the caller sleeps ~1 frame (schedule_timeout) before completion, so
 *    the HC is guaranteed to be done with the TDs.
 * Takes urb_list_lock then urb->lock, same order as uhci_submit_urb().
 */
1739 static int uhci_unlink_urb(struct urb *urb)
1742 unsigned long flags;
1743 struct urb_priv *urbp = urb->hcpriv;
1748 if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
1751 uhci = (struct uhci *)urb->dev->bus->hcpriv;
1753 spin_lock_irqsave(&uhci->urb_list_lock, flags);
1754 spin_lock(&urb->lock);
1756 /* Release bandwidth for Interrupt or Isoc. transfers */
1757 /* Spinlock needed ? */
1758 if (urb->bandwidth) {
1759 switch (usb_pipetype(urb->pipe)) {
1760 case PIPE_INTERRUPT:
1761 usb_release_bandwidth(urb->dev, urb, 0);
1763 case PIPE_ISOCHRONOUS:
1764 usb_release_bandwidth(urb->dev, urb, 1);
/* Not in flight: nothing to unlink, just drop the locks and bail. */
1771 if (urb->status != -EINPROGRESS) {
1772 spin_unlock(&urb->lock);
1773 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1777 list_del_init(&urb->urb_list);
1779 uhci_unlink_generic(uhci, urb);
1781 /* Short circuit the virtual root hub */
1782 if (urb->dev == uhci->rh.dev) {
1785 spin_unlock(&urb->lock);
1786 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1788 uhci_call_completion(urb);
1790 if (urb->transfer_flags & USB_ASYNC_UNLINK) {
1791 urbp->status = urb->status = -ECONNABORTED;
1793 spin_lock(&uhci->urb_remove_list_lock);
1795 /* If we're the first, set the next interrupt bit */
1796 if (list_empty(&uhci->urb_remove_list))
1797 uhci_set_next_interrupt(uhci);
1799 list_add(&urb->urb_list, &uhci->urb_remove_list);
1801 spin_unlock(&uhci->urb_remove_list_lock);
1803 spin_unlock(&urb->lock);
1804 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1807 urb->status = -ENOENT;
1809 spin_unlock(&urb->lock);
1810 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1812 if (in_interrupt()) { /* wait at least 1 frame */
/* Rate-limit the diagnostic: only the first 10 offenders are logged. */
1813 static int errorcount = 10;
1816 dbg("uhci_unlink_urb called from interrupt for urb %p", urb);
/* Sleep one jiffy past a 1ms frame so the HC has released the TDs. */
1819 schedule_timeout(1+1*HZ/1000);
1821 uhci_call_completion(urb);
/*
 * Demote an idle URB out of Full Speed Bandwidth Reclamation: drop the
 * FSBR reference and flip most of its TDs to depth-first traversal so the
 * controller stops spinning on them every frame. Called from the root-hub
 * timer when a URB has been idle past IDLE_TIMEOUT.
 */
1828 static int uhci_fsbr_timeout(struct uhci *uhci, struct urb *urb)
1830 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1831 struct list_head *head, *tmp;
1834 uhci_dec_fsbr(uhci, urb);
/* Remember we already timed out so the timer doesn't do this twice. */
1836 urbp->fsbr_timeout = 1;
1839 * Ideally we would want to fix qh->element as well, but it's
1840 * read/write by the HC, so that can introduce a race. It's not
1841 * really worth the hassle
1844 head = &urbp->td_list;
1846 while (tmp != head) {
1847 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1852 * Make sure we don't do the last one (since it'll have the
1853 * TERM bit set) as well as we skip every so many TD's to
1854 * make sure it doesn't hog the bandwidth
1856 if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
1857 td->link |= UHCI_PTR_DEPTH;
1866 * uhci_get_current_frame_number()
1868 * returns the current frame number for a USB bus/controller.
/*
 * Read the controller's current frame number straight from the USBFRNUM
 * I/O register. Callers (e.g. isochronous_find_start) reduce the value
 * modulo UHCI_NUMFRAMES themselves.
 */
1870 static int uhci_get_current_frame_number(struct usb_device *dev)
1872 struct uhci *uhci = (struct uhci *)dev->bus->hcpriv;
1874 return inw(uhci->io_addr + USBFRNUM);
/*
 * HCD method table handed to the USB core via usb_alloc_bus() in
 * alloc_uhci(). Only the get_frame_number slot is visible in this
 * listing; the remaining members are elided.
 */
1877 struct usb_operations uhci_device_operations = {
1880 uhci_get_current_frame_number,
1885 /* Virtual Root Hub */
/* USB device descriptor served for the virtual root hub (18 bytes,
 * hub class 0x09, one configuration); copied out by rh_submit_urb()
 * for RH_GET_DESCRIPTOR type 0x01. */
1887 static __u8 root_hub_dev_des[] =
1889 0x12, /* __u8 bLength; */
1890 0x01, /* __u8 bDescriptorType; Device */
1891 0x00, /* __u16 bcdUSB; v1.0 */
1893 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
1894 0x00, /* __u8 bDeviceSubClass; */
1895 0x00, /* __u8 bDeviceProtocol; */
1896 0x08, /* __u8 bMaxPacketSize0; 8 Bytes */
1897 0x00, /* __u16 idVendor; */
1899 0x00, /* __u16 idProduct; */
1901 0x00, /* __u16 bcdDevice; */
1903 0x00, /* __u8 iManufacturer; */
1904 0x02, /* __u8 iProduct; */
1905 0x01, /* __u8 iSerialNumber; */
1906 0x01 /* __u8 bNumConfigurations; */
1910 /* Configuration descriptor */
/* Combined configuration + interface + endpoint descriptor (0x19 bytes
 * total) for the virtual root hub; served for RH_GET_DESCRIPTOR type
 * 0x02. The single interrupt IN endpoint 1 is what rh_send_irq() feeds. */
1911 static __u8 root_hub_config_des[] =
1913 0x09, /* __u8 bLength; */
1914 0x02, /* __u8 bDescriptorType; Configuration */
1915 0x19, /* __u16 wTotalLength; */
1917 0x01, /* __u8 bNumInterfaces; */
1918 0x01, /* __u8 bConfigurationValue; */
1919 0x00, /* __u8 iConfiguration; */
1920 0x40, /* __u8 bmAttributes;
1921 Bit 7: Bus-powered, 6: Self-powered,
1922 Bit 5 Remote-wakeup, 4..0: resvd */
1923 0x00, /* __u8 MaxPower; */
1926 0x09, /* __u8 if_bLength; */
1927 0x04, /* __u8 if_bDescriptorType; Interface */
1928 0x00, /* __u8 if_bInterfaceNumber; */
1929 0x00, /* __u8 if_bAlternateSetting; */
1930 0x01, /* __u8 if_bNumEndpoints; */
1931 0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
1932 0x00, /* __u8 if_bInterfaceSubClass; */
1933 0x00, /* __u8 if_bInterfaceProtocol; */
1934 0x00, /* __u8 if_iInterface; */
1937 0x07, /* __u8 ep_bLength; */
1938 0x05, /* __u8 ep_bDescriptorType; Endpoint */
1939 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
1940 0x03, /* __u8 ep_bmAttributes; Interrupt */
1941 0x08, /* __u16 ep_wMaxPacketSize; 8 Bytes */
1943 0xff /* __u8 ep_bInterval; 255 ms */
/* Hub class descriptor for the virtual root hub. Byte [2] (bNbrPorts)
 * is patched at runtime with the detected port count before being
 * copied out in rh_submit_urb() (RH_GET_DESCRIPTOR | RH_CLASS). */
1946 static __u8 root_hub_hub_des[] =
1948 0x09, /* __u8 bLength; */
1949 0x29, /* __u8 bDescriptorType; Hub-descriptor */
1950 0x02, /* __u8 bNbrPorts; */
1951 0x00, /* __u16 wHubCharacteristics; */
1953 0x01, /* __u8 bPwrOn2pwrGood; 2ms */
1954 0x00, /* __u8 bHubContrCurrent; 0 mA */
1955 0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
1956 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
1959 /* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
/*
 * Build the hub interrupt status bitmap for the virtual root hub: bit i+1
 * is set when port i has a connect-status or enable change (PORTSC bits
 * 0xa = CSC|PEC). The little-endian bitmap is written into the pending
 * interrupt URB's buffer and, if any change is reported and a receiver
 * is registered, the URB is completed.
 */
1960 static int rh_send_irq(struct urb *urb)
1962 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
1963 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1964 unsigned int io_addr = uhci->io_addr;
1965 unsigned long flags;
1969 spin_lock_irqsave(&urb->lock, flags);
1970 for (i = 0; i < uhci->rh.numports; i++) {
/* 0xa masks USBPORTSC change bits; bit 0 of the bitmap is the hub itself. */
1971 data |= ((inw(io_addr + USBPORTSC1 + i * 2) & 0xa) > 0 ? (1 << (i + 1)) : 0);
1972 len = (i + 1) / 8 + 1;
1975 *(__u16 *) urb->transfer_buffer = cpu_to_le16(data);
1976 urb->actual_length = len;
1979 spin_unlock_irqrestore(&urb->lock, flags);
1981 if ((data > 0) && (uhci->rh.send != 0)) {
1982 dbg("root-hub INT complete: port1: %x port2: %x data: %x",
1983 inw(io_addr + USBPORTSC1), inw(io_addr + USBPORTSC2), data);
1984 uhci_call_completion(urb);
1990 /* Virtual Root Hub INTs are polled by this timer every "interval" ms */
1991 static int rh_init_int_timer(struct urb *urb);
/*
 * Periodic root-hub timer (see rh_init_int_timer). Besides polling port
 * status, it doubles as the driver's housekeeping tick:
 *  - expires FSBR for URBs idle past IDLE_TIMEOUT (uhci_fsbr_timeout);
 *  - moves URBs whose urb->timeout elapsed onto a private list and kills
 *    them asynchronously (USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED);
 *  - fully detaches the FSBR loop once uhci->fsbrtimeout expires;
 *  - globally suspends the HC when no ports are connected;
 *  - finally re-arms itself via rh_init_int_timer().
 */
1993 static void rh_int_timer_do(unsigned long ptr)
1995 struct urb *urb = (struct urb *)ptr;
1996 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
1997 struct list_head list, *tmp, *head;
1998 unsigned long flags;
/* Local list collects timed-out URBs so they can be killed after
 * urb_list_lock is dropped. */
2003 INIT_LIST_HEAD(&list);
2005 spin_lock_irqsave(&uhci->urb_list_lock, flags);
2006 head = &uhci->urb_list;
2008 while (tmp != head) {
2009 struct urb *u = list_entry(tmp, struct urb, urb_list);
2010 struct urb_priv *up = (struct urb_priv *)u->hcpriv;
2014 spin_lock(&u->lock);
2016 /* Check if the FSBR timed out */
2017 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
2018 uhci_fsbr_timeout(uhci, u);
2020 /* Check if the URB timed out */
2021 if (u->timeout && time_after_eq(jiffies, up->inserttime + u->timeout)) {
2022 list_del(&u->urb_list);
2023 list_add_tail(&u->urb_list, &list);
2026 spin_unlock(&u->lock);
2028 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
/* Second pass, lock-free: kill everything that timed out. */
2032 while (tmp != head) {
2033 struct urb *u = list_entry(tmp, struct urb, urb_list);
2037 u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
2041 /* Really disable FSBR */
2042 if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
2043 uhci->fsbrtimeout = 0;
2044 uhci->skel_term_qh->link = UHCI_PTR_TERM;
2047 /* enter global suspend if nothing connected */
2048 if (!uhci->is_suspended && !ports_active(uhci))
2051 rh_init_int_timer(urb);
2054 /* Root Hub INTs are polled by this timer */
/*
 * (Re)arm the root-hub polling timer. The interval comes from the hub
 * interrupt URB but is clamped to a minimum of 30ms; expiry calls
 * rh_int_timer_do() with the URB as its argument.
 */
2055 static int rh_init_int_timer(struct urb *urb)
2057 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
2059 uhci->rh.interval = urb->interval;
2060 init_timer(&uhci->rh.rh_int_timer);
2061 uhci->rh.rh_int_timer.function = rh_int_timer_do;
2062 uhci->rh.rh_int_timer.data = (unsigned long)urb;
/* interval is in ms; enforce a 30ms floor before converting to jiffies. */
2063 uhci->rh.rh_int_timer.expires = jiffies + (HZ * (urb->interval < 30 ? 30 : urb->interval)) / 1000;
2064 add_timer(&uhci->rh.rh_int_timer);
/* Helpers used only inside rh_submit_urb()'s big switch: OK() records the
 * reply length and leaves the case. They rely on rh_submit_urb()'s local
 * variables (len, status, io_addr, wIndex) being in scope. */
2069 #define OK(x) len = (x); break
/* Read-modify-write a port's PORTSC register, clearing bits in x.
 * The 0xfff5 mask avoids write-1-to-clear bits (CSC/PEC) being
 * cleared accidentally on the read-back. wIndex is 1-based. */
2071 #define CLR_RH_PORTSTAT(x) \
2072 status = inw(io_addr + USBPORTSC1 + 2 * (wIndex-1)); \
2073 status = (status & 0xfff5) & ~(x); \
2074 outw(status, io_addr + USBPORTSC1 + 2 * (wIndex-1))
/* Same as above but setting bits in x. */
2076 #define SET_RH_PORTSTAT(x) \
2077 status = inw(io_addr + USBPORTSC1 + 2 * (wIndex-1)); \
2078 status = (status & 0xfff5) | (x); \
2079 outw(status, io_addr + USBPORTSC1 + 2 * (wIndex-1))
2082 /* Root Hub Control Pipe */
/*
 * Emulate the root hub in software: control requests addressed to the
 * virtual hub are decoded here and answered by poking the controller's
 * PORTSC registers or copying canned descriptors, instead of going out
 * on the wire. Interrupt URBs just start the polling timer and return
 * -EINPROGRESS. bmRType_bReq packs bRequestType | bRequest<<8 so one
 * switch can match request + recipient combinations (RH_* constants).
 */
2083 static int rh_submit_urb(struct urb *urb)
2085 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
2086 unsigned int pipe = urb->pipe;
2087 struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *)urb->setup_packet;
2088 void *data = urb->transfer_buffer;
2089 int leni = urb->transfer_buffer_length;
2094 unsigned int io_addr = uhci->io_addr;
/* Hub status-change endpoint: handled by the timer, not by a real TD. */
2101 if (usb_pipetype(pipe) == PIPE_INTERRUPT) {
2104 uhci->rh.interval = urb->interval;
2105 rh_init_int_timer(urb);
2107 return -EINPROGRESS;
2110 bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
2111 wValue = le16_to_cpu(cmd->wValue);
2112 wIndex = le16_to_cpu(cmd->wIndex);
2113 wLength = le16_to_cpu(cmd->wLength);
/* Clear per-port "reset change" flags before processing the request. */
2115 for (i = 0; i < 8; i++)
2116 uhci->rh.c_p_r[i] = 0;
2118 switch (bmRType_bReq) {
2119 /* Request Destination:
2120 without flags: Device,
2121 RH_INTERFACE: interface,
2122 RH_ENDPOINT: endpoint,
2123 RH_CLASS means HUB here,
2124 RH_OTHER | RH_CLASS almost ever means HUB_PORT here
2128 *(__u16 *)data = cpu_to_le16(1);
2130 case RH_GET_STATUS | RH_INTERFACE:
2131 *(__u16 *)data = cpu_to_le16(0);
2133 case RH_GET_STATUS | RH_ENDPOINT:
2134 *(__u16 *)data = cpu_to_le16(0);
2136 case RH_GET_STATUS | RH_CLASS:
2137 *(__u32 *)data = cpu_to_le32(0);
2138 OK(4); /* hub power */
/* GetPortStatus: translate raw PORTSC bits into the wPortStatus /
 * wPortChange layout the USB hub class defines. */
2139 case RH_GET_STATUS | RH_OTHER | RH_CLASS:
2140 status = inw(io_addr + USBPORTSC1 + 2 * (wIndex - 1));
2141 cstatus = ((status & USBPORTSC_CSC) >> (1 - 0)) |
2142 ((status & USBPORTSC_PEC) >> (3 - 1)) |
2143 (uhci->rh.c_p_r[wIndex - 1] << (0 + 4));
2144 status = (status & USBPORTSC_CCS) |
2145 ((status & USBPORTSC_PE) >> (2 - 1)) |
2146 ((status & USBPORTSC_SUSP) >> (12 - 2)) |
2147 ((status & USBPORTSC_PR) >> (9 - 4)) |
2148 (1 << 8) | /* power on */
2149 ((status & USBPORTSC_LSDA) << (-8 + 9));
2151 *(__u16 *)data = cpu_to_le16(status);
2152 *(__u16 *)(data + 2) = cpu_to_le16(cstatus);
2154 case RH_CLEAR_FEATURE | RH_ENDPOINT:
2156 case RH_ENDPOINT_STALL:
2160 case RH_CLEAR_FEATURE | RH_CLASS:
2162 case RH_C_HUB_OVER_CURRENT:
2163 OK(0); /* hub power over current */
2166 case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
2168 case RH_PORT_ENABLE:
2169 CLR_RH_PORTSTAT(USBPORTSC_PE);
2171 case RH_PORT_SUSPEND:
2172 CLR_RH_PORTSTAT(USBPORTSC_SUSP);
2175 OK(0); /* port power */
/* "Clear change" features: CSC/PEC are write-1-to-clear, hence SET. */
2176 case RH_C_PORT_CONNECTION:
2177 SET_RH_PORTSTAT(USBPORTSC_CSC);
2179 case RH_C_PORT_ENABLE:
2180 SET_RH_PORTSTAT(USBPORTSC_PEC);
2182 case RH_C_PORT_SUSPEND:
2183 /*** WR_RH_PORTSTAT(RH_PS_PSSC); */
2185 case RH_C_PORT_OVER_CURRENT:
2186 OK(0); /* port power over current */
2187 case RH_C_PORT_RESET:
2188 uhci->rh.c_p_r[wIndex - 1] = 0;
2192 case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
2194 case RH_PORT_SUSPEND:
2195 SET_RH_PORTSTAT(USBPORTSC_SUSP);
/* Port reset: assert PR, hold 50ms per spec, then enable the port. */
2198 SET_RH_PORTSTAT(USBPORTSC_PR);
2199 mdelay(50); /* USB v1.1 7.1.7.3 */
2200 uhci->rh.c_p_r[wIndex - 1] = 1;
2201 CLR_RH_PORTSTAT(USBPORTSC_PR);
2203 SET_RH_PORTSTAT(USBPORTSC_PE);
2205 SET_RH_PORTSTAT(0xa);
2208 OK(0); /* port power ** */
2209 case RH_PORT_ENABLE:
2210 SET_RH_PORTSTAT(USBPORTSC_PE);
2214 case RH_SET_ADDRESS:
2215 uhci->rh.devnum = wValue;
2217 case RH_GET_DESCRIPTOR:
2218 switch ((wValue & 0xff00) >> 8) {
2219 case 0x01: /* device descriptor */
2220 len = min_t(unsigned int, leni,
2222 sizeof(root_hub_dev_des), wLength));
2223 memcpy(data, root_hub_dev_des, len);
2225 case 0x02: /* configuration descriptor */
2226 len = min_t(unsigned int, leni,
2228 sizeof(root_hub_config_des), wLength));
2229 memcpy (data, root_hub_config_des, len);
2231 case 0x03: /* string descriptors */
2232 len = usb_root_hub_string (wValue & 0xff,
2233 uhci->io_addr, "UHCI-alt",
2236 OK(min_t(int, leni, len));
2241 case RH_GET_DESCRIPTOR | RH_CLASS:
/* Patch the real port count into the canned hub descriptor. */
2242 root_hub_hub_des[2] = uhci->rh.numports;
2243 len = min_t(unsigned int, leni,
2244 min_t(unsigned int, sizeof(root_hub_hub_des), wLength));
2245 memcpy(data, root_hub_hub_des, len);
2247 case RH_GET_CONFIGURATION:
2248 *(__u8 *)data = 0x01;
2250 case RH_SET_CONFIGURATION:
2252 case RH_GET_INTERFACE | RH_INTERFACE:
2253 *(__u8 *)data = 0x00;
2255 case RH_SET_INTERFACE | RH_INTERFACE:
2261 urb->actual_length = len;
2267 * MUST be called with urb->lock acquired
/*
 * Cancel the root hub's status-change interrupt URB: mark it -ENOENT,
 * forget it, and stop the polling timer.
 * MUST be called with urb->lock acquired (per the comment above).
 */
2269 static int rh_unlink_urb(struct urb *urb)
2271 struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
2273 if (uhci->rh.urb == urb) {
2274 urb->status = -ENOENT;
2276 uhci->rh.urb = NULL;
2277 del_timer(&uhci->rh.rh_int_timer);
/*
 * Free all QHs queued on uhci->qh_remove_list. QH removal is deferred to
 * here (the IRQ path) so the hardware is guaranteed to be past them; see
 * the "interrupt loop will reclaim the QH's" note in uhci_unlink_generic.
 */
2282 static void uhci_free_pending_qhs(struct uhci *uhci)
2284 struct list_head *tmp, *head;
2285 unsigned long flags;
2287 spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
2288 head = &uhci->qh_remove_list;
2290 while (tmp != head) {
2291 struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
2295 list_del_init(&qh->remove_list);
2297 uhci_free_qh(uhci, qh);
2299 spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
/*
 * Finish a URB: sync its DMA buffers back, publish the final status,
 * invoke the caller's completion handler (in the elided portion of this
 * listing), then handle the two auto-resubmit cases:
 *  - interrupt URBs are re-armed via uhci_reset_interrupt() unless the
 *    completion handler unlinked them;
 *  - a ring of linked URBs (urb->next chain looping back to itself) is
 *    resubmitted via uhci_submit_urb().
 * "killed" (status ENOENT/ECONNABORTED/ECONNRESET) suppresses both.
 * Drops the device reference taken at submit time at the very end.
 */
2302 static void uhci_call_completion(struct urb *urb)
2304 struct urb_priv *urbp;
2305 struct usb_device *dev = urb->dev;
2306 struct uhci *uhci = (struct uhci *)dev->bus->hcpriv;
2307 int is_ring = 0, killed, resubmit_interrupt, status;
2309 unsigned long flags;
2311 spin_lock_irqsave(&urb->lock, flags);
2313 urbp = (struct urb_priv *)urb->hcpriv;
2314 if (!urbp || !urb->dev) {
2315 spin_unlock_irqrestore(&urb->lock, flags);
2319 killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED ||
2320 urb->status == -ECONNRESET);
2321 resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT &&
/* Walk the urb->next chain (bounded by MAX_URB_LOOP to survive cycles
 * of unlinked URBs) to see whether it forms a ring back to this URB. */
2325 if (nurb && !killed) {
2328 while (nurb && nurb != urb && count < MAX_URB_LOOP) {
2329 if (nurb->status == -ENOENT ||
2330 nurb->status == -ECONNABORTED ||
2331 nurb->status == -ECONNRESET) {
2340 if (count == MAX_URB_LOOP)
2341 err("uhci_call_completion: too many linked URB's, loop? (first loop)");
2343 /* Check to see if chain is a ring */
2344 is_ring = (nurb == urb);
/* Sync DMA data back to the CPU before the completion handler reads it. */
2347 if (urbp->transfer_buffer_dma_handle)
2348 pci_dma_sync_single(uhci->dev, urbp->transfer_buffer_dma_handle,
2349 urb->transfer_buffer_length, usb_pipein(urb->pipe) ?
2350 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
2352 if (urbp->setup_packet_dma_handle)
2353 pci_dma_sync_single(uhci->dev, urbp->setup_packet_dma_handle,
2354 sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
2356 status = urbp->status;
2357 if (!resubmit_interrupt || killed)
2358 /* We don't need urb_priv anymore */
2359 uhci_destroy_urb_priv(urb);
2362 urb->status = status;
2365 spin_unlock_irqrestore(&urb->lock, flags);
2370 if (resubmit_interrupt)
2371 /* Recheck the status. The completion handler may have */
2372 /* unlinked the resubmitting interrupt URB */
2373 killed = (urb->status == -ENOENT ||
2374 urb->status == -ECONNABORTED ||
2375 urb->status == -ECONNRESET);
2377 if (resubmit_interrupt && !killed) {
2379 uhci_reset_interrupt(urb);
2381 if (is_ring && !killed) {
2383 uhci_submit_urb(urb);
2385 /* We decrement the usage count after we're done */
2386 /* with everything */
2387 usb_dec_dev_use(dev);
/*
 * Drain uhci->complete_list, calling uhci_call_completion() for each
 * queued URB. The lock is dropped around each callback (completion
 * handlers may resubmit and re-take driver locks), so head/tmp are
 * re-read from the list after reacquiring it.
 */
2392 static void uhci_finish_completion(struct uhci *uhci)
2394 struct list_head *tmp, *head;
2395 unsigned long flags;
2397 spin_lock_irqsave(&uhci->complete_list_lock, flags);
2398 head = &uhci->complete_list;
2400 while (tmp != head) {
2401 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, complete_list);
2402 struct urb *urb = urbp->urb;
2404 list_del_init(&urbp->complete_list);
2405 spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
2407 uhci_call_completion(urb);
2409 spin_lock_irqsave(&uhci->complete_list_lock, flags);
2410 head = &uhci->complete_list;
2413 spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
/*
 * Finish asynchronously-unlinked URBs parked on uhci->urb_remove_list by
 * uhci_unlink_urb(): mark each -ECONNRESET and hand it to the completion
 * queue. Runs from the IRQ handler, i.e. at least one frame after the
 * unlink, so the HC is done with the TDs.
 */
2416 static void uhci_remove_pending_qhs(struct uhci *uhci)
2418 struct list_head *tmp, *head;
2419 unsigned long flags;
2421 spin_lock_irqsave(&uhci->urb_remove_list_lock, flags);
2422 head = &uhci->urb_remove_list;
2424 while (tmp != head) {
2425 struct urb *urb = list_entry(tmp, struct urb, urb_list);
2426 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
2430 list_del_init(&urb->urb_list);
2432 urbp->status = urb->status = -ECONNRESET;
2434 uhci_add_complete(urb);
2436 spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags);
/*
 * Main interrupt handler. Acks USBSTS (write-1-to-clear), logs fatal
 * conditions (host system error, HC process error, unexpected halt),
 * then runs the deferred work: free removed QHs, finish async unlinks,
 * clear the "interrupt me next frame" TD, poll every pending URB for
 * completion via uhci_transfer_result(), and finally run completions.
 */
2439 static void uhci_interrupt(int irq, void *__uhci, struct pt_regs *regs)
2441 struct uhci *uhci = __uhci;
2442 unsigned int io_addr = uhci->io_addr;
2443 unsigned short status;
2444 struct list_head *tmp, *head;
2447 * Read the interrupt status, and write it back to clear the
2450 status = inw(io_addr + USBSTS);
2451 if (!status) /* shared interrupt, not mine */
2453 outw(status, io_addr + USBSTS); /* Clear it */
2455 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
2456 if (status & USBSTS_HSE)
2457 err("%x: host system error, PCI problems?", io_addr);
2458 if (status & USBSTS_HCPE)
2459 err("%x: host controller process error. something bad happened", io_addr);
/* A halt is only legitimate while we deliberately suspended the HC. */
2460 if ((status & USBSTS_HCH) && !uhci->is_suspended) {
2461 err("%x: host controller halted. very bad", io_addr);
2462 /* FIXME: Reset the controller, fix the offending TD */
2466 if (status & USBSTS_RD)
2469 uhci_free_pending_qhs(uhci);
2471 uhci_remove_pending_qhs(uhci);
2473 uhci_clear_next_interrupt(uhci);
2475 /* Walk the list of pending URB's to see which ones completed */
2476 spin_lock(&uhci->urb_list_lock);
2477 head = &uhci->urb_list;
2479 while (tmp != head) {
2480 struct urb *urb = list_entry(tmp, struct urb, urb_list);
2484 /* Checks the status and does all of the magic necessary */
2485 uhci_transfer_result(uhci, urb);
2487 spin_unlock(&uhci->urb_list_lock);
2489 uhci_finish_completion(uhci);
/*
 * Hard-reset the controller via a global reset pulse on USBCMD, then
 * release it (write 0). Used before setup to silence any stale state.
 */
2492 static void reset_hc(struct uhci *uhci)
2494 unsigned int io_addr = uhci->io_addr;
2496 /* Global reset for 50ms */
2497 outw(USBCMD_GRESET, io_addr + USBCMD);
2499 outw(0, io_addr + USBCMD);
/*
 * Put the controller into global suspend (USBCMD_EGSM) and record the
 * state so the IRQ handler tolerates the resulting HC halt. Entered by
 * the root-hub timer when no ports are active.
 */
2503 static void suspend_hc(struct uhci *uhci)
2505 unsigned int io_addr = uhci->io_addr;
2507 dbg("%x: suspend_hc", io_addr);
2509 outw(USBCMD_EGSM, io_addr + USBCMD);
2511 uhci->is_suspended = 1;
/*
 * Bring the controller out of global suspend: clear USBCMD, busy-wait for
 * the force-global-resume (FGR) bit to drop so the resume EOP has gone
 * out, then restart the schedule with the usual Run/Configure/64-byte
 * settings. NOTE(review): the FGR poll has no timeout — a stuck bit
 * would spin forever; confirm acceptable for this hardware era.
 */
2514 static void wakeup_hc(struct uhci *uhci)
2516 unsigned int io_addr = uhci->io_addr;
2517 unsigned int status;
2519 dbg("%x: wakeup_hc", io_addr);
2521 outw(0, io_addr + USBCMD);
2523 /* wait for EOP to be sent */
2524 status = inw(io_addr + USBCMD);
2525 while (status & USBCMD_FGR)
2526 status = inw(io_addr + USBCMD);
2528 uhci->is_suspended = 0;
2530 /* Run and mark it configured with a 64-byte max packet */
2531 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
/*
 * Return non-zero if any root-hub port reports a current connection
 * (PORTSC bit 0, CCS). Used by the timer to decide on global suspend.
 */
2534 static int ports_active(struct uhci *uhci)
2536 unsigned int io_addr = uhci->io_addr;
2540 for (i = 0; i < uhci->rh.numports; i++)
2541 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & 0x1);
/*
 * Start the controller: HCRESET (with a timeout guard), enable all four
 * interrupt sources, point the hardware at frame 0 of our DMA frame
 * list, and set Run/Configure with 64-byte max packet.
 */
2546 static void start_hc(struct uhci *uhci)
2548 unsigned int io_addr = uhci->io_addr;
2552 * Reset the HC - this will force us to get a
2553 * new notification of any already connected
2554 * ports due to the virtual disconnect that it
2557 outw(USBCMD_HCRESET, io_addr + USBCMD);
2558 while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
2560 printk(KERN_ERR "uhci: USBCMD_HCRESET timed out!\n");
2565 /* Turn on all interrupts */
2566 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
2569 /* Start at frame 0 */
2570 outw(0, io_addr + USBFRNUM);
/* Hand the hardware the physical address of the 1024-entry frame list. */
2571 outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
2573 /* Run and mark it configured with a 64-byte max packet */
2574 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
2577 #ifdef CONFIG_PROC_FS
2578 static int uhci_num = 0;
2581 static void free_uhci(struct uhci *uhci)
2587 * De-allocate all resources..
/*
 * Tear down everything alloc_uhci() built, in reverse order: IRQ,
 * skeleton QHs/TDs, the TD/QH pci_pools, the consistent-memory frame
 * list, the USB bus object, and the /proc entry. Each resource is
 * NULL-checked and cleared so this is safe to call on partial setups
 * (it is the error-path cleanup for alloc_uhci as well).
 */
2589 static void release_uhci(struct uhci *uhci)
2592 #ifdef CONFIG_PROC_FS
2596 if (uhci->irq >= 0) {
2597 free_irq(uhci->irq, uhci);
2601 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2602 if (uhci->skelqh[i]) {
2603 uhci_free_qh(uhci, uhci->skelqh[i]);
2604 uhci->skelqh[i] = NULL;
2607 for (i = 0; i < UHCI_NUM_SKELTD; i++)
2608 if (uhci->skeltd[i]) {
2609 uhci_free_td(uhci, uhci->skeltd[i]);
2610 uhci->skeltd[i] = NULL;
/* Pools must go after the QHs/TDs they back. */
2613 if (uhci->qh_pool) {
2614 pci_pool_destroy(uhci->qh_pool);
2615 uhci->qh_pool = NULL;
2618 if (uhci->td_pool) {
2619 pci_pool_destroy(uhci->td_pool);
2620 uhci->td_pool = NULL;
2624 pci_free_consistent(uhci->dev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
2629 usb_free_bus(uhci->bus);
2633 #ifdef CONFIG_PROC_FS
2634 if (uhci->proc_entry) {
2635 sprintf(buf, "hc%d", uhci->num);
2637 remove_proc_entry(buf, uhci_proc_root);
2638 uhci->proc_entry = NULL;
2646 * Allocate a frame list, and then setup the skeleton
2648 * The hardware doesn't really know any difference
2649 * in the queues, but the order does matter for the
2650 * protocols higher up. The order is:
2652 * - any isochronous events handled before any
2653 * of the queues. We don't do that here, because
2654 * we'll create the actual TD entries on demand.
2655 * - The first queue is the interrupt queue.
2656 * - The second queue is the control queue, split into low and high speed
2657 * - The third queue is bulk queue.
2658 * - The fourth queue is the bandwidth reclamation queue, which loops back
2659 * to the high speed control queue.
2661 static int alloc_uhci(struct pci_dev *dev, unsigned int io_addr, unsigned int io_size)
2665 char buf[8], *bufp = buf;
2667 struct usb_bus *bus;
2668 dma_addr_t dma_handle;
2669 #ifdef CONFIG_PROC_FS
2670 struct proc_dir_entry *ent;
2674 if (pci_enable_device(dev) < 0) {
2675 err("couldn't enable PCI device");
2676 goto err_enable_device;
2680 err("found UHCI device with no IRQ assigned. check BIOS settings!");
2681 goto err_invalid_irq;
2684 if (!pci_dma_supported(dev, 0xFFFFFFFF)) {
2685 err("PCI subsystem doesn't support 32 bit addressing?");
2686 goto err_pci_dma_supported;
2690 if (!request_region(io_addr, io_size, "usb-uhci")) {
2691 err("couldn't allocate I/O range %x - %x", io_addr,
2692 io_addr + io_size - 1);
2693 goto err_request_region;
2696 pci_set_master(dev);
2699 sprintf(buf, "%d", dev->irq);
2701 bufp = __irq_itoa(dev->irq);
2703 printk(KERN_INFO __FILE__ ": USB UHCI at I/O 0x%x, IRQ %s\n",
2706 if (pci_set_dma_mask(dev, 0xFFFFFFFF)) {
2707 err("couldn't set PCI dma mask");
2709 goto err_pci_set_dma_mask;
2712 uhci = kmalloc(sizeof(*uhci), GFP_KERNEL);
2714 err("couldn't allocate uhci structure");
2716 goto err_alloc_uhci;
2720 uhci->irq = dev->irq;
2721 uhci->io_addr = io_addr;
2722 uhci->io_size = io_size;
2723 pci_set_drvdata(dev, uhci);
2725 #ifdef CONFIG_PROC_FS
2726 uhci->num = uhci_num++;
2728 sprintf(buf, "hc%d", uhci->num);
2730 ent = create_proc_entry(buf, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
2732 err("couldn't create uhci proc entry");
2734 goto err_create_proc_entry;
2738 ent->proc_fops = &uhci_proc_operations;
2740 uhci->proc_entry = ent;
2743 /* Reset here so we don't get any interrupts from an old setup */
2744 /* or broken setup */
2748 uhci->fsbrtimeout = 0;
2750 uhci->is_suspended = 0;
2752 spin_lock_init(&uhci->qh_remove_list_lock);
2753 INIT_LIST_HEAD(&uhci->qh_remove_list);
2755 spin_lock_init(&uhci->urb_remove_list_lock);
2756 INIT_LIST_HEAD(&uhci->urb_remove_list);
2758 spin_lock_init(&uhci->urb_list_lock);
2759 INIT_LIST_HEAD(&uhci->urb_list);
2761 spin_lock_init(&uhci->complete_list_lock);
2762 INIT_LIST_HEAD(&uhci->complete_list);
2764 spin_lock_init(&uhci->frame_list_lock);
2766 /* We need exactly one page (per UHCI specs), how convenient */
2767 /* We assume that one page is atleast 4k (1024 frames * 4 bytes) */
2768 #if PAGE_SIZE < (4 * 1024)
2769 #error PAGE_SIZE is not atleast 4k
2771 uhci->fl = pci_alloc_consistent(uhci->dev, sizeof(*uhci->fl), &dma_handle);
2773 err("unable to allocate consistent memory for frame list");
2777 memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2779 uhci->fl->dma_handle = dma_handle;
2781 uhci->td_pool = pci_pool_create("uhci_td", uhci->dev,
2782 sizeof(struct uhci_td), 16, 0, GFP_DMA | GFP_ATOMIC);
2783 if (!uhci->td_pool) {
2784 err("unable to create td pci_pool");
2785 goto err_create_td_pool;
2788 uhci->qh_pool = pci_pool_create("uhci_qh", uhci->dev,
2789 sizeof(struct uhci_qh), 16, 0, GFP_DMA | GFP_ATOMIC);
2790 if (!uhci->qh_pool) {
2791 err("unable to create qh pci_pool");
2792 goto err_create_qh_pool;
2795 bus = usb_alloc_bus(&uhci_device_operations);
2797 err("unable to allocate bus");
2802 bus->bus_name = dev->slot_name;
2805 usb_register_bus(uhci->bus);
2807 /* Initialize the root hub */
2809 /* UHCI specs says devices must have 2 ports, but goes on to say */
2810 /* they may have more but give no way to determine how many they */
2811 /* have. However, according to the UHCI spec, Bit 7 is always set */
2812 /* to 1. So we try to use this to our advantage */
2813 for (port = 0; port < (uhci->io_size - 0x10) / 2; port++) {
2814 unsigned int portstatus;
2816 portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2817 if (!(portstatus & 0x0080))
2821 info("detected %d ports", port);
2823 /* This is experimental so anything less than 2 or greater than 8 is */
2824 /* something weird and we'll ignore it */
2825 if (port < 2 || port > 8) {
2826 info("port count misdetected? forcing to 2 ports");
2830 uhci->rh.numports = port;
2832 uhci->bus->root_hub = uhci->rh.dev = usb_alloc_dev(NULL, uhci->bus);
2833 if (!uhci->rh.dev) {
2834 err("unable to allocate root hub");
2835 goto err_alloc_root_hub;
2838 uhci->skeltd[0] = uhci_alloc_td(uhci, uhci->rh.dev);
2839 if (!uhci->skeltd[0]) {
2840 err("unable to allocate TD 0");
2841 goto err_alloc_skeltd;
2845 * 9 Interrupt queues; link int2 to int1, int4 to int2, etc
2846 * then link int1 to control and control to bulk
2848 for (i = 1; i < 9; i++) {
2851 td = uhci->skeltd[i] = uhci_alloc_td(uhci, uhci->rh.dev);
2853 err("unable to allocate TD %d", i);
2854 goto err_alloc_skeltd;
2857 uhci_fill_td(td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
2858 td->link = uhci->skeltd[i - 1]->dma_handle;
2861 uhci->skel_term_td = uhci_alloc_td(uhci, uhci->rh.dev);
2862 if (!uhci->skel_term_td) {
2863 err("unable to allocate skel TD term");
2864 goto err_alloc_skeltd;
2867 for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2868 uhci->skelqh[i] = uhci_alloc_qh(uhci, uhci->rh.dev);
2869 if (!uhci->skelqh[i]) {
2870 err("unable to allocate QH %d", i);
2871 goto err_alloc_skelqh;
2875 uhci_fill_td(uhci->skel_int1_td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
2876 uhci->skel_int1_td->link = uhci->skel_ls_control_qh->dma_handle | UHCI_PTR_QH;
2878 uhci->skel_ls_control_qh->link = uhci->skel_hs_control_qh->dma_handle | UHCI_PTR_QH;
2879 uhci->skel_ls_control_qh->element = UHCI_PTR_TERM;
2881 uhci->skel_hs_control_qh->link = uhci->skel_bulk_qh->dma_handle | UHCI_PTR_QH;
2882 uhci->skel_hs_control_qh->element = UHCI_PTR_TERM;
2884 uhci->skel_bulk_qh->link = uhci->skel_term_qh->dma_handle | UHCI_PTR_QH;
2885 uhci->skel_bulk_qh->element = UHCI_PTR_TERM;
2887 /* This dummy TD is to work around a bug in Intel PIIX controllers */
2888 uhci_fill_td(uhci->skel_term_td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
2889 uhci->skel_term_td->link = uhci->skel_term_td->dma_handle;
2891 uhci->skel_term_qh->link = UHCI_PTR_TERM;
2892 uhci->skel_term_qh->element = uhci->skel_term_td->dma_handle;
2895 * Fill the frame list: make all entries point to
2896 * the proper interrupt queue.
2898 * This is probably silly, but it's a simple way to
2899 * scatter the interrupt queues in a way that gives
2900 * us a reasonable dynamic range for irq latencies.
2902 for (i = 0; i < UHCI_NUMFRAMES; i++) {
2926 /* Only place we don't use the frame list routines */
2927 uhci->fl->frame[i] = uhci->skeltd[irq]->dma_handle;
2932 if (request_irq(dev->irq, uhci_interrupt, SA_SHIRQ, "usb-uhci", uhci))
2933 goto err_request_irq;
2935 /* disable legacy emulation */
2936 pci_write_config_word(uhci->dev, USBLEGSUP, USBLEGSUP_DEFAULT);
2938 usb_connect(uhci->rh.dev);
2940 if (usb_new_device(uhci->rh.dev) != 0) {
2941 err("unable to start root hub");
2943 goto err_start_root_hub;
2952 free_irq(uhci->irq, uhci);
2956 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2957 if (uhci->skelqh[i]) {
2958 uhci_free_qh(uhci, uhci->skelqh[i]);
2959 uhci->skelqh[i] = NULL;
2963 for (i = 0; i < UHCI_NUM_SKELTD; i++)
2964 if (uhci->skeltd[i]) {
2965 uhci_free_td(uhci, uhci->skeltd[i]);
2966 uhci->skeltd[i] = NULL;
2970 usb_free_dev(uhci->rh.dev);
2971 uhci->rh.dev = NULL;
2974 usb_free_bus(uhci->bus);
2978 pci_pool_destroy(uhci->qh_pool);
2979 uhci->qh_pool = NULL;
2982 pci_pool_destroy(uhci->td_pool);
2983 uhci->td_pool = NULL;
2986 pci_free_consistent(uhci->dev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
2990 #ifdef CONFIG_PROC_FS
2991 remove_proc_entry(buf, uhci_proc_root);
2992 uhci->proc_entry = NULL;
2994 err_create_proc_entry:
3000 err_pci_set_dma_mask:
3001 release_region(io_addr, io_size);
3005 err_pci_dma_supported:
/*
 * PCI probe callback: scan the device's standard BARs for the first
 * I/O-port resource and hand it to alloc_uhci() for full controller setup.
 *
 * NOTE(review): this extracted view is missing several original lines
 * (the skip-empty-resource path and the final not-found return, presumably
 * -ENODEV — TODO confirm against the full file). Comments below describe
 * only what is visible here.
 */
3014 static int __devinit uhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
3018 /* Search for the IO base address.. */
/* UHCI controllers expose their registers through an I/O-port BAR;
 * walk all six possible base address registers. */
3019 for (i = 0; i < 6; i++) {
3020 unsigned int io_addr = pci_resource_start(dev, i);
3021 unsigned int io_size = pci_resource_len(dev, i);
/* Only an IORESOURCE_IO region can be the UHCI register bank;
 * memory-mapped BARs are skipped (skip path not visible here). */
3024 if (!(pci_resource_flags(dev, i) & IORESOURCE_IO))
/* First I/O BAR found: delegate all allocation and HC init to
 * alloc_uhci(); its return value becomes the probe result. */
3027 return alloc_uhci(dev, io_addr, io_size);
/*
 * PCI remove callback: tear down the controller in the reverse order of
 * probe — disconnect the root hub, deregister the bus, flush pending
 * QHs, and release the I/O region.
 *
 * NOTE(review): lines between the original 3047 and 3052 are missing from
 * this view (presumably reset_hc()/free_irq()/frame-list freeing — TODO
 * confirm), which is why uhci_free_pending_qhs() appears to be called
 * twice: once before and once after the hardware is quiesced.
 */
3033 static void __devexit uhci_pci_remove(struct pci_dev *dev)
/* Driver-private state was stashed with pci_set_drvdata() at probe time. */
3035 struct uhci *uhci = pci_get_drvdata(dev);
/* Disconnect the virtual root hub first so no device below this bus
 * remains active; usb_disconnect() takes the address of the pointer. */
3037 if (uhci->bus->root_hub)
3038 usb_disconnect(&uhci->bus->root_hub);
3040 usb_deregister_bus(uhci->bus);
3043 * At this point, we're guaranteed that no new connects can be made
3044 * to this bus since there are no more parents
/* Flush QHs queued for deferred freeing/removal now that no new URBs
 * can be submitted. */
3046 uhci_free_pending_qhs(uhci);
3047 uhci_remove_pending_qhs(uhci);
/* Give back the I/O-port range claimed in probe. */
3050 release_region(uhci->io_addr, uhci->io_size);
/* Second flush: catches QHs that became freeable only after the
 * controller was stopped (intervening lines not visible here). */
3052 uhci_free_pending_qhs(uhci);
/*
 * PCI power-management suspend hook: put the host controller into its
 * suspended state. The requested power state argument is unused here.
 * (Braces and the return statement are missing from this extracted view;
 * presumably `return 0;` — TODO confirm against the full file.)
 */
3058 static int uhci_pci_suspend(struct pci_dev *dev, u32 state)
3060 suspend_hc((struct uhci *) pci_get_drvdata(dev));
/*
 * PCI power-management resume hook: fully reset the host controller and
 * then restart it, rather than trying to resume from the suspended state —
 * a reset gives a known-good register state after a power transition.
 * (Braces and the return statement are missing from this extracted view;
 * presumably `return 0;` — TODO confirm against the full file.)
 */
3064 static int uhci_pci_resume(struct pci_dev *dev)
3066 reset_hc((struct uhci *) pci_get_drvdata(dev));
3067 start_hc((struct uhci *) pci_get_drvdata(dev));
/*
 * PCI match table: one wildcard entry that claims every controller whose
 * PCI class is serial-bus/USB with the UHCI programming interface (0x00),
 * regardless of vendor or device ID, followed by the mandatory all-zero
 * terminator entry. Uses old GNU `label:` designated-initializer syntax.
 * (Some member lines, e.g. the class_mask, are missing from this view.)
 */
3072 static const struct pci_device_id __devinitdata uhci_pci_ids[] = { {
3074 /* handle any USB UHCI controller */
3075 class: ((PCI_CLASS_SERIAL_USB << 8) | 0x00),
3078 /* no matter who makes it */
3081 subvendor: PCI_ANY_ID,
3082 subdevice: PCI_ANY_ID,
3084 }, { /* end: all zeroes */ }
3087 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
/*
 * PCI driver descriptor tying the match table to the probe/remove and
 * suspend/resume callbacks defined above. Uses old GNU `label:`
 * designated-initializer syntax. __devexit_p() compiles the remove
 * pointer out when hotplug support is not configured.
 * (The `name:` member line is missing from this extracted view.)
 */
3089 static struct pci_driver uhci_pci_driver = {
3091 id_table: uhci_pci_ids,
3093 probe: uhci_pci_probe,
3094 remove: __devexit_p(uhci_pci_remove),
3097 suspend: uhci_pci_suspend,
3098 resume: uhci_pci_resume,
/*
 * Module init: allocate the debug error buffer, create the
 * /proc/driver/uhci directory (when procfs is enabled), create the slab
 * cache for per-URB private state, and finally register the PCI driver.
 * On failure, resources are unwound in reverse order via goto labels.
 *
 * NOTE(review): the goto statements and error labels between the visible
 * lines were dropped by extraction, so the unwind path below shows only
 * the cleanup bodies, not the jumps into them.
 */
3103 static int __init uhci_hcd_init(void)
/* Default to -ENOMEM: every early failure here is an allocation failure. */
3105 int retval = -ENOMEM;
3107 info(DRIVER_DESC " " DRIVER_VERSION);
/* errbuf is a file-scope scratch buffer used by the debug dump code. */
3110 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
3115 #ifdef CONFIG_PROC_FS
/* Directory under /proc/driver holding one entry per controller. */
3116 uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
3117 if (!uhci_proc_root)
/* Slab cache for struct urb_priv, allocated per in-flight URB. */
3121 uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
3122 sizeof(struct urb_priv), 0, 0, NULL, NULL);
3123 if (!uhci_up_cachep)
/* Register with the PCI core; probes may run before this returns. */
3126 retval = pci_module_init(&uhci_pci_driver);
/* --- error unwind (labels not visible in this view) --- */
/* kmem_cache_destroy() returns nonzero if objects are still live,
 * indicating leaked urb_privs. */
3133 if (kmem_cache_destroy(uhci_up_cachep))
3134 printk(KERN_INFO "uhci: not all urb_priv's were freed\n");
3138 #ifdef CONFIG_PROC_FS
3139 remove_proc_entry("driver/uhci", 0);
/*
 * Module exit: mirror of uhci_hcd_init() — unregister the PCI driver
 * (which removes all bound controllers), destroy the urb_priv slab
 * cache, and remove the /proc/driver/uhci directory. The errbuf kfree
 * presumably follows in lines not visible here — TODO confirm.
 */
3151 static void __exit uhci_hcd_cleanup(void)
3153 pci_unregister_driver(&uhci_pci_driver);
/* Nonzero return means live objects remained in the cache — leaked
 * urb_privs; warn but continue unloading. */
3155 if (kmem_cache_destroy(uhci_up_cachep))
3156 printk(KERN_INFO "uhci: not all urb_priv's were freed\n");
3158 #ifdef CONFIG_PROC_FS
3159 remove_proc_entry("driver/uhci", 0);
3166 module_init(uhci_hcd_init);
3167 module_exit(uhci_hcd_cleanup);
3169 MODULE_AUTHOR(DRIVER_AUTHOR);
3170 MODULE_DESCRIPTION(DRIVER_DESC);
3171 MODULE_LICENSE("GPL");