/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu
 */
static void uhci_free_pending_tds(struct uhci_hcd *uhci);
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem.  The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt.  We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases.  I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->remove_list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
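
/*
 * Example (illustrative only; the values are assumptions, not from a
 * real transfer): arming a TD for a 64-byte full-speed bulk IN packet
 * with DATA0, using the same macros the submit paths below use:
 *
 *	uhci_fill_td(td, uhci_maxerr(3) | TD_CTRL_ACTIVE | TD_CTRL_SPD,
 *			(urb->pipe & PIPE_DEVEP_MASK) | USB_PID_IN |
 *				uhci_explen(64) |
 *				(0 << TD_TOKEN_TOGGLE_SHIFT),
 *			urb->transfer_dma);
 */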
/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}
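
/*
 * The resulting layout (illustrative sketch): each of the UHCI_NUMFRAMES
 * (1024) frame-list slots points either directly at the skeleton QHs or
 * at a chain of isochronous TDs that ends there:
 *
 *	uhci->frame[n]     --> iso TD --> iso TD --> skeleton QHs ...
 *	uhci->frame_cpu[n] --> first iso TD (so unlinking never has to
 *			       walk the hardware chain from scratch)
 */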
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}
/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
	wmb();
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->dummy_td = uhci_alloc_td(uhci);
		if (!qh->dummy_td) {
			dma_pool_free(uhci->qh_pool, qh, dma_handle);
			return NULL;
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->udev = NULL;
	}
	return qh;
}
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	if (qh->udev) {		/* Normal QH */
		qh->hep->hcpriv = NULL;
		uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * When the currently executing URB is dequeued, save its current toggle value
 */
static void uhci_save_toggle(struct uhci_qh *qh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM ||
			qh->queue.next != &urbp->node)
		return;
	qh->element = UHCI_PTR_TERM;

	/* Only bulk and interrupt pipes have to worry about toggles */
	if (!(qh->type == USB_ENDPOINT_XFER_BULK ||
			qh->type == USB_ENDPOINT_XFER_INT))
		return;

	/* Find the first active TD; that's the device's toggle state */
	list_for_each_entry(td, &urbp->td_list, list) {
		if (td_status(td) & TD_CTRL_ACTIVE) {
			qh->needs_fixup = 1;
			qh->initial_toggle = uhci_toggle(td_token(td));
			return;
		}
	}

	WARN_ON(1);
}
/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}
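
/*
 * Worked example (assumed values): qh->initial_toggle is 1 but the next
 * URB's three TDs were built as DATA0/DATA1/DATA0.  The first TD's
 * toggle (0) doesn't match, so every token in that URB gets
 * TD_TOKEN_TOGGLE flipped, giving DATA1/DATA0/DATA1; the running toggle
 * then ends at 0, which is what the following URB's first TD must use.
 */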
/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}
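
/*
 * Illustrative picture (not taken from a real schedule): after the
 * splice, the hardware chain for a bulk QH might read
 *
 *	skel_bulk_qh --> qh A --> newly activated qh --> ...
 *
 * qh->link is written before pqh->link, with a write barrier in
 * between, so the controller never follows a half-updated pointer.
 */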
/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}
/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	list_add_tail(&td->list, &urbp->td_list);
}
static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);
}
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty.  Set the IOC bit
	 * to force an interrupt so we can remove the TDs. */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}
/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);		/* Active */
	return 0;
}
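
/*
 * Example (illustrative): a stalled endpoint reports TD_CTRL_STALLED,
 * which this routine maps to the -EPIPE the URB's completion handler
 * will eventually see:
 *
 *	int ret = uhci_map_status(uhci_status_bits(td_status(td)),
 *			uhci_packetout(td_token(td)));
 *	(ret == -EPIPE when TD_CTRL_STALLED was set)
 */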
/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urb(qh->dummy_td);
	return -ENOMEM;
}
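
/*
 * For reference, a sketch of the kind of request that exercises this
 * path (all values illustrative).  A driver-side GET_STATUS becomes one
 * SETUP TD, one 2-byte IN data TD, and one zero-length OUT status TD:
 *
 *	__le16 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);
 *	int rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 *			USB_REQ_GET_STATUS,
 *			USB_DIR_IN | USB_RECIP_DEVICE, 0, 0,
 *			buf, sizeof(*buf), 1000);
 */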
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = cpu_to_le32(td->dma_handle);
		}
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urb(qh->dummy_td);
	return -ENOMEM;
}
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_inc_fsbr(uhci, urb);
	return ret;
}
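
/*
 * For reference (names and sizes illustrative): a typical driver
 * request that lands here.  A 4096-byte read on a 64-byte bulk
 * endpoint becomes 64 TDs queued on the bulk skeleton QH:
 *
 *	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 1),
 *			buf, 4096, my_complete, ctx);
 *	usb_submit_urb(urb, GFP_KERNEL);
 */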
static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */
	qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)];
	return uhci_submit_common(uhci, urb, qh);
}
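
/*
 * For reference (values illustrative): an interrupt endpoint polled
 * every 8 ms.  __interval_to_skel() picks the skeleton QH for the
 * largest power-of-two interval not exceeding the one requested:
 *
 *	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, 1),
 *			buf, 8, my_complete, ctx, 8);
 *	usb_submit_urb(urb, GFP_KERNEL);
 */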
/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp,
		struct uhci_td *short_td)
{
	struct uhci_td *td;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		urbp->short_transfer = 1;

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		qh->element = cpu_to_le32(td->dma_handle);
		ret = -EINPROGRESS;

	} else if (!urbp->short_transfer) {
		urbp->short_transfer = 1;

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(short_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		qh->element = td->link;
		ret = 0;

	} else		/* The toggles have already been fixed up */
		ret = 0;

	return ret;
}
/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td;
	struct list_head *tmp;
	unsigned status;
	int ret = 0;

	tmp = urbp->td_list.next;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		if (urbp->short_transfer)
			tmp = urbp->td_list.prev;

		urb->actual_length = -8;	/* SETUP packet */
	} else
		urb->actual_length = 0;

	while (tmp != &urbp->td_list) {
		unsigned int ctrlstat;
		int len;

		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->next;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(uhci_dev(uhci),
						"%s: failed with status %x\n",
						__FUNCTION__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		} else if (len < uhci_expected_length(td_token(td))) {

			/* We received a short packet */
			if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;
			else if (ctrlstat & TD_CTRL_SPD)
				ret = 1;
		}

		if (ret != 0)
			goto err;
	}
	return 0;

err:
	if (ret < 0) {
		/* In case a control transfer gets an error
		 * during the setup stage */
		urb->actual_length = max(urb->actual_length, 0);

		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp, td);
	return ret;
}
/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Figure out the starting frame number */
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&qh->queue)) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10);

		} else {		/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			urb->start_frame = (last_urb->start_frame +
					last_urb->number_of_packets *
					last_urb->interval);
		}
	} else {
		/* FIXME: Sanity check */
	}
	urb->start_frame &= (UHCI_NUMFRAMES - 1);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
				urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += urb->interval;
	}

	return 0;
}
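
/*
 * For reference (all values illustrative): an isochronous URB with two
 * 64-byte packets in consecutive 1 ms frames, as a driver might build
 * it before it arrives here:
 *
 *	urb = usb_alloc_urb(2, GFP_KERNEL);
 *	urb->dev = udev;
 *	urb->pipe = usb_rcvisocpipe(udev, 1);
 *	urb->interval = 1;
 *	urb->transfer_flags = URB_ISO_ASAP;
 *	urb->transfer_buffer = buf;
 *	urb->transfer_buffer_length = 128;
 *	urb->number_of_packets = 2;
 *	for (i = 0; i < 2; i++) {
 *		urb->iso_frame_desc[i].offset = i * 64;
 *		urb->iso_frame_desc[i].length = 64;
 *	}
 *	urb->complete = my_complete;
 *	usb_submit_urb(urb, GFP_KERNEL);
 */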
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = urb->error_count = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		if (list_empty(&qh->queue)) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, qh);
				if (ret == 0)
					usb_claim_bandwidth(urb->dev, urb,
							bustime, 0);
			}
		} else {	/* inherit from parent */
			struct urb_priv *eurbp;

			eurbp = list_entry(qh->queue.prev, struct urb_priv,
					node);
			urb->bandwidth = eurbp->urb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, qh);
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}
		ret = uhci_submit_isochronous(uhci, urb, qh);
		if (ret == 0)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped)
		uhci_activate_qh(uhci, qh);
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (urbp->qh->type == USB_ENDPOINT_XFER_ISOC)
		uhci_unlink_isochronous_tds(uhci, urb);
	uhci_unlink_qh(uhci, urbp->qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}
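
/*
 * For reference: drivers cancel an URB with usb_unlink_urb() (async) or
 * usb_kill_urb() (blocking); both reach this routine.  The URB then
 * completes through the giveback path below, typically with -ECONNRESET
 * for an async unlink or -ENOENT for a kill:
 *
 *	usb_unlink_urb(urb);	(async; the completion handler still runs)
 *	usb_kill_urb(urb);	(sleeps; must not be called in_interrupt)
 */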
/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Isochronous TDs get unlinked directly from the frame list */
	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		uhci_unlink_isochronous_tds(uhci, urb);

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB. */
	else if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd, *ltd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		ltd = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = ltd->link;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
	uhci_free_urb_priv(uhci, urbp);

	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case USB_ENDPOINT_XFER_INT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB,
			 * so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		break;
	}

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);

		/* Bandwidth stuff not yet implemented */
	}
}
/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct pt_regs *regs)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)	/* Not dequeued */
			urb->status = status;
		else
			status = ECONNRESET;		/* Not -ECONNRESET */
		spin_unlock(&urb->lock);

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (status == ECONNRESET) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, regs);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->status != -EINPROGRESS) {
			uhci_save_toggle(qh, urb);
			uhci_giveback_urb(uhci, qh, urb, regs);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);
		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}
static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
	struct uhci_td *td, *tmp;

	list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
		list_del_init(&td->remove_list);

		uhci_free_td(uhci, td);
	}
}
/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
 rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);
			uhci_scan_qh(uhci, qh, regs);
		}
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped)
		uhci_free_pending_tds(uhci);

	if (list_empty(&uhci->td_remove_list) &&
			list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}
static void check_fsbr(struct uhci_hcd *uhci)
{
	/* For now, don't scan URBs for FSBR timeouts.
	 * Add it back in later... */

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout &&
			time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}