/*
 * Copyright (c) 2001-2002 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/
/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */
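/* A sketch of the structures this file maintains (simplified; the hub of
 * the async ring is ehci->async, and each qh's qtd list ends in a dummy):
 *
 *   ehci->async --> QH (ep A) --> QH (ep B) --> ... --> back to ehci->async
 *                     |
 *                     +-- qtd_list:  qtd --> qtd --> ... --> dummy qtd
 */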
/*-------------------------------------------------------------------------*/
/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
		int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
	qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
			qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_le32 ((count << 16) | token);
	qtd->length = count;
	return count;
}
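/* Example of the buffer math above, assuming a transfer that starts 0x200
 * bytes into a page:  hw_buf [0] covers the remaining 0xe00 bytes of that
 * page, and hw_buf [1..4] cover four full pages, so one qtd can carry up
 * to 0xe00 + 4 * 0x1000 bytes before count gets trimmed back to a
 * multiple of maxpacket.
 */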
/*-------------------------------------------------------------------------*/
static void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END;

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING);
}
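/* Note the &= above keeps only the data toggle and ping state; clearing
 * ACTIVE and HALT in the overlay token is what lets the HC start fetching
 * and executing from the new qtd.
 */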
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}
/*-------------------------------------------------------------------------*/
static void qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely (urb->status != -EINPROGRESS))
		return;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		urb->status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			urb->status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			urb->status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			urb->status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR (token))
				urb->status = -EPIPE;
			else {
				ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint (urb->pipe),
					usb_pipein (urb->pipe) ? "in" : "out");
				urb->status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR (token))
			urb->status = -EPIPE;
		else	/* unknown */
			urb->status = -EPROTO;

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, urb->status);

		/* stall indicates some recovery action is needed */
		if (urb->status == -EPIPE) {
			int pipe = urb->pipe;

			if (!usb_pipecontrol (pipe))
				usb_endpoint_halt (urb->dev,
					usb_pipeendpoint (pipe),
					usb_pipeout (pipe));
			if (urb->dev->tt && !usb_pipeint (pipe)) {
#ifdef DEBUG
				struct usb_device *tt = urb->dev->tt->hub;
				dbg ("clear tt %s-%s p%d buffer, a%d ep%d",
					tt->bus->bus_name, tt->devpath,
					urb->dev->ttport, urb->dev->devnum,
					usb_pipeendpoint (pipe));
#endif /* DEBUG */
				usb_hub_tt_clear_buffer (urb->dev, pipe);
			}
		}
	}
}
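/* The TT in the hub buffers low/full speed transactions on the device's
 * behalf; after a halt, stale data may still sit in that buffer, which is
 * why usb_hub_tt_clear_buffer () asks the hub to flush it before the next
 * urb for this endpoint runs.
 */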
static void
ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb, struct pt_regs *regs)
{
#ifdef	INTR_AUTOMAGIC
	struct urb		*resubmit = 0;
	struct usb_device	*dev = 0;
#endif

	if (likely (urb->hcpriv != 0)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & __constant_cpu_to_le32 (0x00ff)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs--;

#ifdef	INTR_AUTOMAGIC
			if (!((urb->status == -ENOENT)
					|| (urb->status == -ECONNRESET))) {
				resubmit = usb_get_urb (urb);
				dev = urb->dev;
			}
#endif
		}
		qh_put (ehci, qh);
	}

	spin_lock (&urb->lock);
	urb->hcpriv = 0;
	switch (urb->status) {
	case -EINPROGRESS:		/* success */
		urb->status = 0;
	default:			/* fault */
		COUNT (ehci->stats.complete);
		break;
	case -EREMOTEIO:		/* fault or normal */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			urb->status = 0;
		COUNT (ehci->stats.complete);
		break;
	case -ECONNRESET:		/* canceled */
	case -ENOENT:
		COUNT (ehci->stats.unlink);
		break;
	}
	spin_unlock (&urb->lock);

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb (&ehci->hcd, urb, regs);

#ifdef	INTR_AUTOMAGIC
	if (resubmit && ((urb->status == -ENOENT)
				|| (urb->status == -ECONNRESET))) {
		usb_put_urb (resubmit);
		resubmit = 0;
	}
	// device drivers will soon be doing something like this
	if (resubmit) {
		int	status;

		resubmit->dev = dev;
		status = SUBMIT_URB (resubmit, SLAB_ATOMIC);
		if (status != 0)
			err ("can't resubmit interrupt urb %p: status %d",
					resubmit, status);
		usb_put_urb (resubmit);
	}
#endif

	spin_lock (&ehci->lock);
}
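/* NOTE:  ehci->lock is dropped around usb_hcd_giveback_urb () above and
 * re-taken before returning, since a driver's completion handler may
 * resubmit urbs to this same HCD; callers rely on the lock being held
 * again when this returns.
 */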
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs)
{
	struct ehci_qtd		*last = 0, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			stopped;
	unsigned		count = 0;
	int			do_status = 0;
	u8			state;

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done (ehci, last->urb, regs);
				count++;
			}
			ehci_qtd_free (ehci, last);
			last = 0;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = le32_to_cpu (qtd->hw_token);

		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {

			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance */
			} else if (IS_SHORT_READ (token)
					&& (qh->hw_alt_next & QTD_MASK)
						== ehci->async->hw_alt_next) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& HCD_IS_RUNNING (ehci->hcd.state))) {
			break;

		} else {
			stopped = 1;

			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled.  we may patch qh or qtds.
			 */
			if (likely (urb->status == -EINPROGRESS))
				continue;

			/* issue status after short control reads */
			if (unlikely (do_status != 0)
					&& QTD_PID (token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}

			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_le32 (qtd->qtd_dma)
						== qh->hw_current)
				token = le32_to_cpu (qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((HALT_BIT & qh->hw_token) == 0) {
halt:
				qh->hw_token |= HALT_BIT;
				wmb ();
			}
		}

		/* remove it from the queue */
		spin_lock (&urb->lock);
		qtd_copy_status (ehci, urb, qtd->length, token);
		do_status = (urb->status == -EREMOTEIO)
				&& usb_pipecontrol (urb->pipe);
		spin_unlock (&urb->lock);

		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del (&qtd->qtd_list);
		last = qtd;
	}

	/* last urb's completion might still need calling */
	if (likely (last != 0)) {
		ehci_urb_done (ehci, last->urb, regs);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			unlink_async (ehci, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}
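/* Note the invariant above:  this routine only patches the queue (writing
 * hw_next, setting HALT in hw_token) once the queue is stopped, either
 * because the HC halted it or because it was idle/unlinked; afterwards
 * qh_refresh () restarts an idle queue, and unlink_async () finishes
 * taking a linked one out of the schedule.
 */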
/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
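// Example: a high bandwidth interrupt endpoint reporting wMaxPacketSize
// 0x1400 decodes as max_packet() = 0x400 bytes with hb_mult() = 3
// transactions per microframe; bits 12:11 hold "additional transactions".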
/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}
/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	int			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, maxpacket;
	int			is_input;
	u32			token;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return 0;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest),
			token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	if (likely (len > 0))
		buf = urb->transfer_dma;
	else
		buf = 0;

	// FIXME this 'buf' check breaks some zlps...
	if (!buf || is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket);
		len -= this_qtd_len;
		buf += this_qtd_len;
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely (len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (buf != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out" */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill (qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return 0;
}
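/* The resulting chain for a control read, for example, is three or more
 * qtds:  SETUP (8 bytes, DATA0), one or more IN data qtds (starting at
 * DATA1), then a zero length OUT status qtd (forced to DATA1 above).
 */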
/*-------------------------------------------------------------------------*/
/*
 * Hardware maintains data toggle (like OHCI) ... here we (re)initialize
 * the hardware data toggle in the QH, and set the pseudo-toggle in udev
 * so we can see if usb_clear_halt() was called.  NOP for control, since
 * we set up qh->hw_info1 to always use the QTD toggle bits.
 */
static inline void
clear_toggle (struct usb_device *udev, int ep, int is_out, struct ehci_qh *qh)
{
	vdbg ("clear toggle, dev %d ep 0x%x-%s",
		udev->devnum, ep, is_out ? "out" : "in");
	qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE);
	usb_settoggle (udev, ep, is_out, 1);
}
// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.5?)

/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	int			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
				hb_mult (maxp) * max_packet (maxp));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			/* FIXME handle HS periods of less than 1 frame. */
			qh->period = urb->interval >> 3;
			if (qh->period < 1) {
				dbg ("intr period %d uframes, NYET!",
						urb->interval);
				goto done;
			}
		} else {
			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			qh->period = urb->interval;
		}
	}
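	/* In the split case above, usb_calc_bus_time () returns nanoseconds
	 * and each microframe is 125 * 1000 ns, so gap_uf is roughly the
	 * full/low speed transfer time rounded up to whole microframes.
	 */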
	/* support for tt scheduling */
	// qh->dev = usb_get_dev (urb->dev);

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);
		info2 |= urb->dev->ttport << 23;
		info2 |= urb->dev->tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;

	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (ehci, qh);
		return 0;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_le32 (info1);
	qh->hw_info2 = cpu_to_le32 (info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}
/*-------------------------------------------------------------------------*/
/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	u32		dma = QH_NEXT (qh->qh_dma);
	struct ehci_qh	*head;

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = readl (&ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void) handshake (&ehci->regs->status, STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			writel (cmd, &ehci->regs->command);
			ehci->hcd.state = USB_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh (ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}
/*-------------------------------------------------------------------------*/

#define	QH_ADDR_MASK	__constant_cpu_to_le32(0x7f)
/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate the QH it needs.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = 0;

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == 0)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, SLAB_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != 0)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = 0;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching after enumeration */
		if (unlikely (epnum == 0)) {

			/* set_address changes the address */
			if ((qh->hw_info1 & QH_ADDR_MASK) == 0)
				qh->hw_info1 |= cpu_to_le32 (
						usb_pipedevice (urb->pipe));

			/* for full speed, ep0 maxpacket can grow */
			else if (!(qh->hw_info1
					& __constant_cpu_to_le32 (0x3 << 12))) {
				u32	info, max;

				info = le32_to_cpu (qh->hw_info1);
				max = urb->dev->descriptor.bMaxPacketSize0;
				if (max > (0x07ff & (info >> 16))) {
					info &= ~(0x07ff << 16);
					info |= max << 16;
					qh->hw_info1 = cpu_to_le32 (info);
				}
			}

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}

		/* NOTE:  changing config or interface setting is not
		 * supported without the 2.5 endpoint disable logic.
		 */

		/* usb_clear_halt() means qh data toggle gets reset */
		if (unlikely (!usb_gettoggle (urb->dev,
					(epnum & 0x0f), !(epnum & 0x10)))
				&& !usb_pipecontrol (urb->pipe)) {
			/* "never happens": drivers do stall cleanup right */
			if (qh->qh_state != QH_STATE_IDLE
					&& !list_empty (&qh->qtd_list)
					&& qh->qh_state != QH_STATE_COMPLETING)
				ehci_warn (ehci, "clear toggle dev%d "
						"ep%d%s: not idle\n",
						usb_pipedevice (urb->pipe),
						epnum & 0x0f,
						usb_pipein (urb->pipe)
							? "in" : "out");
			/* else we know this overlay write is safe */
			clear_toggle (urb->dev,
					epnum & 0x0f, !(epnum & 0x10), qh);
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_completions() usually modify the overlay.
		 */
		if (likely (qtd != 0)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			u32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT;
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			__list_splice (qtd_list, qh->qtd_list.prev);

			ehci_qtd_init (qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT (dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}
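/* The swap above, step by step:  the old dummy (already linked at the end
 * of the hardware list) takes over the contents of the urb's first qtd,
 * that first qtd's memory becomes the new dummy, and only the final write
 * of the saved token into the old dummy makes any of it ACTIVE.  The HC
 * therefore never sees a partially written queue.
 */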
/*-------------------------------------------------------------------------*/
static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	struct ehci_qtd		*qtd;
	struct hcd_dev		*dev;
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = 0;

	qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
	dev = (struct hcd_dev *)urb->dev->hcpriv;
	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein (urb->pipe) && !usb_pipecontrol (urb->pipe))
		epnum |= 0x10;

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		epnum & 0x0f, usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, dev ? dev->ep [epnum] : (void *)~0);
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh != 0)) {
		if (likely (qh->qh_state == QH_STATE_IDLE))
			qh_link_async (ehci, qh_get (qh));
	}
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == 0)) {
		qtd_list_free (ehci, urb, qtd_list);
		return -ENOMEM;
	}
	return 0;
}
/*-------------------------------------------------------------------------*/

/* the async qh for the qtds being reclaimed is now unlinked from the HC */
static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	timer_action_done (ehci, TIMER_IAA_WATCHDOG);

	// qh->hw_next = cpu_to_le32 (qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = 0;
	qh_put (ehci, qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	ehci->reclaim_ready = 0;
	qh->reclaim = 0;

	qh_completions (ehci, qh, regs);

	if (!list_empty (&qh->qtd_list)
			&& HCD_IS_RUNNING (ehci->hcd.state))
		qh_link_async (ehci, qh);
	else {
		qh_put (ehci, qh);		// refcount from async list

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HCD_IS_RUNNING (ehci->hcd.state)
				&& ehci->async->qh_next.qh == 0)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}

	if (next) {
		ehci->reclaim = 0;
		start_unlink_async (ehci, next);
	}
}
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = readl (&ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
#ifdef CONFIG_SMP
// this macro lies except on SMP compiles
			|| !spin_is_locked (&ehci->lock)
#endif
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci->hcd.state != USB_STATE_HALT) {
			writel (cmd & ~CMD_ASE, &ehci->regs->command);
			wmb ();
			// handshake later, if we need to
		}
		timer_action_done (ehci, TIMER_ASYNC_OFF);
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb ();

	if (unlikely (ehci->hcd.state == USB_STATE_HALT)) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci, NULL);
		return;
	}

	ehci->reclaim_ready = 0;
	cmd |= CMD_IAAD;
	writel (cmd, &ehci->regs->command);
	(void) readl (&ehci->regs->command);
	timer_action (ehci, TIMER_IAA_WATCHDOG);
}
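/* Unlink protocol:  after the qh is spliced out of the software and
 * hardware lists above, CMD_IAAD asks the HC to raise its "interrupt on
 * async advance" once it has surely dropped any cached reference to the
 * qh; end_unlink_async () then runs, with TIMER_IAA_WATCHDOG as a
 * backstop for controllers that never signal the advance.
 */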
/*-------------------------------------------------------------------------*/

static void
scan_async (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	if (!++(ehci->stamp))
		ehci->stamp++;
	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
	qh = ehci->async->qh_next.qh;
	if (likely (qh != 0)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty (&qh->qtd_list)
					&& qh->stamp != ehci->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get (qh);
				qh->stamp = ehci->stamp;
				temp = qh_completions (ehci, qh, regs);
				qh_put (ehci, qh);
				if (temp != 0)
					goto rescan;
			}

			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty (&qh->qtd_list)) {
				if (qh->stamp == ehci->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!ehci->reclaim
					    && qh->qh_state == QH_STATE_LINKED)
					start_unlink_async (ehci, qh);
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}
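/* The stamp protocol above is what keeps the rescan loop bounded:  each
 * qh is tagged with the current scan's stamp before its completions run,
 * so after a "goto rescan" (needed because qh_completions () can drop
 * ehci->lock and let the schedule change underneath us), already-handled
 * qhs are skipped rather than processed twice.
 */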