[PATCH] UHCI: use dummy TDs
drivers/usb/host/uhci-q.c
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu
 */

static void uhci_free_pending_tds(struct uhci_hcd *uhci);

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
        if (uhci->is_stopped)
                mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
        uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
        dma_addr_t dma_handle;
        struct uhci_td *td;

        td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
        if (!td)
                return NULL;

        td->dma_handle = dma_handle;
        td->frame = -1;

        INIT_LIST_HEAD(&td->list);
        INIT_LIST_HEAD(&td->remove_list);
        INIT_LIST_HEAD(&td->fl_list);

        return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        if (!list_empty(&td->list))
                dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
        if (!list_empty(&td->remove_list))
                dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
        if (!list_empty(&td->fl_list))
                dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

        dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
                u32 token, u32 buffer)
{
        td->status = cpu_to_le32(status);
        td->token = cpu_to_le32(token);
        td->buffer = cpu_to_le32(buffer);
}
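
/*
 * Illustrative sketch (not in the original file): a queue's dummy TD is
 * built inactive, with a zero-length OUT token and no buffer, exactly as
 * the submit paths below do:
 *
 *	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
 *
 * The controller skips an inactive TD, so a whole chain of new TDs can be
 * handed over later by the single write that sets the old dummy's
 * TD_CTRL_ACTIVE bit.
 */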

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td, unsigned framenum)
{
        framenum &= (UHCI_NUMFRAMES - 1);

        td->frame = framenum;

        /* Is there a TD already mapped there? */
        if (uhci->frame_cpu[framenum]) {
                struct uhci_td *ftd, *ltd;

                ftd = uhci->frame_cpu[framenum];
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

                list_add_tail(&td->fl_list, &ftd->fl_list);

                td->link = ltd->link;
                wmb();
                ltd->link = cpu_to_le32(td->dma_handle);
        } else {
                td->link = uhci->frame[framenum];
                wmb();
                uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
                uhci->frame_cpu[framenum] = td;
        }
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td)
{
        /* If it's not inserted, don't remove it */
        if (td->frame == -1) {
                WARN_ON(!list_empty(&td->fl_list));
                return;
        }

        if (uhci->frame_cpu[td->frame] == td) {
                if (list_empty(&td->fl_list)) {
                        uhci->frame[td->frame] = td->link;
                        uhci->frame_cpu[td->frame] = NULL;
                } else {
                        struct uhci_td *ntd;

                        ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
                        uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
                        uhci->frame_cpu[td->frame] = ntd;
                }
        } else {
                struct uhci_td *ptd;

                ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
                ptd->link = td->link;
        }

        list_del_init(&td->fl_list);
        td->frame = -1;
}
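
/*
 * Note: removing a TD from the frame list only unlinks it from the
 * hardware's view; the controller may still be holding a reference to
 * the TD during the current frame.  That's why freeing is deferred
 * through td_remove_list until at least one frame has passed (see
 * uhci_free_urb_priv() and uhci_free_pending_tds() below).
 */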

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
        struct uhci_td *td;

        list_for_each_entry(td, &urbp->td_list, list)
                uhci_remove_td_from_frame_list(uhci, td);
        wmb();
}

/*
 * Remove an URB's TDs from the hardware schedule
 */
static void uhci_remove_tds_from_schedule(struct uhci_hcd *uhci,
                struct urb *urb, int status)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        /* Isochronous TDs get unlinked directly from the frame list */
        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                uhci_unlink_isochronous_tds(uhci, urb);
                return;
        }

        /* If the URB isn't first on its queue, adjust the link pointer
         * of the last TD in the previous URB. */
        if (urbp->node.prev != &urbp->qh->queue) {
                struct urb_priv *purbp;
                struct uhci_td *ptd, *ltd;

                if (status == -EINPROGRESS)
                        status = 0;
                purbp = list_entry(urbp->node.prev, struct urb_priv, node);
                ptd = list_entry(purbp->td_list.prev, struct uhci_td,
                                list);
                ltd = list_entry(urbp->td_list.prev, struct uhci_td,
                                list);
                ptd->link = ltd->link;
        }

        /* If the URB completed with an error, then the QH element certainly
         * points to one of the URB's TDs.  If it completed normally then
         * the QH element has certainly moved on to the next URB.  And if
         * the URB is still in progress then it must have been dequeued.
         * The QH element either hasn't reached it yet or is somewhere in
         * the middle.  If the URB wasn't first we can assume that it
         * hasn't started yet (see above): Otherwise all the preceding URBs
         * would have completed and been removed from the queue, so this one
         * _would_ be first.
         *
         * If the QH element is inside this URB, clear it.  It will be
         * set properly when the QH is activated.
         */
        if (status < 0)
                urbp->qh->element = UHCI_PTR_TERM;
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
                struct usb_device *udev, struct usb_host_endpoint *hep)
{
        dma_addr_t dma_handle;
        struct uhci_qh *qh;

        qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
        if (!qh)
                return NULL;

        qh->dma_handle = dma_handle;

        qh->element = UHCI_PTR_TERM;
        qh->link = UHCI_PTR_TERM;

        INIT_LIST_HEAD(&qh->queue);
        INIT_LIST_HEAD(&qh->node);

        if (udev) {             /* Normal QH */
                qh->dummy_td = uhci_alloc_td(uhci);
                if (!qh->dummy_td) {
                        dma_pool_free(uhci->qh_pool, qh, dma_handle);
                        return NULL;
                }
                qh->state = QH_STATE_IDLE;
                qh->hep = hep;
                qh->udev = udev;
                hep->hcpriv = qh;
                usb_get_dev(udev);

        } else {                /* Skeleton QH */
                qh->state = QH_STATE_ACTIVE;
                qh->udev = NULL;
        }
        return qh;
}
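
/*
 * Illustrative usage (sketch): an endpoint's QH is created on its first
 * submit and cached in hep->hcpriv, as uhci_urb_enqueue() below does:
 *
 *	if (hep->hcpriv)
 *		qh = (struct uhci_qh *) hep->hcpriv;
 *	else
 *		qh = uhci_alloc_qh(uhci, urb->dev, hep);
 *
 * Skeleton QHs are allocated with udev == NULL and get no dummy TD.
 */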

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
        if (!list_empty(&qh->queue))
                dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

        list_del(&qh->node);
        if (qh->udev) {
                qh->hep->hcpriv = NULL;
                usb_put_dev(qh->udev);
                uhci_free_td(uhci, qh->dummy_td);
        }
        dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        WARN_ON(list_empty(&qh->queue));

        /* Set the element pointer if it isn't set already.
         * This isn't needed for Isochronous queues, but it doesn't hurt. */
        if (qh_element(qh) == UHCI_PTR_TERM) {
                struct urb_priv *urbp = list_entry(qh->queue.next,
                                struct urb_priv, node);
                struct uhci_td *td = list_entry(urbp->td_list.next,
                                struct uhci_td, list);

                qh->element = cpu_to_le32(td->dma_handle);
        }

        if (qh->state == QH_STATE_ACTIVE)
                return;
        qh->state = QH_STATE_ACTIVE;

        /* Move the QH from its old list to the end of the appropriate
         * skeleton's list */
        list_move_tail(&qh->node, &qh->skel->node);

        /* Link it into the schedule */
        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        qh->link = pqh->link;
        wmb();
        pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}
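
/*
 * Note: the wmb() above guarantees that qh->link is valid before the
 * write to pqh->link makes this QH reachable, so the controller can
 * never follow a stale horizontal pointer out of the new QH.
 */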

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        if (qh->state == QH_STATE_UNLINKING)
                return;
        WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
        qh->state = QH_STATE_UNLINKING;

        /* Unlink the QH from the schedule and record when we did it */
        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        pqh->link = qh->link;
        mb();

        uhci_get_current_frame_number(uhci);
        qh->unlink_frame = uhci->frame_number;

        /* Force an interrupt so we know when the QH is fully unlinked */
        if (list_empty(&uhci->skel_unlink_qh->node))
                uhci_set_next_interrupt(uhci);

        /* Move the QH from its old list to the end of the unlinking list */
        list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}
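
/*
 * Note: after pqh->link is rewritten, the controller may still be
 * working from a previously fetched copy of the old pointer for the
 * rest of the current frame.  unlink_frame records when the unlink
 * happened; the QH isn't treated as fully unlinked until the frame
 * number has advanced (see uhci_scan_unlinking_qhs() below).
 */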

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state == QH_STATE_ACTIVE);

        list_move(&qh->node, &uhci->idle_qh_list);
        qh->state = QH_STATE_IDLE;

        /* If anyone is waiting for a QH to become idle, wake them up */
        if (uhci->num_waiting)
                wake_up_all(&uhci->waitqh);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
                struct urb *urb)
{
        struct urb_priv *urbp;

        urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
        if (!urbp)
                return NULL;

        memset((void *)urbp, 0, sizeof(*urbp));

        urbp->urb = urb;
        urb->hcpriv = urbp;
        urbp->fsbrtime = jiffies;

        INIT_LIST_HEAD(&urbp->node);
        INIT_LIST_HEAD(&urbp->td_list);
        INIT_LIST_HEAD(&urbp->urb_list);

        return urbp;
}

static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urb(struct uhci_td *td)
{
        if (list_empty(&td->list))
                return;

        list_del_init(&td->list);
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
                struct urb_priv *urbp)
{
        struct uhci_td *td, *tmp;

        if (!list_empty(&urbp->urb_list))
                dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list!\n",
                                urbp->urb);
        if (!list_empty(&urbp->node))
                dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
                                urbp->urb);

        uhci_get_current_frame_number(uhci);
        if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
                uhci_free_pending_tds(uhci);
                uhci->td_remove_age = uhci->frame_number;
        }

        /* Check to see if the remove list is empty.  Set the IOC bit
         * to force an interrupt so we can remove the TDs. */
        if (list_empty(&uhci->td_remove_list))
                uhci_set_next_interrupt(uhci);

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                uhci_remove_td_from_urb(td);
                list_add(&td->remove_list, &uhci->td_remove_list);
        }

        urbp->urb->hcpriv = NULL;
        kmem_cache_free(uhci_up_cachep, urbp);
}

static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
                urbp->fsbr = 1;
                if (!uhci->fsbr++ && !uhci->fsbrtimeout)
                        uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
        }
}

static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
                urbp->fsbr = 0;
                if (!--uhci->fsbr)
                        uhci->fsbrtimeout = jiffies + FSBR_DELAY;
        }
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
        if (!status)
                return 0;
        if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
                return -EPROTO;
        if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
                if (dir_out)
                        return -EPROTO;
                else
                        return -EILSEQ;
        }
        if (status & TD_CTRL_BABBLE)                    /* Babble */
                return -EOVERFLOW;
        if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
                return -ENOSR;
        if (status & TD_CTRL_STALLED)                   /* Stalled */
                return -EPIPE;
        WARN_ON(status & TD_CTRL_ACTIVE);               /* Active */
        return 0;
}
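
/*
 * Example: a CRC/timeout error on an IN transfer maps to -EILSEQ, while
 * the same bit on an OUT transfer (where it generally means the device
 * never handshook) maps to -EPROTO; a stalled endpoint always yields
 * -EPIPE.
 */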

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct urb *urb)
{
        struct list_head *head;
        struct uhci_td *td;
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
        int prevactive = 0;
        unsigned int toggle = 0;
        struct urb_priv *turbp, *list_end;

        /*
         * We need to find out what the last successful toggle was so
         * we can update the data toggles for the following transfers.
         *
         * There are 2 ways the last successful completed TD is found:
         *
         * 1) The TD is NOT active and the actual length < expected length
         * 2) The TD is NOT active and it's the last TD in the chain
         *
         * and a third way the first uncompleted TD is found:
         *
         * 3) The TD is active and the previous TD is NOT active
         */
        head = &urbp->td_list;
        list_for_each_entry(td, head, list) {
                unsigned int ctrlstat = td_status(td);

                if (!(ctrlstat & TD_CTRL_ACTIVE) &&
                                (uhci_actual_length(ctrlstat) <
                                 uhci_expected_length(td_token(td)) ||
                                td->list.next == head))
                        toggle = uhci_toggle(td_token(td)) ^ 1;
                else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
                        toggle = uhci_toggle(td_token(td));

                prevactive = ctrlstat & TD_CTRL_ACTIVE;
        }

        /*
         * Fix up the toggle for the following URBs in the queue.
         *
         * We can stop as soon as we find an URB with toggles set correctly,
         * because then all the following URBs will be correct also.
         */
        list_end = list_entry(&urbp->qh->queue, struct urb_priv, node);
        turbp = urbp;
        while ((turbp = list_entry(turbp->node.next, struct urb_priv, node))
                        != list_end) {
                td = list_entry(turbp->td_list.next, struct uhci_td, list);
                if (uhci_toggle(td_token(td)) == toggle)
                        return;

                list_for_each_entry(td, &turbp->td_list, list) {
                        td->token ^= __constant_cpu_to_le32(TD_TOKEN_TOGGLE);
                        toggle ^= 1;
                }
        }

        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe), toggle);
}
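
/*
 * Worked example (sketch, not from the original file): suppose an URB
 * queued three full-size OUT packets with toggles 0, 1, 0 and the
 * transfer ended short after the second packet.  The last completed TD
 * carries toggle 1, so the next transfer must start with toggle 0; any
 * following queued URB whose first TD already carries toggle 0 is
 * correct and the walk stops there.
 */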

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __le32 *plink;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;

        /*
         * Build the TD for the control request setup packet
         */
        td = qh->dummy_td;
        uhci_add_td_to_urb(urb, td);
        uhci_fill_td(td, status, destination | uhci_explen(8),
                        urb->setup_dma);
        plink = &td->link;
        status |= TD_CTRL_ACTIVE;

        /*
         * If direction is "send", change the packet ID from SETUP (0x2D)
         * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
         * set Short Packet Detect (SPD) for all data packets.
         */
        if (usb_pipeout(urb->pipe))
                destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
        else {
                destination ^= (USB_PID_SETUP ^ USB_PID_IN);
                status |= TD_CTRL_SPD;
        }

        /*
         * Build the DATA TDs
         */
        while (len > 0) {
                int pktsze = min(len, maxsze);

                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = cpu_to_le32(td->dma_handle);

                /* Alternate Data0/1 (start with Data1) */
                destination ^= TD_TOKEN_TOGGLE;

                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status, destination | uhci_explen(pktsze),
                                data);
                plink = &td->link;

                data += pktsze;
                len -= pktsze;
        }

        /*
         * Build the final TD for control status
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = cpu_to_le32(td->dma_handle);

        /*
         * It's IN if the pipe is an output pipe or we're not expecting
         * data back.
         */
        destination &= ~TD_TOKEN_PID_MASK;
        if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
                destination |= USB_PID_IN;
        else
                destination |= USB_PID_OUT;

        destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */

        status &= ~TD_CTRL_SPD;

        uhci_add_td_to_urb(urb, td);
        uhci_fill_td(td, status | TD_CTRL_IOC,
                        destination | uhci_explen(0), 0);
        plink = &td->link;

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = cpu_to_le32(td->dma_handle);

        uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();
        qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        /* Low-speed transfers get a different queue, and won't hog the bus.
         * Also, some devices enumerate better without FSBR; the easiest way
         * to do that is to put URBs on the low-speed queue while the device
         * isn't in the CONFIGURED state. */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->state != USB_STATE_CONFIGURED)
                qh->skel = uhci->skel_ls_control_qh;
        else {
                qh->skel = uhci->skel_fs_control_qh;
                uhci_inc_fsbr(uhci, urb);
        }
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urb(qh->dummy_td);
        return -ENOMEM;
}
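
/*
 * The resulting chain for a control transfer looks like (sketch):
 *
 *	[SETUP] -> [DATA1] -> [DATA0] -> ... -> [STATUS, IOC] -> [new dummy]
 *
 * where [SETUP] is the QH's old dummy TD and the new dummy is inactive.
 * The status write just above activates [SETUP] only after everything
 * else is linked, so the controller always sees a complete chain.
 */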

/*
 * If control-IN transfer was short, the status packet wasn't sent.
 * This routine changes the element pointer in the QH to point at the
 * status TD.  It's safe to do this even while the QH is live, because
 * the hardware only updates the element pointer following a successful
 * transfer.  The inactive TD for the short packet won't cause an update,
 * so the pointer won't get overwritten.  The next time the controller
 * sees this QH, it will send the status packet.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct uhci_td *td;

        urbp->short_transfer = 1;

        td = list_entry(urbp->td_list.prev, struct uhci_td, list);
        urbp->qh->element = cpu_to_le32(td->dma_handle);

        return -EINPROGRESS;
}


static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
        struct list_head *tmp, *head;
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        unsigned int status;
        int ret = 0;

        head = &urbp->td_list;
        if (urbp->short_transfer) {
                tmp = head->prev;
                goto status_stage;
        }

        urb->actual_length = 0;

        tmp = head->next;
        td = list_entry(tmp, struct uhci_td, list);

        /* The first TD is the SETUP stage; check the status, but
         * skip the count */
        status = uhci_status_bits(td_status(td));
        if (status & TD_CTRL_ACTIVE)
                return -EINPROGRESS;

        if (status)
                goto td_error;

        /* The rest of the TDs (but the last) are data */
        tmp = tmp->next;
        while (tmp != head && tmp->next != head) {
                unsigned int ctrlstat;

                td = list_entry(tmp, struct uhci_td, list);
                tmp = tmp->next;

                ctrlstat = td_status(td);
                status = uhci_status_bits(ctrlstat);
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                urb->actual_length += uhci_actual_length(ctrlstat);

                if (status)
                        goto td_error;

                /* Check to see if we received a short packet */
                if (uhci_actual_length(ctrlstat) <
                                uhci_expected_length(td_token(td))) {
                        if (urb->transfer_flags & URB_SHORT_NOT_OK) {
                                ret = -EREMOTEIO;
                                goto err;
                        }

                        return usb_control_retrigger_status(uhci, urb);
                }
        }

status_stage:
        td = list_entry(tmp, struct uhci_td, list);

        /* Control status stage */
        status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
        /* APC BackUPS Pro kludge: it tries to send all of the
         * descriptor instead of the amount we requested */
        if (status & TD_CTRL_IOC &&     /* IOC is masked out by uhci_status_bits */
            status & TD_CTRL_ACTIVE &&
            status & TD_CTRL_NAK)
                return 0;
#endif

        status = uhci_status_bits(status);
        if (status & TD_CTRL_ACTIVE)
                return -EINPROGRESS;

        if (status)
                goto td_error;

        return 0;

td_error:
        ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                /* Some debugging code */
                dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
                                __FUNCTION__, status);

                if (errbuf) {
                        /* Print the chain for debugging purposes */
                        uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

                        lprintk(errbuf);
                }
        }

        return ret;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __le32 *plink;
        unsigned int toggle;

        if (len < 0)
                return -EINVAL;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
        toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                         usb_pipeout(urb->pipe));

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;
        if (usb_pipein(urb->pipe))
                status |= TD_CTRL_SPD;

        /*
         * Build the DATA TDs
         */
        plink = NULL;
        td = qh->dummy_td;
        do {    /* Allow zero length packets */
                int pktsze = maxsze;

                if (len <= pktsze) {            /* The last packet */
                        pktsze = len;
                        if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                                status &= ~TD_CTRL_SPD;
                }

                if (plink) {
                        td = uhci_alloc_td(uhci);
                        if (!td)
                                goto nomem;
                        *plink = cpu_to_le32(td->dma_handle);
                }
                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status,
                                destination | uhci_explen(pktsze) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;
                status |= TD_CTRL_ACTIVE;

                data += pktsze;
                len -= maxsze;
                toggle ^= 1;
        } while (len > 0);

        /*
         * URB_ZERO_PACKET means adding a 0-length packet, if direction
         * is OUT and the transfer_length was an exact multiple of maxsze,
         * hence (len = transfer_length - N * maxsze) == 0.
         * However, if transfer_length == 0, the zero packet was already
         * prepared above.
         */
        if ((urb->transfer_flags & URB_ZERO_PACKET) &&
                        usb_pipeout(urb->pipe) && len == 0 &&
                        urb->transfer_buffer_length > 0) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = cpu_to_le32(td->dma_handle);

                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status,
                                destination | uhci_explen(0) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;

                toggle ^= 1;
        }

        /* Set the interrupt-on-completion flag on the last packet.
         * A more-or-less typical 4 KB URB (= size of one memory page)
         * will require about 3 ms to transfer; that's a little on the
         * fast side but not enough to justify delaying an interrupt
         * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
         * flag setting. */
        td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = cpu_to_le32(td->dma_handle);

        uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();
        qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe), toggle);
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urb(qh->dummy_td);
        return -ENOMEM;
}
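
/*
 * This is the heart of the dummy-TD scheme: the queue's old dummy TD
 * becomes the first real TD of the new URB, a fresh inactive dummy is
 * linked on at the end, and the single write that sets TD_CTRL_ACTIVE
 * on the old dummy hands the whole chain to the controller at once.
 * No QH element-pointer manipulation is needed while the QH is live.
 */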

/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        unsigned int status = 0;
        int ret = 0;

        urb->actual_length = 0;

        list_for_each_entry(td, &urbp->td_list, list) {
                unsigned int ctrlstat = td_status(td);

                status = uhci_status_bits(ctrlstat);
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                urb->actual_length += uhci_actual_length(ctrlstat);

                if (status)
                        goto td_error;

                if (uhci_actual_length(ctrlstat) <
                                uhci_expected_length(td_token(td))) {
                        if (urb->transfer_flags & URB_SHORT_NOT_OK) {
                                ret = -EREMOTEIO;
                                goto err;
                        }

                        /*
                         * This URB stopped short of its end.  We have to
                         * fix up the toggles of the following URBs on the
                         * queue and restart the queue.
                         *
                         * Do this only the first time we encounter the
                         * short URB.
                         */
                        if (!urbp->short_transfer) {
                                urbp->short_transfer = 1;
                                uhci_fixup_toggles(urb);
                                td = list_entry(urbp->td_list.prev,
                                                struct uhci_td, list);
                                urbp->qh->element = td->link;
                        }
                        break;
                }
        }

        return 0;

td_error:
        ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
        /*
         * Enable this chunk of code if you want to see some more debugging.
         * But be careful, it has the tendency to starve out khubd and prevent
         * disconnects from happening successfully if you have a slow debug
         * log interface (like a serial console).
         */
#if 0
        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                /* Some debugging code */
                dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
                                __FUNCTION__, status);

                if (errbuf) {
                        /* Print the chain for debugging purposes */
                        uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

                        lprintk(errbuf);
                }
        }
#endif
        return ret;
}

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* Can't have low-speed bulk transfers */
        if (urb->dev->speed == USB_SPEED_LOW)
                return -EINVAL;

        qh->skel = uhci->skel_bulk_qh;
        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0)
                uhci_inc_fsbr(uhci, urb);
        return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        /* USB 1.1 interrupt transfers only involve one packet per interval.
         * Drivers can submit URBs of any length, but longer ones will need
         * multiple intervals to complete.
         */
        qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)];
        return uhci_submit_common(uhci, urb, qh);
}

/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
        struct urb *last_urb = NULL;
        struct urb_priv *up;
        int ret = 0;

        list_for_each_entry(up, &uhci->urb_list, urb_list) {
                struct urb *u = up->urb;

                /* look for pending URBs with identical pipe handle */
                if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
                    (u->status == -EINPROGRESS) && (u != urb)) {
                        if (!last_urb)
                                *start = u->start_frame;
                        last_urb = u;
                }
        }

        if (last_urb) {
                *end = (last_urb->start_frame + last_urb->number_of_packets *
                                last_urb->interval) & (UHCI_NUMFRAMES-1);
                ret = 0;
        } else
                ret = -1;       /* no previous urb found */

        return ret;
}

static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
        int limits;
        unsigned int start = 0, end = 0;

        if (urb->number_of_packets > 900)       /* 900? Why? */
                return -EFBIG;

        limits = isochronous_find_limits(uhci, urb, &start, &end);

        if (urb->transfer_flags & URB_ISO_ASAP) {
                if (limits) {
                        uhci_get_current_frame_number(uhci);
                        urb->start_frame = (uhci->frame_number + 10)
                                        & (UHCI_NUMFRAMES - 1);
                } else
                        urb->start_frame = end;
        } else {
                urb->start_frame &= (UHCI_NUMFRAMES - 1);
                /* FIXME: Sanity check */
        }

        return 0;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td = NULL;      /* Since urb->number_of_packets > 0 */
        int i, ret, frame;
        unsigned long destination, status;
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

        ret = isochronous_find_start(uhci, urb);
        if (ret)
                return ret;

        for (i = 0; i < urb->number_of_packets; i++) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        return -ENOMEM;

                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status, destination |
                                uhci_explen(urb->iso_frame_desc[i].length),
                                urb->transfer_dma +
                                        urb->iso_frame_desc[i].offset);
        }

        /* Set the interrupt-on-completion flag on the last packet. */
        td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

        qh->skel = uhci->skel_iso_qh;

        /* Add the TDs to the frame list */
        frame = urb->start_frame;
        list_for_each_entry(td, &urbp->td_list, list) {
                uhci_insert_td_in_frame_list(uhci, td, frame);
                frame += urb->interval;
        }

        return 0;
}
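
/*
 * Note: isochronous TDs never go through the QH's element pointer;
 * they are linked directly into uhci->frame[] at their target frame
 * numbers, one TD per packet, spaced urb->interval frames apart.
 */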

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td;
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        int status;
        int i, ret = 0;

        urb->actual_length = urb->error_count = 0;

        i = 0;
        list_for_each_entry(td, &urbp->td_list, list) {
                int actlength;
                unsigned int ctrlstat = td_status(td);

                if (ctrlstat & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                actlength = uhci_actual_length(ctrlstat);
                urb->iso_frame_desc[i].actual_length = actlength;
                urb->actual_length += actlength;

                status = uhci_map_status(uhci_status_bits(ctrlstat),
                                usb_pipeout(urb->pipe));
                urb->iso_frame_desc[i].status = status;
                if (status) {
                        urb->error_count++;
                        ret = status;
                }

                i++;
        }

        return ret;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
                struct usb_host_endpoint *hep,
                struct urb *urb, gfp_t mem_flags)
{
        int ret;
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct urb_priv *urbp;
        struct uhci_qh *qh;
        int bustime;

        spin_lock_irqsave(&uhci->lock, flags);

        ret = urb->status;
        if (ret != -EINPROGRESS)                /* URB already unlinked! */
                goto done;

        ret = -ENOMEM;
        urbp = uhci_alloc_urb_priv(uhci, urb);
        if (!urbp)
                goto done;

        if (hep->hcpriv)
                qh = (struct uhci_qh *) hep->hcpriv;
        else {
                qh = uhci_alloc_qh(uhci, urb->dev, hep);
                if (!qh)
                        goto err_no_qh;
        }
        urbp->qh = qh;

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                ret = uhci_submit_control(uhci, urb, qh);
                break;
        case PIPE_BULK:
                ret = uhci_submit_bulk(uhci, urb, qh);
                break;
        case PIPE_INTERRUPT:
                if (list_empty(&qh->queue)) {
                        bustime = usb_check_bandwidth(urb->dev, urb);
                        if (bustime < 0)
                                ret = bustime;
                        else {
                                ret = uhci_submit_interrupt(uhci, urb, qh);
                                if (ret == 0)
                                        usb_claim_bandwidth(urb->dev, urb, bustime, 0);
                        }
                } else {        /* inherit from parent */
                        struct urb_priv *eurbp;

                        eurbp = list_entry(qh->queue.prev, struct urb_priv,
                                        node);
                        urb->bandwidth = eurbp->urb->bandwidth;
                        ret = uhci_submit_interrupt(uhci, urb, qh);
                }
                break;
        case PIPE_ISOCHRONOUS:
                bustime = usb_check_bandwidth(urb->dev, urb);
                if (bustime < 0) {
                        ret = bustime;
                        break;
                }

                ret = uhci_submit_isochronous(uhci, urb, qh);
                if (ret == 0)
                        usb_claim_bandwidth(urb->dev, urb, bustime, 1);
                break;
        }
        if (ret != 0)
                goto err_submit_failed;

        /* Add this URB to the QH */
        urbp->qh = qh;
        list_add_tail(&urbp->node, &qh->queue);
        list_add_tail(&urbp->urb_list, &uhci->urb_list);

        /* If the new URB is the first and only one on this QH then either
         * the QH is new and idle or else it's unlinked and waiting to
         * become idle, so we can activate it right away. */
        if (qh->queue.next == &urbp->node)
                uhci_activate_qh(uhci, qh);
        goto done;

err_submit_failed:
        if (qh->state == QH_STATE_IDLE)
                uhci_make_qh_idle(uhci, qh);    /* Reclaim unused QH */

err_no_qh:
        uhci_free_urb_priv(uhci, urbp);

done:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return ret;
}

/*
 * Return the result of a transfer
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
        int status;
        int okay_to_giveback = 0;
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                status = uhci_result_control(uhci, urb);
                break;
        case PIPE_ISOCHRONOUS:
                status = uhci_result_isochronous(uhci, urb);
                break;
        default:        /* PIPE_BULK or PIPE_INTERRUPT */
                status = uhci_result_common(uhci, urb);
                break;
        }

        spin_lock(&urb->lock);
        if (urb->status == -EINPROGRESS) {      /* Not yet dequeued */
                if (status != -EINPROGRESS) {   /* URB has completed */
                        urb->status = status;

                        /* If the URB got a real error (as opposed to
                         * simply being dequeued), we don't have to
                         * unlink the QH.  Fix this later... */
                        if (status < 0)
                                uhci_unlink_qh(uhci, urbp->qh);
                        else
                                okay_to_giveback = 1;
                }
        } else {                                /* Already dequeued */
                if (urbp->qh->state == QH_STATE_UNLINKING &&
                                uhci->frame_number + uhci->is_stopped !=
                                urbp->qh->unlink_frame)
                        okay_to_giveback = 1;
        }
        spin_unlock(&urb->lock);
        if (!okay_to_giveback)
                return;

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_ISOCHRONOUS:
                /* Release bandwidth for Interrupt or Isoc. transfers */
                if (urb->bandwidth)
                        usb_release_bandwidth(urb->dev, urb, 1);
                break;
        case PIPE_INTERRUPT:
                /* Release bandwidth for Interrupt or Isoc. transfers */
                /* Make sure we don't release if we have a queued URB */
                if (list_empty(&urbp->qh->queue) && urb->bandwidth)
                        usb_release_bandwidth(urb->dev, urb, 0);
                else
                        /* Bandwidth was passed on to the queued URB,
                         * so don't let usb_unlink_urb() release it */
                        urb->bandwidth = 0;
                /* Falls through */
        case PIPE_BULK:
                if (status < 0)
                        uhci_fixup_toggles(urb);
                break;
        default:        /* PIPE_CONTROL */
                break;
        }

        /* Take the URB's TDs off the hardware schedule */
        uhci_remove_tds_from_schedule(uhci, urb, status);

        /* Take the URB off the QH's queue and see if the QH is now unused */
        list_del_init(&urbp->node);
        if (list_empty(&urbp->qh->queue))
                uhci_unlink_qh(uhci, urbp->qh);

        uhci_dec_fsbr(uhci, urb);       /* Safe since it checks */

        /* Queue it for giving back */
        list_move_tail(&urbp->urb_list, &uhci->complete_list);
}

/*
 * Check out the QHs waiting to be fully unlinked
 */
static void uhci_scan_unlinking_qhs(struct uhci_hcd *uhci)
{
        struct uhci_qh *qh, *tmp;

        list_for_each_entry_safe(qh, tmp, &uhci->skel_unlink_qh->node, node) {

                /* If the queue is empty and the QH is fully unlinked then
                 * it can become IDLE. */
                if (list_empty(&qh->queue)) {
                        if (uhci->frame_number + uhci->is_stopped !=
                                        qh->unlink_frame)
                                uhci_make_qh_idle(uhci, qh);

                /* If none of the QH's URBs have been dequeued then the QH
                 * should be re-activated. */
                } else {
                        struct urb_priv *urbp;
                        int any_dequeued = 0;

                        list_for_each_entry(urbp, &qh->queue, node) {
                                if (urbp->urb->status != -EINPROGRESS) {
                                        any_dequeued = 1;
                                        break;
                                }
                        }
                        if (!any_dequeued)
                                uhci_activate_qh(uhci, qh);
                }
        }
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct urb_priv *urbp;

        spin_lock_irqsave(&uhci->lock, flags);
        urbp = urb->hcpriv;
        if (!urbp)                      /* URB was never linked! */
                goto done;

        /* Remove Isochronous TDs from the frame list ASAP */
        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
                uhci_unlink_isochronous_tds(uhci, urb);
        uhci_unlink_qh(uhci, urbp->qh);

done:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return 0;
}

static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct list_head *head;
        struct uhci_td *td;
        int count = 0;

        uhci_dec_fsbr(uhci, urb);

        urbp->fsbr_timeout = 1;

        /*
         * Ideally we would want to fix qh->element as well, but it's
         * read/write by the HC, so that can introduce a race. It's not
         * really worth the hassle.
         */

        head = &urbp->td_list;
        list_for_each_entry(td, head, list) {
                /*
                 * Make sure we don't do the last one (since it'll have the
                 * TERM bit set), and skip every so many TDs to make sure
                 * it doesn't hog the bandwidth
                 */
                if (td->list.next != head && (count % DEPTH_INTERVAL) ==
                                (DEPTH_INTERVAL - 1))
                        td->link |= UHCI_PTR_DEPTH;

                count++;
        }

        return 0;
}
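
/*
 * Note (sketch of the intent, per the comment above): only every
 * DEPTH_INTERVAL-th TD gets a depth-first (Vf) link here; the others
 * remain breadth-first, which keeps the controller from executing the
 * whole chain back-to-back once FSBR has timed out.
 */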

static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
        struct uhci_td *td, *tmp;

        list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
                list_del_init(&td->remove_list);

                uhci_free_td(uhci, td);
        }
}

static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);

        uhci_free_urb_priv(uhci, (struct urb_priv *) (urb->hcpriv));

        spin_unlock(&uhci->lock);
        usb_hcd_giveback_urb(hcd, urb, regs);
        spin_lock(&uhci->lock);
}

static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
{
        struct urb_priv *urbp, *tmp;

        list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
                struct urb *urb = urbp->urb;

                list_del_init(&urbp->urb_list);
                uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
        }
}

/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
        struct urb_priv *urbp, *tmp;

        /* Don't allow re-entrant calls */
        if (uhci->scan_in_progress) {
                uhci->need_rescan = 1;
                return;
        }
        uhci->scan_in_progress = 1;
rescan:
        uhci->need_rescan = 0;

        uhci_clear_next_interrupt(uhci);
        uhci_get_current_frame_number(uhci);

        if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
                uhci_free_pending_tds(uhci);

        /* Walk the list of pending URBs to see which ones completed
         * (must be _safe because uhci_transfer_result() dequeues URBs) */
        list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
                struct urb *urb = urbp->urb;

                /* Checks the status and does all of the magic necessary */
                uhci_transfer_result(uhci, urb);
        }
        uhci_finish_completion(uhci, regs);

        /* If the controller is stopped, we can finish these off right now */
        if (uhci->is_stopped)
                uhci_free_pending_tds(uhci);

        if (uhci->need_rescan)
                goto rescan;
        uhci->scan_in_progress = 0;

        /* Check out the QHs waiting for unlinking */
        uhci_scan_unlinking_qhs(uhci);

        if (list_empty(&uhci->td_remove_list) &&
                        list_empty(&uhci->skel_unlink_qh->node))
                uhci_clear_next_interrupt(uhci);
        else
                uhci_set_next_interrupt(uhci);
}

static void check_fsbr(struct uhci_hcd *uhci)
{
        struct urb_priv *up;

        list_for_each_entry(up, &uhci->urb_list, urb_list) {
                struct urb *u = up->urb;

                spin_lock(&u->lock);

                /* Check if the FSBR timed out */
                if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
                        uhci_fsbr_timeout(uhci, u);

                spin_unlock(&u->lock);
        }

        /* Really disable FSBR */
        if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
                uhci->fsbrtimeout = 0;
                uhci->skel_term_qh->link = UHCI_PTR_TERM;
        }
}