/* linux-2.4.21-pre4: drivers/ide/ibm_ocp_ide.c */
/*
 * IDE driver for IBM On-chip IDE controllers
 *    Copyright 2001 - 2002 MontaVista Software Inc.
 *    Dan Malek.
 *
 *    I snagged bits and pieces from a variety of drivers, primarily
 *    ide-pmac.c.....thanks to previous authors!
 *
 *    Version 1.2 (01/30/12) Armin
 *    Converted to ocp
 *    merged up to new ide-timing.h
 *
 *    Version 2.0 (05/02/15) - armin
 *    converted to new core_ocp and only supports one interface for now.
 *
 *    Version 2.1 (05/25/02) - armin
 *      name change from *_driver to *_dev
 *    Version 2.2 06/13/02 - Armin
 *      changed irq_resource array to just irq
 *
 */

#include <linux/types.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/pci.h>
#include <asm/ocp.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

#include "ide-timing.h"
#define OCPVR   "2.3"

/* The structure of the PRD entry.  The address must be word aligned,
 * and the count must be an even number of bytes.
 */
typedef struct {
        unsigned int prd_physptr;
        unsigned int prd_count; /* Count only in lower 16 bits */
} prd_entry_t;
#define PRD_EOT         (uint)0x80000000        /* Set in prd_count */
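/* Illustrative example (editorial note, not from the original source): a
 * contiguous 4 kB buffer at physical address 0x01000000 would be described
 * by a single entry with prd_physptr = 0x01000000 and prd_count = 0x1000;
 * the last entry of a table additionally sets PRD_EOT in prd_count, as done
 * in stb04xxx_ide_build_dmatable() below.
 */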

/* The number of PRDs required in a single transfer from the upper IDE
 * functions.  I believe the maximum number is 128, but most seem to
 * code to 256.  It's probably best to keep this under one page......
 */
#define NUM_PRD 256
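/* Sizing note (editorial): with 8-byte prd_entry_t entries on this 32-bit
 * platform, NUM_PRD * sizeof(prd_entry_t) = 256 * 8 = 2048 bytes, so the
 * whole table fits comfortably within a single 4 kB page.
 */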

#define MK_TIMING(AS, DIOP, DIOY, DH) \
        ((FIT((AS),    0, 15) << 27) | \
         (FIT((DIOP),  0, 63) << 20) | \
         (FIT((DIOY),  0, 63) << 13) | \
         (FIT((DH),    0,  7) << 9))
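/* Worked example (editorial note, not in the original source): the slow
 * default programmed in nonpci_ide_init_hwif_ports(), MK_TIMING(6, 19, 15, 2),
 * clamps each argument with FIT() and packs AS=6 into bits 30:27, DIOP=19
 * into bits 25:20, DIOY=15 into bits 18:13 and DH=2 into bits 11:9,
 * presumably all expressed as counts of SYS_CLK cycles.
 */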

#define UTIMING_SETHLD  (EZ(20 /*tACK*/, SYS_CLOCK_NS) - 1 /*fixed cycles*/)
#define UTIMING_ENV     (EZ(20 /*tENV*/, SYS_CLOCK_NS) - 1 /*fixed cycles*/)
#define UTIMING_SS      (EZ(50 /*tSS */, SYS_CLOCK_NS) - 3 /*fixed cycles*/)
#define MK_UTIMING(CYC, RP) \
        ((FIT(UTIMING_SETHLD, 0, 15) << 27) | \
         (FIT(UTIMING_ENV,    0, 15) << 22) | \
         (FIT((CYC),          0, 15) << 17) | \
         (FIT((RP),           0, 63) << 10) | \
         (FIT(UTIMING_SS,     0, 15) << 5)  | \
         1 /* Turn on Ultra DMA */)
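/* Editorial note: assuming EZ() from ide-timing.h rounds a nanosecond value
 * up to whole SYS_CLOCK_NS ticks, then with SYS_CLOCK_NS = 15 (defined below)
 * the fields above evaluate to UTIMING_SETHLD = 2 - 1 = 1, UTIMING_ENV =
 * 2 - 1 = 1 and UTIMING_SS = 4 - 3 = 1 cycle beyond the controller's fixed
 * cycles.
 */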

/* Define the period of the STB clock used to generate the
 * IDE bus timing.  The clock is actually 63 MHz, but it
 * gets rounded in a favorable direction.
 */
#define IDE_SYS_FREQ    63      /* MHz */
#define SYS_CLOCK_NS    (1000 / IDE_SYS_FREQ)
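/* Editorial note: 1000 / 63 truncates to 15 in integer arithmetic, so
 * SYS_CLOCK_NS evaluates to 15 ns even though the true period of a 63 MHz
 * clock is about 15.9 ns.
 */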
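/* Editorial note: "whold" is not documented here; from its use as the
 * fourth (DH) argument to MK_TIMING() it is presumably an extra data-hold
 * requirement for the slower modes.  The table values look like nanoseconds,
 * though they are passed to MK_TIMING() unconverted and clamped by FIT()
 * to the 3-bit DH field.
 */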
struct whold_timing {
        short mode;
        short whold;
};

static struct whold_timing whold_timing[] = {

        {XFER_UDMA_5, 0},
        {XFER_UDMA_4, 0},
        {XFER_UDMA_3, 0},

        {XFER_UDMA_2, 0},
        {XFER_UDMA_1, 0},
        {XFER_UDMA_0, 0},

        {XFER_UDMA_SLOW, 0},

        {XFER_MW_DMA_2, 0},
        {XFER_MW_DMA_1, 0},
        {XFER_MW_DMA_0, 0},

        {XFER_SW_DMA_2, 0},
        {XFER_SW_DMA_1, 0},
        {XFER_SW_DMA_0, 10},

        {XFER_PIO_5, 10},
        {XFER_PIO_4, 10},
        {XFER_PIO_3, 15},

        {XFER_PIO_2, 20},
        {XFER_PIO_1, 30},
        {XFER_PIO_0, 50},

        {XFER_PIO_SLOW,},

        {-1}
};

/* The interface doesn't have register/PIO timing for each device,
 * but rather "fast" and "slow" timing.  We have to determine
 * which is the "fast" device based upon their capability.
 */
static int pio_mode[2];
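/* Editorial note: pio_mode[] caches the transfer mode most recently
 * programmed for each unit (initialized to -1 in nonpci_ide_init_hwif_ports())
 * so the faster of the two devices can be identified on later calls.
 */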

/* Pointer to the IDE controller registers.
*/
static volatile ide_t *idp;

/* Virtual and physical address of the PRD page.
*/
static prd_entry_t *prd_table;
static dma_addr_t prd_phys;

int
nonpci_ide_default_irq(ide_ioreg_t base)
{
        return IDE0_IRQ;
}

/* This is borrowed from ide_timing_find_mode so we can find the proper
 * whold parameter.
 */

static short
whold_timing_find_mode(short speed)
{
        struct whold_timing *t;

        for (t = whold_timing; t->mode != speed; t++)
                if (t->mode < 0)
                        return 0;
        return t->whold;
}

/* The STB04 has a fixed number of cycles that get added in
 * regardless.  Adjust an ide_timing struct to accommodate that.
 */
static void
stb04xxx_ide_adjust_timing(struct ide_timing *t)
{
        t->setup -= 2;
        t->act8b -= 1;
        t->rec8b -= 1;
        t->active -= 1;
        t->recover -= 1;
}

static int
stb04xxx_ide_set_drive(ide_drive_t * drive, unsigned char speed)
{
        ide_drive_t *peer;
        struct ide_timing d, p, merge, *fast;
        int fast_device;
        unsigned int ctl;
        volatile unsigned int *dtiming;

        if (speed != XFER_PIO_SLOW && speed != drive->current_speed)
                if (ide_config_drive_speed(drive, speed))
                        printk(KERN_WARNING
                               "ide%d: Drive %d didn't accept speed setting. Oh, well.\n",
                               drive->dn >> 1, drive->dn & 1);

        ide_timing_compute(drive, speed, &d, SYS_CLOCK_NS, SYS_CLOCK_NS);
        stb04xxx_ide_adjust_timing(&d);

        /* This should be set somewhere else, but it isn't.....
         */
        drive->dn = ((drive->select.all & 0x10) != 0);
        peer = HWIF(drive)->drives + (~drive->dn & 1);

        if (peer->present) {
                ide_timing_compute(peer, peer->current_speed, &p,
                                   SYS_CLOCK_NS, SYS_CLOCK_NS);
                stb04xxx_ide_adjust_timing(&p);
                ide_timing_merge(&p, &d, &merge,
                                 IDE_TIMING_8BIT | IDE_TIMING_SETUP);
        } else {
                merge = d;
        }

        if (!drive->init_speed)
                drive->init_speed = speed;
        drive->current_speed = speed;

        /* Now determine which drive is faster, and set up the
         * interface timing.  It would sure be nice if they would
         * have just had the timing registers for each device......
         */
        if (drive->dn & 1)
                pio_mode[1] = (int) speed;
        else
                pio_mode[0] = (int) speed;

        if (pio_mode[0] > pio_mode[1])
                fast_device = 0;
        else
                fast_device = 1;

        /* Now determine which of the drives is the fast one.  On the
         * first call we only know one device, and on subsequent
         * calls the user may manually change drive parameters.
         * Point 'fast' at the timing of the faster device.
         */
        if (fast_device == (drive->dn & 1))
                fast = &d;
        else
                fast = &p;

        /* Now we know which device is the fast one and which is
         * the slow one.  The merged timing goes into the "regular"
         * timing registers and represents the slower of both times.
         */

        idp->si_c0rt = MK_TIMING(merge.setup, merge.act8b,
                                 merge.rec8b,
                                 whold_timing_find_mode(merge.mode));

        idp->si_c0fpt = MK_TIMING(fast->setup, fast->act8b,
                                  fast->rec8b,
                                  whold_timing_find_mode(fast->mode));

        /* Tell the interface which drive is the fast one.
         */
        ctl = idp->si_c0c;      /* Chan 0 Control */
        ctl &= ~0x10000000;
        ctl |= fast_device << 28;
        idp->si_c0c = ctl;

        /* Set up DMA timing.
         */
        if ((speed & XFER_MODE) != XFER_PIO) {
                /* NOTE: si_c0d0m and si_c0d0u are two different names
                 * for the same register.  Whether it is used for
                 * Multi-word DMA timings or Ultra DMA timings is
                 * determined by the LSB written into it.  This is also
                 * true for si_c0d1m and si_c0d1u.  */
                if (drive->dn & 1)
                        dtiming = &(idp->si_c0d1m);
                else
                        dtiming = &(idp->si_c0d0m);

                if ((speed & XFER_MODE) == XFER_UDMA) {
                        static const int tRP[] = {
                                EZ(160, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
                                EZ(125, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
                                EZ(100, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
                                EZ(100, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
                                EZ(100, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
                                EZ(85, SYS_CLOCK_NS) - 2        /*fixed cycles */
                        };
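                        /* Editorial note: these appear to be the ATA UltraDMA
                         * ready-to-pause (tRP) minimums for modes 0-5
                         * (160/125/100/100/100/85 ns), converted to SYS_CLK
                         * cycles with the two controller-fixed cycles
                         * subtracted.
                         */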
                        static const int NUMtRP =
                            (sizeof (tRP) / sizeof (tRP[0]));
                        *dtiming =
                            MK_UTIMING(d.udma,
                                       tRP[FIT(speed & 0xf, 0, NUMtRP - 1)]);
                } else {
                        /* Multi-word DMA.  Note that d.recover/2 is an
                         * approximation of MAX(tH, MAX(tJ, tN)) */
                        *dtiming = MK_TIMING(d.setup, d.active,
                                             d.recover, d.recover / 2);
                }
                drive->using_dma = 1;
        }

        return 0;
}

static void
stb04xxx_ide_tuneproc(ide_drive_t * drive, unsigned char pio)
{

        if (pio == 255)
                pio = ide_find_best_mode(drive, XFER_PIO | XFER_EPIO);
        else
                pio = XFER_PIO_0 + MIN(pio, 5);

        stb04xxx_ide_set_drive(drive, pio);
}

#ifdef CONFIG_BLK_DEV_IDEDMA

/* DMA stuff mostly stolen from PMac....thanks Ben :-)!
*/

static int
stb04xxx_ide_build_dmatable(ide_drive_t * drive, int wr)
{
        prd_entry_t *table;
        int count = 0;
        struct request *rq = HWGROUP(drive)->rq;
        struct buffer_head *bh = rq->bh;
        unsigned int size, addr;

        table = prd_table;

        do {
                /*
                 * Determine addr and size of next buffer area.  We assume that
                 * individual virtual buffers are always composed linearly in
                 * physical memory.  For example, we assume that any 8kB buffer
                 * is always composed of two adjacent physical 4kB pages rather
                 * than two possibly non-adjacent physical 4kB pages.
                 * We also have to ensure cache coherency here.  If writing,
                 * flush the data cache to memory.  Logically, if reading
                 * we should do it after the DMA is complete, but it is
                 * more convenient to do it here.  If someone is messing
                 * with a buffer space after it is handed to us, they
                 * shouldn't be surprised by corrupted data, anyway :-).
                 */
                if (bh == NULL) {       /* paging requests have (rq->bh == NULL) */
                        addr = virt_to_bus(rq->buffer);
                        size = rq->nr_sectors << 9;
                        if (wr)
                                consistent_sync(rq->buffer, size,
                                                PCI_DMA_TODEVICE);
                        else
                                consistent_sync(rq->buffer, size,
                                                PCI_DMA_FROMDEVICE);
                } else {
                        /* group sequential buffers into one large buffer */
                        addr = virt_to_bus(bh->b_data);
                        size = bh->b_size;
                        if (wr)
                                consistent_sync(bh->b_data, size,
                                                PCI_DMA_TODEVICE);
                        else
                                consistent_sync(bh->b_data, size,
                                                PCI_DMA_FROMDEVICE);
                        while ((bh = bh->b_reqnext) != NULL) {
                                if ((addr + size) != virt_to_bus(bh->b_data))
                                        break;
                                size += bh->b_size;
                                if (wr)
                                        consistent_sync(bh->b_data,
                                                        bh->b_size,
                                                        PCI_DMA_TODEVICE);
                                else
                                        consistent_sync(bh->b_data,
                                                        bh->b_size,
                                                        PCI_DMA_FROMDEVICE);
                        }
                }

                /*
                 * Fill in the next PRD entry.
                 * Note that one PRD entry can transfer
                 * at most 65535 bytes.
                 */
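                /* Editorial note: the loop below actually caps each entry at
                 * 0xfe00 (65024) bytes so that the count stays even (and under
                 * 64 kB), as the PRD count field requires.
                 */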
                while (size) {
                        unsigned int tc = (size < 0xfe00) ? size : 0xfe00;

                        if (++count >= NUM_PRD) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                return 0;       /* revert to PIO for this request */
                        }
                        table->prd_physptr = (addr & 0xfffffffe);
                        if (table->prd_physptr & 0xF) {
                                printk(KERN_WARNING
                                       "%s: DMA buffer not 16 byte aligned.\n",
                                       drive->name);
                                return 0;       /* revert to PIO for this request */
                        }
                        table->prd_count = (tc & 0xfffe);
                        addr += tc;
                        size -= tc;
                        ++table;
                }
        } while (bh != NULL);

        /* Add the EOT to the last table entry.
         */
        if (count) {
                table--;
                table->prd_count |= PRD_EOT;
        } else {
                printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
        }

        return 1;
}

/*
 * dma_intr() is the handler for disk read/write DMA interrupts.
 * This is taken directly from ide-dma.c, which we can't use because
 * it requires PCI support.
 */
ide_startstop_t
ide_dma_intr(ide_drive_t * drive)
{
        int i;
        byte stat, dma_stat;

        dma_stat = HWIF(drive)->ide_dma_end(drive);
        stat = HWIF(drive)->INB(IDE_STATUS_REG);        /* get drive status */
        if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
                if (!dma_stat) {
                        struct request *rq = HWGROUP(drive)->rq;
                        for (i = rq->nr_sectors; i > 0;) {
                                i -= rq->current_nr_sectors;
                                ide_end_request(drive, 1);
                        }
                        return ide_stopped;
                }
                printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
                       drive->name, dma_stat);
        }
        return ide_error(drive, "dma_intr", stat);
}

/* ....and another one....
*/
int
report_drive_dmaing(ide_drive_t * drive)
{
        struct hd_driveid *id = drive->id;

        if ((id->field_valid & 4) && (eighty_ninty_three(drive)) &&
            (id->dma_ultra & (id->dma_ultra >> 11) & 7)) {
                if ((id->dma_ultra >> 13) & 1) {
                        printk(", UDMA(100)");  /* UDMA BIOS-enabled! */
                } else if ((id->dma_ultra >> 12) & 1) {
                        printk(", UDMA(66)");   /* UDMA BIOS-enabled! */
                } else {
                        printk(", UDMA(44)");   /* UDMA BIOS-enabled! */
                }
        } else if ((id->field_valid & 4) &&
                   (id->dma_ultra & (id->dma_ultra >> 8) & 7)) {
                if ((id->dma_ultra >> 10) & 1) {
                        printk(", UDMA(33)");   /* UDMA BIOS-enabled! */
                } else if ((id->dma_ultra >> 9) & 1) {
                        printk(", UDMA(25)");   /* UDMA BIOS-enabled! */
                } else {
                        printk(", UDMA(16)");   /* UDMA BIOS-enabled! */
                }
        } else if (id->field_valid & 4) {
                printk(", (U)DMA");     /* Can be BIOS-enabled! */
        } else {
                printk(", DMA");
        }
        return 1;
}

static int
stb04xxx_ide_check_dma(ide_drive_t * drive)
{
        struct hd_driveid *id = drive->id;
        int enable = 1;
        int speed;

        drive->using_dma = 0;

        if (drive->media == ide_floppy)
                enable = 0;

        /* Check timing here, we may be able to include XFER_UDMA_66
         * and XFER_UDMA_100.  This basically tells the 'best_mode'
         * function to also consider UDMA3 to UDMA5 device timing.
         */
        if (enable) {
                /* Section 1.6.2.6 "IDE Controller, ATA/ATAPI-5" in the STB04xxx
                 * Datasheet says the following modes are supported:
                 *   PIO modes 0 to 4
                 *   Multiword DMA modes 0 to 2
                 *   UltraDMA modes 0 to 4
                 */
                int map = XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA;
                /* XFER_EPIO includes both PIO modes 4 and 5.  Mode 5 is not
                 * valid for the STB04, so mask it out of consideration just
                 * in case some drive sets it...
                 */
                id->eide_pio_modes &= ~4;

                /* Allow UDMA_66 only if an 80 conductor cable is connected. */
                if (eighty_ninty_three(drive))
                        map |= XFER_UDMA_66;

                speed = ide_find_best_mode(drive, map);
                stb04xxx_ide_set_drive(drive, speed);

                if (HWIF(drive)->autodma &&
                    (((speed & XFER_MODE) == XFER_PIO) ||
                     ((speed & XFER_MODE) == XFER_EPIO))) {
                        drive->using_dma = 0;
                }
        }

        return 0;
}

static int stb04xxx_ide_dma_off_quietly(ide_drive_t * drive)
{
        drive->using_dma = 0;
        return 0;
}

static int stb04xxx_ide_dma_off(ide_drive_t * drive)
{
        printk(KERN_INFO "%s: DMA disabled\n", drive->name);
        return stb04xxx_ide_dma_off_quietly(drive);
}

static int stb04xxx_ide_dma_on(ide_drive_t * drive)
{
        return stb04xxx_ide_check_dma(drive);
}

static int stb04xxx_ide_dma_check(ide_drive_t * drive)
{
        return stb04xxx_ide_dma_on(drive);
}

static int stb04xxx_ide_dma_begin(ide_drive_t * drive, int writing)
{
        idp->si_c0tb = (unsigned int) prd_phys;
        idp->si_c0s0 = 0xdc800000;      /* Clear all status */
        idp->si_c0ie = 0x90000000;      /* Enable all intr */
        idp->si_c0dcm = 0;
        idp->si_c0dcm =
                (writing ? 0x09000000 : 0x01000000);
        return 0;
}

static int stb04xxx_ide_dma_io(ide_drive_t * drive, int writing)
{
        if (!stb04xxx_ide_build_dmatable(drive, writing))
                return 1;
        drive->waiting_for_dma = 1;
        if (drive->media != ide_disk)
                return 0;
        ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL);
        HWIF(drive)->OUTB(writing ? WIN_WRITEDMA : WIN_READDMA,
                 IDE_COMMAND_REG);
        return stb04xxx_ide_dma_begin(drive, writing);
}

static int stb04xxx_ide_dma_read(ide_drive_t * drive)
{
        return stb04xxx_ide_dma_io(drive, 0);
}

static int stb04xxx_ide_dma_write(ide_drive_t * drive)
{
        return stb04xxx_ide_dma_io(drive, 1);
}

static int stb04xxx_ide_dma_end(ide_drive_t * drive)
{
        unsigned int dstat;

        drive->waiting_for_dma = 0;
        dstat = idp->si_c0s1;
        idp->si_c0s0 = 0xdc800000;      /* Clear all status */
        /* verify good dma status */
        return (dstat & 0x80000000);
}

static int stb04xxx_ide_dma_test_irq(ide_drive_t * drive)
{
        return idp->si_c0s0 & 0x10000000 ? 1 : 0;
}

static int stb04xxx_ide_dma_verbose(ide_drive_t * drive)
{
        return report_drive_dmaing(drive);
}
#endif                          /* CONFIG_BLK_DEV_IDEDMA */

void
nonpci_ide_init_hwif_ports(hw_regs_t * hw,
                           ide_ioreg_t data_port, ide_ioreg_t ctrl_port,
                           int *irq)
{
        ide_ioreg_t *p;
        unsigned char *ip;
        unsigned int uicdcr;
        int i;
        struct ocp_dev *ide_dev;
        int curr_ide;
        ide_hwif_t *hwif;

        curr_ide = 0;
        hwif = &ide_hwifs[0];

        p = hw->io_ports;
        *p = 0;
        if (irq)
                *irq = 0;

        if (data_port != 0)
                return;
        printk("IBM STB04xxx OCP IDE driver version %s\n", OCPVR);
        if (!(ide_dev = ocp_alloc_dev(0)))
                return;

        ide_dev->type = IDE;
        if ((curr_ide = ocp_register(ide_dev)) == -ENXIO) {
                ocp_free_dev(ide_dev);
                return;
        } else {

                if ((idp = (ide_t *) ioremap(ide_dev->paddr,
                                             IDE0_SIZE)) == NULL) {
                        printk(KERN_WARNING "ide: failed ioremap\n");
                        return;
                }

                /* Enable the interface.
                 */
                idp->si_control = 0x80000000;
                idp->si_c0s0 = 0xdc800000;      /* Clear all status */
#if 0
                idp->si_c0sr = 0xf0000000;      /* Mandated val to Slew Rate Control */
#endif
                idp->si_intenable = 0x80000000;
                /* Per the STB04 data sheet:
                 *  1)  tTO = ((8*RDYT) + 1) * SYS_CLK
                 * and:
                 *  2)  tTO >= 1250 + (2 * SYS_CLK) - t2
                 * Solving the first equation for RDYT:
                 *             (tTO/SYS_CLK) - 1
                 *  3)  RDYT = -----------------
                 *                     8
                 * Substituting equation 2) for tTO in equation 3:
                 *             ((1250 + (2 * SYS_CLK) - t2)/SYS_CLK) - 1
                 *  4)  RDYT = -----------------------------------------
                 *                                8
                 * It's just the timeout so having it too long isn't too
                 * significant, so we'll just assume t2 is zero.  All this math
                 * is handled by the compiler and RDYT ends up being 11 assuming
                 * that SYS_CLOCK_NS is 15.
                 */
                idp->si_c0timo = (EZ(EZ(1250 + 2 * SYS_CLOCK_NS, SYS_CLOCK_NS) - 1, 8)) << 23;  /* Chan 0 timeout */
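                /* Editorial note: with SYS_CLOCK_NS = 15, and assuming EZ()
                 * rounds up to whole units as in ide-timing.h, the inner
                 * EZ(1280, 15) is 86, minus 1 gives 85, and EZ(85, 8) is 11,
                 * which matches the RDYT value of 11 mentioned above before
                 * it is shifted into place at bit 23.
                 */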

                /* Stuff some slow default PIO timing.
                 */
                idp->si_c0rt = MK_TIMING(6, 19, 15, 2);
                idp->si_c0fpt = MK_TIMING(6, 19, 15, 2);

                ip = (unsigned char *) (&(idp->si_c0d));        /* Chan 0 data */

                for (i = 0; i <= IDE_STATUS_OFFSET; i++) {
                        *p++ = (int) (ip++);
                }
                hw->io_ports[IDE_CONTROL_OFFSET] = (int) (&(idp->si_c0adc));

                if (irq)
                        *irq = ide_dev->irq;

                pio_mode[0] = pio_mode[1] = -1;

                /* We should probably have UIC functions to set external
                 * interrupt level/edge.
                 */
                uicdcr = mfdcr(DCRN_UIC_PR(UIC0));
                uicdcr &= ~(0x80000000 >> IDE0_IRQ);
                mtdcr(DCRN_UIC_PR(UIC0), uicdcr);
                mtdcr(DCRN_UIC_TR(UIC0), 0x80000000 >> IDE0_IRQ);

                /* Grab a page for the PRD Table.
                 */
                prd_table = (prd_entry_t *) consistent_alloc(GFP_KERNEL,
                                                             NUM_PRD *
                                                             sizeof
                                                             (prd_entry_t),
                                                             &prd_phys);

                hwif->tuneproc = &stb04xxx_ide_tuneproc;
                hwif->speedproc = &stb04xxx_ide_set_drive;
                /* Figure out if an 80 conductor cable is connected or not. */
                hwif->udma_four =
                    (idp->si_c0s1 & 0x20000000) != 0;
                hwif->drives[0].autotune = 1;
                hwif->drives[1].autotune = 1;

#ifdef CONFIG_BLK_DEV_IDEDMA
                hwif->autodma = 1;
                hwif->ide_dma_off = &stb04xxx_ide_dma_off;
                hwif->ide_dma_off_quietly = &stb04xxx_ide_dma_off_quietly;
                hwif->ide_dma_host_off = &stb04xxx_ide_dma_off_quietly;
                hwif->ide_dma_on = &stb04xxx_ide_dma_on;
                hwif->ide_dma_host_on = &stb04xxx_ide_dma_on;
                hwif->ide_dma_check = &stb04xxx_ide_dma_check;
                hwif->ide_dma_read = &stb04xxx_ide_dma_read;
                hwif->ide_dma_write = &stb04xxx_ide_dma_write;
                hwif->ide_dma_begin = &stb04xxx_ide_dma_begin;
                hwif->ide_dma_end = &stb04xxx_ide_dma_end;
                hwif->ide_dma_test_irq = &stb04xxx_ide_dma_test_irq;
                hwif->ide_dma_verbose = &stb04xxx_ide_dma_verbose;
#endif

                hwif->noprobe = 0;

                memcpy(hwif->io_ports, hw->io_ports,
                       sizeof (hw->io_ports));
                hwif->irq = ide_dev->irq;
        }
}