2 * IDE driver for IBM On-chip IDE controllers
3 * Copyright 2001 - 2002 MontaVista Software Inc.
6 * I snagged bits and pieces from a variety of drivers, primarily
7 * ide-pmac.c.....thanks to previous authors!
9 * Version 1.2 (01/30/12) Armin
11 * merger up to new ide-timing.h
13 * Version 2.0 (05/02/15) - armin
14 * converted to new core_ocp and only supports one interface for now.
16 * Version 2.1 (05/25/02) - armin
17 * name change from *_driver to *_dev
18 * Version 2.2 06/13/02 - Armin
19 * changed irq_resource array to just irq
23 #include <linux/types.h>
24 #include <linux/hdreg.h>
25 #include <linux/delay.h>
26 #include <linux/ide.h>
27 #include <linux/pci.h>
30 #include <asm/scatterlist.h>
32 #include "ide-timing.h"
35 /* The structure of the PRD entry. The address must be word aligned,
36 * and the count must be an even number of bytes.
/* One PRD (Physical Region Descriptor) entry; see the comment above
 * on alignment and count requirements. */
39 unsigned int prd_physptr; /* physical (bus) address of the data buffer */
40 unsigned int prd_count; /* Count only in lower 16 bits */
42 #define PRD_EOT (uint)0x80000000 /* Set in prd_count */
44 /* The number of PRDs required in a single transfer from the upper IDE
45 * functions. I believe the maximum number is 128, but most seem to
46 * code to 256. It's probably best to keep this under one page......
/* Pack register/PIO bus timing fields into one 32-bit timing-register
 * value: AS = address setup, DIOP = DIOR/DIOW active, DIOY = recovery,
 * DH = data hold, all in SYS_CLOCK_NS cycles, each clamped to its
 * field width by FIT().
 * NOTE(review): field placement inferred from the shifts below —
 * confirm against the STB04xxx datasheet register layout. */
50 #define MK_TIMING(AS, DIOP, DIOY, DH) \
51 ((FIT((AS), 0, 15) << 27) | \
52 (FIT((DIOP), 0, 63) << 20) | \
53 (FIT((DIOY), 0, 63) << 13) | \
54 (FIT((DH), 0, 7) << 9))
/* Ultra DMA sub-timings expressed in SYS_CLOCK_NS cycles; the
 * controller contributes a fixed number of cycles of its own,
 * which is subtracted out here ("fixed cycles"). */
56 #define UTIMING_SETHLD (EZ(20 /*tACK*/, SYS_CLOCK_NS) - 1 /*fixed cycles*/)
57 #define UTIMING_ENV (EZ(20 /*tENV*/, SYS_CLOCK_NS) - 1 /*fixed cycles*/)
58 #define UTIMING_SS (EZ(50 /*tSS */, SYS_CLOCK_NS) - 3 /*fixed cycles*/)
/* Pack an Ultra DMA timing word: CYC = cycle time, RP = ready-to-pause
 * time.  The low bit selects Ultra DMA (vs multi-word DMA) semantics
 * for the shared si_c0dXm/si_c0dXu register (see note further down). */
59 #define MK_UTIMING(CYC, RP) \
60 ((FIT(UTIMING_SETHLD, 0, 15) << 27) | \
61 (FIT(UTIMING_ENV, 0, 15) << 22) | \
62 (FIT((CYC), 0, 15) << 17) | \
63 (FIT((RP), 0, 63) << 10) | \
64 (FIT(UTIMING_SS, 0, 15) << 5) | \
65 1 /* Turn on Ultra DMA */)
67 /* Define the period of the STB clock used to generate the
68 * IDE bus timing. The clock is actually 63 MHz, but it
69 * gets rounded in a favorable direction.
71 #define IDE_SYS_FREQ 63 /* MHz */
72 #define SYS_CLOCK_NS (1000 / IDE_SYS_FREQ)
79 static struct whold_timing whold_timing[] = {
112 /* The interface doesn't have register/PIO timing for each device,
113 * but rather "fast" and "slow" timing. We have to determine
114 * which is the "fast" device based upon their capability.
116 static int pio_mode[2];
118 /* Pointer to the IDE controller registers.
120 static volatile ide_t *idp;
122 /* Virtual and physical address of the PRD page.
124 static prd_entry_t *prd_table;
125 static dma_addr_t prd_phys;
/* Default IRQ lookup for the non-PCI (on-chip) interface.
 * NOTE(review): function body is not visible in this view. */
128 nonpci_ide_default_irq(ide_ioreg_t base)
133 /* this is borrowed from ide_timing_find_mode so we can find the proper
/* Linear search of the whold_timing[] table for the entry whose
 * mode matches 'speed' (an XFER_* mode id), mirroring
 * ide_timing_find_mode().
 * NOTE(review): view is truncated — the return and loop termination
 * are not visible; an unmatched mode would walk off the table. */
138 whold_timing_find_mode(short speed)
140 struct whold_timing *t;
142 for (t = whold_timing; t->mode != speed; t++)
148 /* The STB04 has a fixed number of cycles that get added in
149 * regardless. Adjust an ide_timing struct to accommodate that.
/* Adjust an ide_timing struct for the STB04's fixed added cycles
 * (see comment above).  NOTE(review): body not visible in this view. */
153 stb04xxx_ide_adjust_timing(struct ide_timing *t)
/* Program 'drive' and the controller for transfer mode 'speed'
 * (XFER_PIO_*, XFER_MW_DMA_* or XFER_UDMA_*).  The interface has
 * only one "regular" (slow) and one "fast" timing register set, so
 * the timing merged with the peer drive goes into si_c0rt and the
 * faster device's own timing into si_c0fpt; DMA/UDMA timing goes
 * into the per-device si_c0dXm register.
 * NOTE(review): source appears truncated in this view — several
 * original lines are missing, so control-flow details are partial. */
163 stb04xxx_ide_set_drive(ide_drive_t * drive, unsigned char speed)
166 struct ide_timing d, p, merge, *fast;
169 volatile unsigned int *dtiming;
171 if (speed != XFER_PIO_SLOW && speed != drive->current_speed)
172 if (ide_config_drive_speed(drive, speed))
174 "ide%d: Drive %d didn't accept speed setting. Oh, well.\n",
175 drive->dn >> 1, drive->dn & 1);
/* Compute this drive's timing in SYS_CLOCK_NS units, then apply the
 * STB04 fixed-cycle adjustment. */
177 ide_timing_compute(drive, speed, &d, SYS_CLOCK_NS, SYS_CLOCK_NS);
178 stb04xxx_ide_adjust_timing(&d);
180 /* This should be set somewhere else, but it isn't.....
182 drive->dn = ((drive->select.all & 0x10) != 0);
183 peer = HWIF(drive)->drives + (~drive->dn & 1);
186 ide_timing_compute(peer, peer->current_speed, &p,
187 SYS_CLOCK_NS, SYS_CLOCK_NS);
188 stb04xxx_ide_adjust_timing(&p);
189 ide_timing_merge(&p, &d, &merge,
190 IDE_TIMING_8BIT | IDE_TIMING_SETUP);
195 if (!drive->init_speed)
196 drive->init_speed = speed;
197 drive->current_speed = speed;
199 /* Now determine which drive is faster, and set up the
200 * interface timing. It would sure be nice if they would
201 * have just had the timing registers for each device......
204 pio_mode[1] = (int) speed;
206 pio_mode[0] = (int) speed;
208 if (pio_mode[0] > pio_mode[1])
213 /* Now determine which of the drives
214 * the first call we only know one device, and on subsequent
215 * calls the user may manually change drive parameters.
216 * Make timing[0] the fast device and timing[1] the slow.
218 if (fast_device == (drive->dn & 1))
223 /* Now we know which device is the fast one and which is
224 * the slow one. The merged timing goes into the "regular"
225 * timing registers and represents the slower of both times.
228 idp->si_c0rt = MK_TIMING(merge.setup, merge.act8b,
230 whold_timing_find_mode(merge.mode));
232 idp->si_c0fpt = MK_TIMING(fast->setup, fast->act8b,
234 whold_timing_find_mode(fast->mode));
236 /* Tell the interface which drive is the fast one.
238 ctl = idp->si_c0c; /* Chan 0 Control */
240 ctl |= fast_device << 28;
243 /* Set up DMA timing.
245 if ((speed & XFER_MODE) != XFER_PIO) {
246 /* NOTE: si_c0d0m and si_c0d0u are two different names
247 * for the same register. Whether it is used for
248 * Multi-word DMA timings or Ultra DMA timings is
249 * determined by the LSB written into it. This is also
250 * true for si_c0d1m and si_c0d1u. */
252 dtiming = &(idp->si_c0d1m);
254 dtiming = &(idp->si_c0d0m);
/* Ultra DMA: table of ready-to-pause times indexed by UDMA mode
 * number, minus the controller's fixed cycles. */
256 if ((speed & XFER_MODE) == XFER_UDMA) {
257 static const int tRP[] = {
258 EZ(160, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
259 EZ(125, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
260 EZ(100, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
261 EZ(100, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
262 EZ(100, SYS_CLOCK_NS) - 2 /*fixed cycles */ ,
263 EZ(85, SYS_CLOCK_NS) - 2 /*fixed cycles */
265 static const int NUMtRP =
266 (sizeof (tRP) / sizeof (tRP[0]));
269 tRP[FIT(speed & 0xf, 0, NUMtRP - 1)]);
271 /* Multi-word DMA. Note that d.recover/2 is an
272 * approximation of MAX(tH, MAX(tJ, tN)) */
273 *dtiming = MK_TIMING(d.setup, d.active,
274 d.recover, d.recover / 2);
276 drive->using_dma = 1;
/* tuneproc hook: pick the best enumerated PIO mode the drive
 * supports (numerically capped at 5 by MIN) and program it via
 * stb04xxx_ide_set_drive().
 * NOTE(review): view is truncated — handling of the caller-supplied
 * 'pio' value before the ide_find_best_mode() call is not visible. */
283 stb04xxx_ide_tuneproc(ide_drive_t * drive, unsigned char pio)
287 pio = ide_find_best_mode(drive, XFER_PIO | XFER_EPIO);
289 pio = XFER_PIO_0 + MIN(pio, 5);
291 stb04xxx_ide_set_drive(drive, pio);
294 #ifdef CONFIG_BLK_DEV_IDEDMA
296 /* DMA stuff mostly stolen from PMac....thanks Ben :-)!
/* Build the PRD (scatter/gather) table for the current request.
 * Sequential buffer_heads that are physically contiguous are
 * coalesced into one area; each PRD entry covers at most 0xfe00
 * bytes.  Cache coherency is handled here via consistent_sync().
 * Returns 0 on failure (table overflow or misaligned buffer), in
 * which case the caller reverts to PIO.
 * NOTE(review): source appears truncated in this view — loop
 * structure and the nonzero success return are only partly visible. */
300 stb04xxx_ide_build_dmatable(ide_drive_t * drive, int wr)
304 struct request *rq = HWGROUP(drive)->rq;
305 struct buffer_head *bh = rq->bh;
306 unsigned int size, addr;
312 * Determine addr and size of next buffer area. We assume that
313 * individual virtual buffers are always composed linearly in
314 * physical memory. For example, we assume that any 8kB buffer
315 * is always composed of two adjacent physical 4kB pages rather
316 * than two possibly non-adjacent physical 4kB pages.
317 * We also have to ensure cache coherency here. If writing,
318 * flush the data cache to memory. Logically, if reading
319 * we should do it after the DMA is complete, but it is
320 * more convenient to do it here. If someone is messing
321 * with a buffer space after it is handed to us, they
322 * shouldn't be surprised by corrupted data, anyway :-).
324 if (bh == NULL) { /* paging requests have (rq->bh == NULL) */
325 addr = virt_to_bus(rq->buffer);
326 size = rq->nr_sectors << 9;
328 consistent_sync(rq->buffer, size,
331 consistent_sync(rq->buffer, size,
334 /* group sequential buffers into one large buffer */
335 addr = virt_to_bus(bh->b_data);
338 consistent_sync(bh->b_data, size,
341 consistent_sync(bh->b_data, size,
343 while ((bh = bh->b_reqnext) != NULL) {
344 if ((addr + size) != virt_to_bus(bh->b_data))
348 consistent_sync(bh->b_data,
352 consistent_sync(bh->b_data,
359 * Fill in the next PRD entry.
360 * Note that one PRD entry can transfer
361 * at most 65535 bytes.
364 unsigned int tc = (size < 0xfe00) ? size : 0xfe00;
366 if (++count >= NUM_PRD) {
367 printk(KERN_WARNING "%s: DMA table too small\n",
369 return 0; /* revert to PIO for this request */
371 table->prd_physptr = (addr & 0xfffffffe);
372 if (table->prd_physptr & 0xF) {
374 "%s: DMA buffer not 16 byte aligned.\n",
376 return 0; /* revert to PIO for this request */
378 table->prd_count = (tc & 0xfffe);
383 } while (bh != NULL);
385 /* Add the EOT to the last table entry.
389 table->prd_count |= PRD_EOT;
391 printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
398 * dma_intr() is the handler for disk read/write DMA interrupts
399 * This is taken directly from ide-dma.c, which we can't use because
400 * it requires PCI support.
/* Handler for disk read/write DMA completion interrupts; a local
 * copy of ide-dma.c's ide_dma_intr() since the generic one needs
 * PCI support.  Ends the request on success, reports an error on a
 * bad drive/DMA status.
 * NOTE(review): line 413 re-assigns rq with the same value as its
 * initializer on line 412 — appears redundant, but surrounding lines
 * are missing from this view so it is left as-is. */
403 ide_dma_intr(ide_drive_t * drive)
408 dma_stat = HWIF(drive)->ide_dma_end(drive);
409 stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */
410 if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
412 struct request *rq = HWGROUP(drive)->rq;
413 rq = HWGROUP(drive)->rq;
414 for (i = rq->nr_sectors; i > 0;) {
415 i -= rq->current_nr_sectors;
416 ide_end_request(drive, 1);
420 printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
421 drive->name, dma_stat);
423 return ide_error(drive, "dma_intr", stat);
426 /* ....and another one....
/* Print which (U)DMA mode the drive reports enabled, decoded from
 * the identify data's dma_ultra word (enabled bits live 8 or 11
 * positions above the capability bits; the high group also needs an
 * 80-conductor cable per eighty_ninty_three()). */
429 report_drive_dmaing(ide_drive_t * drive)
431 struct hd_driveid *id = drive->id;
433 if ((id->field_valid & 4) && (eighty_ninty_three(drive)) &&
434 (id->dma_ultra & (id->dma_ultra >> 11) & 7)) {
435 if ((id->dma_ultra >> 13) & 1) {
436 printk(", UDMA(100)"); /* UDMA BIOS-enabled! */
437 } else if ((id->dma_ultra >> 12) & 1) {
438 printk(", UDMA(66)"); /* UDMA BIOS-enabled! */
440 printk(", UDMA(44)"); /* UDMA BIOS-enabled! */
442 } else if ((id->field_valid & 4) &&
443 (id->dma_ultra & (id->dma_ultra >> 8) & 7)) {
444 if ((id->dma_ultra >> 10) & 1) {
445 printk(", UDMA(33)"); /* UDMA BIOS-enabled! */
446 } else if ((id->dma_ultra >> 9) & 1) {
447 printk(", UDMA(25)"); /* UDMA BIOS-enabled! */
449 printk(", UDMA(16)"); /* UDMA BIOS-enabled! */
451 } else if (id->field_valid & 4) {
452 printk(", (U)DMA"); /* Can be BIOS-enabled! */
/* Decide and program the best transfer mode for 'drive'.  Builds the
 * candidate-mode map per the STB04xxx datasheet (PIO, EPIO minus
 * mode 5, MWDMA 0-2, UDMA — gated on cable detect), programs the
 * chosen speed, and leaves using_dma clear for PIO-class results.
 * NOTE(review): source appears truncated in this view. */
460 stb04xxx_ide_check_dma(ide_drive_t * drive)
462 struct hd_driveid *id = drive->id;
466 drive->using_dma = 0;
468 if (drive->media == ide_floppy)
471 /* Check timing here, we may be able to include XFER_UDMA_66
472 * and XFER_UDMA_100. This basically tells the 'best_mode'
473 * function to also consider UDMA3 to UDMA5 device timing.
476 /* Section 1.6.2.6 "IDE Controller, ATA/ATAPI-5" in the STB04xxx
477 * Datasheet says the following modes are supported:
479 * Multiword DMA modes 0 to 2
480 * UltraDMA modes 0 to 4
482 int map = XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA;
483 /* XFER_EPIO includes both PIO modes 4 and 5. Mode 5 is not
484 * valid for the STB04, so mask it out of consideration just
485 * in case some drive sets it...
487 id->eide_pio_modes &= ~4;
489 /* Allow UDMA_66 only if an 80 conductor cable is connected. */
490 if (eighty_ninty_three(drive))
493 speed = ide_find_best_mode(drive, map);
494 stb04xxx_ide_set_drive(drive, speed);
496 if (HWIF(drive)->autodma &&
497 (((speed & XFER_MODE) == XFER_PIO) ||
498 ((speed & XFER_MODE) == XFER_EPIO))) {
499 drive->using_dma = 0;
/* Disable DMA for 'drive' without logging anything. */
506 static int stb04xxx_ide_dma_off_quietly(ide_drive_t * drive)
508 drive->using_dma = 0;
/* Disable DMA for 'drive', logging the fact; delegates to the
 * quiet variant. */
512 static int stb04xxx_ide_dma_off(ide_drive_t * drive)
514 printk(KERN_INFO "%s: DMA disabled\n", drive->name);
515 return stb04xxx_ide_dma_off_quietly(drive);
/* (Re)enable DMA by re-running full mode selection/programming. */
518 static int stb04xxx_ide_dma_on(ide_drive_t * drive)
520 return stb04xxx_ide_check_dma(drive);
/* ide_dma_check hook: same action as turning DMA on. */
523 static int stb04xxx_ide_dma_check(ide_drive_t * drive)
525 return stb04xxx_ide_dma_on(drive);
/* Start a DMA transfer: point the controller at the PRD table,
 * clear stale channel status, enable interrupts, then start the
 * channel in the requested direction.
 * NOTE(review): register/bit meanings are taken from the existing
 * inline comments; confirm against the STB04xxx datasheet.  View is
 * truncated — the register assignment for the direction word on the
 * last line is not fully visible. */
528 static int stb04xxx_ide_dma_begin(ide_drive_t * drive, int writing)
530 idp->si_c0tb = (unsigned int) prd_phys;
531 idp->si_c0s0 = 0xdc800000; /* Clear all status */
532 idp->si_c0ie = 0x90000000; /* Enable all intr */
535 (writing ? 0x09000000 : 0x01000000);
/* Common read/write DMA path: build the PRD table (bailing out to
 * PIO if that fails), arm the completion handler for disks, issue
 * the DMA command to the drive, and kick the controller.
 * NOTE(review): source appears truncated here — error-return lines
 * are not visible. */
539 static int stb04xxx_ide_dma_io(ide_drive_t * drive, int writing)
541 if (!stb04xxx_ide_build_dmatable(drive, writing))
543 drive->waiting_for_dma = 1;
544 if (drive->media != ide_disk)
546 ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL);
547 HWIF(drive)->OUTB(writing ? WIN_WRITEDMA : WIN_READDMA,
549 return stb04xxx_ide_dma_begin(drive, writing);
/* ide_dma_read hook: common DMA path, read direction. */
552 static int stb04xxx_ide_dma_read(ide_drive_t * drive)
554 return stb04xxx_ide_dma_io(drive, 0);
/* ide_dma_write hook: common DMA path, write direction. */
557 static int stb04xxx_ide_dma_write(ide_drive_t * drive)
559 return stb04xxx_ide_dma_io(drive, 1);
/* Finish a DMA transfer: snapshot channel status from si_c0s1,
 * clear the status register, and return nonzero (bit 31) if the
 * hardware flagged an error. */
562 static int stb04xxx_ide_dma_end(ide_drive_t * drive)
566 drive->waiting_for_dma = 0;
567 dstat = idp->si_c0s1;
568 idp->si_c0s0 = 0xdc800000; /* Clear all status */
569 /* verify good dma status */
570 return (dstat & 0x80000000);
/* Poll whether our channel has raised a DMA interrupt (bit 28 of
 * si_c0s0). */
573 static int stb04xxx_ide_dma_test_irq(ide_drive_t * drive)
575 return idp->si_c0s0 & 0x10000000 ? 1 : 0;
/* ide_dma_verbose hook: print the drive's enabled (U)DMA mode. */
578 static int stb04xxx_ide_dma_verbose(ide_drive_t * drive)
580 return report_drive_dmaing(drive);
582 #endif /* CONFIG_BLK_DEV_IDEDMA */
/* Set up hwif port mappings and probe/initialize the on-chip IDE
 * controller: register the OCP device, ioremap the controller
 * registers, enable the interface, program a conservative default
 * timing and the channel timeout, configure the UIC interrupt
 * level/edge, allocate the PRD table page, and install the tuneproc/
 * speedproc and (under CONFIG_BLK_DEV_IDEDMA) DMA method hooks.
 * NOTE(review): this view appears heavily truncated — error paths,
 * the port-assignment loop body, and surrounding braces are only
 * partly visible, and the span may cover more than one original
 * function. */
585 nonpci_ide_init_hwif_ports(hw_regs_t * hw,
586 ide_ioreg_t data_port, ide_ioreg_t ctrl_port,
593 struct ocp_dev *ide_dev;
598 hwif = &ide_hwifs[0];
607 printk("IBM STB04xxx OCP IDE driver version %s\n", OCPVR);
608 if (!(ide_dev = ocp_alloc_dev(0)))
612 if ((curr_ide = ocp_register(ide_dev)) == -ENXIO) {
613 ocp_free_dev(ide_dev);
617 if ((idp = (ide_t *) ioremap(ide_dev->paddr,
618 IDE0_SIZE)) == NULL) {
619 printk(KERN_WARNING "ide: failed ioremap\n");
623 /* Enable the interface.
625 idp->si_control = 0x80000000;
626 idp->si_c0s0 = 0xdc800000; /* Clear all status */
628 idp->si_c0sr = 0xf0000000; /* Mandated val to Slew Rate Control */
630 idp->si_intenable = 0x80000000;
631 /* Per the STB04 data sheet:
632 * 1) tTO = ((8*RDYT) + 1) * SYS_CLK
634 * 2) tTO >= 1250 + (2 * SYS_CLK) - t2
635 * Solving the first equation for RDYT:
637 * 3) RDYT = -----------------
639 * Substituting equation 2) for tTO in equation 3:
640 * ((1250 + (2 * SYS_CLK) - t2)/SYS_CLK) - 1
641 * 3) RDYT = -----------------------------------------
643 * It's just the timeout so having it too long isn't too
644 * significant, so we'll just assume t2 is zero. All this math
645 * is handled by the compiler and RDYT ends up being 11 assuming
646 * that SYS_CLOCK_NS is 15.
648 idp->si_c0timo = (EZ(EZ(1250 + 2 * SYS_CLOCK_NS, SYS_CLOCK_NS) - 1, 8)) << 23; /* Chan 0 timeout */
650 /* Stuff some slow default PIO timing.
652 idp->si_c0rt = MK_TIMING(6, 19, 15, 2);
653 idp->si_c0fpt = MK_TIMING(6, 19, 15, 2);
655 ip = (unsigned char *) (&(idp->si_c0d)); /* Chan 0 data */
657 for (i = 0; i <= IDE_STATUS_OFFSET; i++) {
660 hw->io_ports[IDE_CONTROL_OFFSET] = (int) (&(idp->si_c0adc));
665 pio_mode[0] = pio_mode[1] = -1;
667 /* We should probably have UIC functions to set external
668 * interrupt level/edge.
670 uicdcr = mfdcr(DCRN_UIC_PR(UIC0));
671 uicdcr &= ~(0x80000000 >> IDE0_IRQ);
672 mtdcr(DCRN_UIC_PR(UIC0), uicdcr);
673 mtdcr(DCRN_UIC_TR(UIC0), 0x80000000 >> IDE0_IRQ);
675 /* Grab a page for the PRD Table.
677 prd_table = (prd_entry_t *) consistent_alloc(GFP_KERNEL,
683 hwif->tuneproc = &stb04xxx_ide_tuneproc;
684 hwif->speedproc = &stb04xxx_ide_set_drive;
685 /* Figure out if an 80 conductor cable is connected or not. */
687 (idp->si_c0s1 & 0x20000000) != 0;
688 hwif->drives[0].autotune = 1;
689 hwif->drives[1].autotune = 1;
691 #ifdef CONFIG_BLK_DEV_IDEDMA
693 hwif->ide_dma_off = &stb04xxx_ide_dma_off;
694 hwif->ide_dma_off_quietly = &stb04xxx_ide_dma_off_quietly;
695 hwif->ide_dma_host_off = &stb04xxx_ide_dma_off_quietly;
696 hwif->ide_dma_on = &stb04xxx_ide_dma_on;
697 hwif->ide_dma_host_on = &stb04xxx_ide_dma_on;
698 hwif->ide_dma_check = &stb04xxx_ide_dma_check;
699 hwif->ide_dma_read = &stb04xxx_ide_dma_read;
700 hwif->ide_dma_write = &stb04xxx_ide_dma_write;
701 hwif->ide_dma_begin = &stb04xxx_ide_dma_begin;
702 hwif->ide_dma_end = &stb04xxx_ide_dma_end;
703 hwif->ide_dma_test_irq = &stb04xxx_ide_dma_test_irq;
704 hwif->ide_dma_verbose = &stb04xxx_ide_dma_verbose;
709 memcpy(hwif->io_ports, hw->io_ports,
710 sizeof (hw->io_ports));
711 hwif->irq = ide_dev->irq;