2 * Copyright 2002 MontaVista Software Inc.
3 * Completed implementation.
4 * Author: Armin Kuster <akuster@mvista.com>
5 * MontaVista Software, Inc. <source@mvista.com>
7 * Module name: ocp_stbxxxx.c
13 * Version 07/23/02 - Armin
14 * removed many mtdcr/mfdcr dma calls to standard 4xx dma calls
17 #include <linux/types.h>
18 #include <linux/hdreg.h>
19 #include <linux/delay.h>
20 #include <linux/ide.h>
23 #include <asm/scatterlist.h>
24 #include <asm/ppc4xx_dma.h>
26 #include "ide_modes.h"
31 /* use DMA channel 2 for IDE DMA operations */
32 #define IDE_DMACH 2 /* 2nd DMA channel */
33 #define IDE_DMA_INT 6 /* IDE dma channel 2 interrupt */
35 #define WMODE 0 /* default to DMA line mode */
38 /* psc=00, pwc=000001 phc=010, resvd-must-be-one=1 */
40 unsigned long dmacr_def_line = 0x00002A02;
42 /* psc=00, pwc=000110 phc=010, resvd-must-be-one=1 */
44 unsigned long dmacr_def_word = 0x0000CA02;
46 #ifdef CONFIG_REDWOOD_4
47 #define DCRXBCR_MDMA2 0xC0000000
48 #else /* CONFIG_REDWOOD_6 */
49 #define DCRXBCR_MDMA2 0x80000000
52 #define DCRXBCR_WRITE 0x20000000
53 #define DCRXBCR_ACTIVATE 0x10000000
55 #ifdef CONFIG_REDWOOD_4
56 #define IDE_CMD_OFF 0x00100000
57 #define IDE_CTL_OFF 0x00100000
60 /* Function Prototypes */
61 static void redwood_ide_tune_drive(ide_drive_t *, byte);
62 static byte redwood_ide_dma_2_pio(byte);
63 static int redwood_ide_tune_chipset(ide_drive_t *, byte);
/*
 * dump_dcrs() - debug helper: print the 4xx DMA channel 2 DCRs and,
 * when DMASR flags bit 0x00200000, the various bus-error status and
 * address registers as well.
 * NOTE(review): this extracted listing is gapped; braces and some body
 * lines are not visible here.
 */
static void dump_dcrs(void)
printk("DMASR=%x\n", mfdcr(DCRN_DMASR));
printk("DMACR2=%x\n", mfdcr(DCRN_DMACR2));
printk("DMACT2=%d\n", mfdcr(DCRN_DMACT2));
printk("DMAS2=%x\n", mfdcr(DCRN_DMAS2));
printk("DMASA2=%x\n", mfdcr(DCRN_DMASA2));
printk("DMADA2=%x\n", mfdcr(DCRN_DMADA2));
/* 0x00200000 in DMASR presumably indicates a channel error -- confirm
 * against the 4xx DMA controller manual */
if (mfdcr(DCRN_DMASR) & 0x00200000) {
printk("BESR=%x\n", mfdcr(DCRN_BESR));
printk("BEAR=%x\n", mfdcr(DCRN_BEAR));
printk("PLB0_BESR=%x\n", mfdcr(DCRN_PLB0_BESR));
printk("PLB0_BEAR=%x\n", mfdcr(DCRN_PLB0_BEAR));
printk("PLB1_BESR=%x\n", mfdcr(DCRN_PLB1_BESR));
printk("PLB1_BEAR=%x\n", mfdcr(DCRN_PLB1_BEAR));
printk("OPB0_BESR0=%x\n", mfdcr(DCRN_POB0_BESR0));
printk("OPB0_BEAR=%x\n", mfdcr(DCRN_POB0_BEAR));
/* raw DCR numbers below: labelled as the SDRAM controller BESR/BEAR
 * pairs -- verify the 0x1E1/0x1E2 and 0x1C1/0x1C2 addresses */
printk("SDRAM0_BESR=%x\n", mfdcr(0x1E1));
printk("SDRAM0_BEAR=%x\n", mfdcr(0x1E2));
printk("SDRAM1_BESR=%x\n", mfdcr(0x1C1));
printk("SDRAM1_BEAR=%x\n", mfdcr(0x1C2));
/*
 * redwood_ide_tune_drive() - hwif->tuneproc hook: select the best
 * supported PIO mode for @drive, capped at mode 5.
 * (Gapped listing: only this call is visible from the body.)
 */
redwood_ide_tune_drive(ide_drive_t * drive, byte pio)
pio = ide_get_best_pio_mode(drive, pio, 5, NULL);
/*
 * redwood_ide_dma_2_pio() - map a DMA/UDMA transfer-rate byte to an
 * equivalent PIO mode number.  Body not visible in this extract.
 */
redwood_ide_dma_2_pio(byte xfer_rate)
/*
 * redwood_ide_tune_chipset() - hwif->speedproc hook: retune the PIO
 * timing matching @speed, then program the drive itself via
 * ide_config_drive_speed().  (Gapped listing: declarations and the
 * return are not visible.)
 */
redwood_ide_tune_chipset(ide_drive_t * drive, byte speed)
redwood_ide_tune_drive(drive, redwood_ide_dma_2_pio(speed));
/* remember the first speed ever programmed for this drive */
if (!drive->init_speed)
drive->init_speed = speed;
err = ide_config_drive_speed(drive, speed);
drive->current_speed = speed;
144 #ifdef CONFIG_BLK_DEV_IDEDMA
/*
 * redwood_config_drive_for_dma() - decide from the drive's identify
 * data whether DMA may be used, then switch it on or off through the
 * hwif's ide_dma_on / ide_dma_off_quietly hooks.
 */
static int redwood_config_drive_for_dma(ide_drive_t *drive)
struct hd_driveid *id = drive->id;
ide_hwif_t *hwif = HWIF(drive);
/* identify word 49 bit 0 = DMA capable; also require autodma */
if (id && (id->capability & 1) && hwif->autodma) {
/*
 * Enable DMA on any drive that has
 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
 */
if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
return hwif->ide_dma_on(drive);
/*
 * Enable DMA on any drive that has mode2 DMA
 * (multi or single) enabled
 */
if (id->field_valid & 2) /* regular DMA */
if ((id->dma_mword & 0x404) == 0x404 ||
(id->dma_1word & 0x404) == 0x404)
return hwif->ide_dma_on(drive);
// if (hwif->tuneproc != NULL) hwif->tuneproc(drive, 255);
return hwif->ide_dma_off_quietly(drive);
/*
 * redwood_ide_dma_intr() - interrupt handler installed while a DMA
 * transfer is in flight.  Ends the DMA engine, completes the current
 * chunk of the request and, if sectors remain, reprograms the task-file
 * registers for the next block and restarts DMA in the same direction.
 * NOTE(review): listing is gapped; local declarations and several
 * control-flow lines (returns, else branches) are not visible.
 */
redwood_ide_dma_intr(ide_drive_t * drive)
ide_hwgroup_t *hwgroup = HWGROUP(drive);
struct request *rq = hwgroup->rq;
unsigned long block, b1, b2, b3, b4;
nsect = rq->current_nr_sectors;
dma_stat = HWIF(drive)->ide_dma_end(drive);
rq->buffer += nsect << 9;	/* advance buffer by nsect * 512 bytes */
i = (rq->nr_sectors -= nsect);
ide_end_request(drive, 1);
/* read the task-file back to recover the current block address */
b1 = HWIF(drive)->INB(IDE_SECTOR_REG);
b2 = HWIF(drive)->INB(IDE_LCYL_REG);
b3 = HWIF(drive)->INB(IDE_HCYL_REG);
b4 = HWIF(drive)->INB(IDE_SELECT_REG);
block = ((b4 & 0x0f) << 24) + (b3 << 16) + (b2 << 8) + (b1);
/* LBA drive: write the 28-bit block number back into the task-file */
if (drive->select.b.lba) {
HWIF(drive)->OUTB(block, IDE_SECTOR_REG);
HWIF(drive)->OUTB(block >>= 8, IDE_LCYL_REG);
HWIF(drive)->OUTB(block >>= 8, IDE_HCYL_REG);
HWIF(drive)->OUTB(((block >> 8) & 0x0f) | drive->select.all,
/* CHS drive (presumably the else branch): convert block to
 * cylinder/head/sector using the drive geometry */
unsigned int sect, head, cyl, track;
track = block / drive->sect;
sect = block % drive->sect + 1;
HWIF(drive)->OUTB(sect, IDE_SECTOR_REG);
head = track % drive->head;
cyl = track / drive->head;
HWIF(drive)->OUTB(cyl, IDE_LCYL_REG);
HWIF(drive)->OUTB(cyl >> 8, IDE_HCYL_REG);
HWIF(drive)->OUTB(head | drive->select.all, IDE_SELECT_REG);
/* restart DMA in the direction of the original request */
dma_stat = HWIF(drive)->ide_dma_read(drive);
dma_stat = HWIF(drive)->ide_dma_write(drive);
/*
 * redwood_dma_timer_expiry() - hwgroup expiry callback armed by the
 * DMA set-up path.  Inspects the DMA status port to decide whether to
 * extend the timeout, fail the request, or dispatch a pending IRQ.
 * (Gapped listing: the return statements of some branches are missing.)
 */
static int redwood_dma_timer_expiry(ide_drive_t *drive)
ide_hwif_t *hwif = HWIF(drive);
u8 dma_stat = hwif->INB(hwif->dma_status);
printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
drive->name, dma_stat);
if ((dma_stat & 0x18) == 0x18) /* BUSY Stupid Early Timer !! */
HWGROUP(drive)->expiry = NULL; /* one free ride for now */
/* 1 dmaing, 2 error, 4 intr */
if (dma_stat & 2) { /* ERROR */
(void) hwif->ide_dma_end(drive);
return DRIVER(drive)->error(drive,
"dma_timer_expiry", hwif->INB(IDE_STATUS_REG));
if (dma_stat & 1) /* DMAing */
if (dma_stat & 4) /* Got an Interrupt */
HWGROUP(drive)->handler(drive);
/*
 * redwood_ide_dma_end() - stop the 4xx DMA channel used for IDE and
 * mark the drive as no longer waiting for DMA.
 */
static int redwood_ide_dma_end(ide_drive_t *drive)
drive->waiting_for_dma = 0;
disable_dma_interrupt(IDE_DMACH);
disable_dma(IDE_DMACH);
/*
 * redwood_ide_dma_off_quietly() - disable DMA use on @drive without
 * logging, tearing the channel down via redwood_ide_dma_end().
 */
static int redwood_ide_dma_off_quietly(ide_drive_t *drive)
drive->using_dma = 0;
return redwood_ide_dma_end(drive);
/*
 * redwood_ide_dma_off() - like the quiet variant, but log that DMA
 * has been disabled for this drive.
 */
static int redwood_ide_dma_off(ide_drive_t *drive)
printk(KERN_INFO "%s: DMA disabled\n", drive->name);
return redwood_ide_dma_off_quietly(drive);
/*
 * redwood_ide_dma_on() - enable DMA use on @drive: reset the channel 2
 * control/status state and program the CIC register for the compiled-in
 * transfer mode (word vs. line mode, see WMODE).
 * NOTE(review): the two CICCR writes below are presumably alternative
 * arms of a WMODE conditional on lines missing from this extract --
 * verify against the full source.
 */
static int redwood_ide_dma_on(ide_drive_t *drive)
mtdcr(DCRN_DMACR2, 0);	/* clear channel 2 control register */
clr_dma_status(IDE_DMACH);
mtdcr(DCRN_DCRXBCR, 0);
mtdcr(DCRN_CICCR, mfdcr(DCRN_CICCR) | 0x00000400);
/* Configure CIC reg for line mode dma */
mtdcr(DCRN_CICCR, mfdcr(DCRN_CICCR) & ~0x00000400);
drive->using_dma = 1;
/*
 * redwood_ide_dma_check() - hwif->ide_dma_check hook; defer to
 * redwood_config_drive_for_dma() to enable/disable DMA per drive.
 */
static int redwood_ide_dma_check(ide_drive_t *drive)
return redwood_config_drive_for_dma(drive);
/*
 * redwood_ide_dma_begin() - kick off a previously programmed transfer
 * by enabling the IDE DMA channel and its interrupt.
 */
static int redwood_ide_dma_begin(ide_drive_t *drive)
enable_dma_interrupt(IDE_DMACH);
enable_dma(IDE_DMACH);
/*
 * redwood_ide_dma_io() - program the 4xx DMA channel for one chunk of
 * the current request and issue the ATA READ/WRITE DMA command.
 * @reading: nonzero for device-to-memory transfers, zero for writes.
 * Handles cache coherency for the buffer, source/destination addresses,
 * the transfer count and the channel mode, then arms
 * redwood_ide_dma_intr()/redwood_dma_timer_expiry() and starts the
 * engine via hwif->ide_dma_begin.
 * NOTE(review): listing is gapped; the #if WMODE conditionals and the
 * read/write branch structure are not visible, so the pairing of the
 * alternative set-up sequences below is inferred -- verify against the
 * full source.
 */
static int redwood_ide_dma_io(ide_drive_t *drive, int reading)
ide_hwif_t *hwif = HWIF(drive);
struct request *rq = HWGROUP(drive)->rq;
unsigned long length;
/* only plain disks are handled by this DMA path */
if (drive->media != ide_disk)
if (get_channel_config(IDE_DMACH, &dma_ch) & DMA_CHANNEL_BUSY ) /* DMA is busy? */
/* read: invalidate the buffer so the CPU sees freshly DMA'd data */
dma_cache_inv((unsigned long) rq->buffer,
rq->current_nr_sectors * 512);
set_src_addr(IDE_DMACH, 0);
set_dma_addr(IDE_DMACH, virt_to_bus(rq->buffer));
set_src_addr(IDE_DMACH, IDE_DMA_ADDR);
set_dst_addr(IDE_DMACH, virt_to_bus(rq->buffer));
/* write: flush dirty lines and invalidate before the device reads */
dma_cache_wback_inv((unsigned long) rq->buffer,
rq->current_nr_sectors * 512);
set_dma_addr(IDE_DMACH, virt_to_bus(rq->buffer));
set_dst_addr(IDE_DMACH, 0);
set_src_addr(IDE_DMACH, virt_to_bus(rq->buffer));
set_dst_addr(IDE_DMACH, IDE_DMA_ADDR);
hwif->OUTB(rq->current_nr_sectors, IDE_NSECTOR_REG);
length = rq->current_nr_sectors * 512;
/* set_dma_count doesn't do M2M line xfer sizes right. */
mtdcr(DCRN_DMACT2, length >> 2);	/* presumably word mode: 4-byte units */
mtdcr(DCRN_DMACT2, length >> 4);	/* presumably line mode: 16-byte units */
/* device-paced read, word mode */
mtdcr(DCRN_DMACR2, DMA_TD |
SET_DMA_TM(TM_PERIPHERAL) |
SET_DMA_DAI(1) | dmacr_def_word);
set_dma_mode(IDE_DMACH, DMA_MODE_READ);
/* line-mode read: route through the DCRX bridge */
mtdcr(DCRN_DCRXBCR, DCRXBCR_MDMA2 | DCRXBCR_ACTIVATE);
mtdcr(DCRN_DMACR2, SET_DMA_DAI(1) | SET_DMA_SAI(0) |
DMA_MODE_MM_DEVATSRC | SET_DMA_PW(PW_64) |
set_dma_mode(IDE_DMACH, DMA_MODE_MM_DEVATSRC);
/* device-paced write, word mode */
mtdcr(DCRN_DMACR2, SET_DMA_TM(TM_PERIPHERAL) |
SET_DMA_DAI(1) | dmacr_def_word);
set_dma_mode(IDE_DMACH, DMA_MODE_WRITE);
/* line-mode write through the DCRX bridge */
mtdcr(DCRN_DCRXBCR, DCRXBCR_WRITE | DCRXBCR_MDMA2 |
mtdcr(DCRN_DMACR2,SET_DMA_DAI(0) | SET_DMA_SAI(1) |
DMA_MODE_MM_DEVATDST| SET_DMA_PW(PW_64) |
set_dma_mode(IDE_DMACH, DMA_MODE_MM_DEVATDST);
/* arm the completion handler plus a timeout, then issue the command */
drive->waiting_for_dma = 1;
ide_set_handler(drive, &redwood_ide_dma_intr, 2*WAIT_CMD,
redwood_dma_timer_expiry);
hwif->OUTB(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
return HWIF(drive)->ide_dma_begin(drive);
/*
 * redwood_ide_dma_read() - hwif->ide_dma_read hook; device-to-memory
 * transfer via redwood_ide_dma_io().
 */
static int redwood_ide_dma_read(ide_drive_t *drive)
return redwood_ide_dma_io(drive, 1);
/*
 * redwood_ide_dma_write() - hwif->ide_dma_write hook; memory-to-device
 * transfer via redwood_ide_dma_io().
 */
static int redwood_ide_dma_write(ide_drive_t *drive)
return redwood_ide_dma_io(drive, 0);
/* returns 1 if dma irq issued, 0 otherwise */
/* NOTE(review): body not visible in this extract. */
static int redwood_ide_dma_test_irq(ide_drive_t *drive)
/*
 * redwood_ide_dma_verbose() - report which DMA mode the drive's
 * identify data advertises, falling back to the best PIO mode.
 * (Gapped listing: the printed strings and returns are not visible.)
 */
static int redwood_ide_dma_verbose(ide_drive_t * drive)
struct hd_driveid *id = drive->id;
/* identify word 63: multiword DMA support/selection bits */
if (id->field_valid & 2) {
if (id->dma_mword & 0x0004) {	/* multiword mode 2 */
} else if (id->dma_mword & 0x0002) {	/* multiword mode 1 */
} else if (id->dma_mword & 1) {	/* multiword mode 0 */
} else if (id->dma_1word & 0x0004) {	/* single-word mode 2 */
ide_get_best_pio_mode(drive, 255, 5, NULL));
/*
 * ibm4xx_ide_spinup() - poll the status register of hwif @index until
 * the drive clears BSY (it may still be spinning up at boot), then
 * select the second device (device register 0xa0 | 0x10 -- presumably
 * the slave via DEV bit 4, confirm) and repeat the busy-wait.  Each
 * loop polls up to 30 times with ~1 s delays.
 */
ibm4xx_ide_spinup(int index)
ide_ioreg_t *io_ports;
printk("ide_redwood: waiting for drive ready..");
io_ports = ide_hwifs[index].io_ports;
/* wait until drive is not busy (it may be spinning up) */
for (i = 0; i < 30; i++) {
stat = inb_p(io_ports[7]);	/* io_ports[7] = status register */
/* wait for !busy & ready */
if ((stat & 0x80) == 0) {	/* 0x80 = BSY bit */
udelay(1000 * 1000); /* 1 second */
/* select the second device and busy-wait again */
outb_p(0xa0 | 0x10, io_ports[6]);
for (i = 0; i < 30; i++) {
stat = inb_p(io_ports[7]);
/* wait for !busy & ready */
if ((stat & 0x80) == 0) {
udelay(1000 * 1000); /* 1 second */
/* reselect the first device before returning */
outb_p(0xa0, io_ports[6]);
printk("Drive spun up \n");
/*
 * nonpci_ide_default_irq() - return the default IRQ for a non-PCI IDE
 * base address.  Body not visible in this extract.
 */
nonpci_ide_default_irq(ide_ioreg_t base)
/*
 * nonpci_ide_init_hwif_ports() - probe-time set-up for the on-board
 * Redwood 4/6 IDE interface: reserves and maps the command/control
 * regions, initialises the 4xx DMA channel used for IDE transfers,
 * programs the CIC/DMA routing registers, installs the per-hwif method
 * pointers and finally spins the drives up.
 * NOTE(review): this extracted listing is gapped (declarations, #if
 * arms and returns are partly missing) and the function continues past
 * the end of the visible extract; treat exact control flow with
 * caution.
 */
nonpci_ide_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port,
ide_ioreg_t ctrl_port, int *irq)
unsigned long ioaddr;
#ifdef CONFIG_REDWOOD_4
unsigned long reg = data_port;
unsigned long xilinx;
printk("IBM Redwood 4/6 IDE driver version %s\n", IDEVR);
/* reserve the command and control register ranges */
if (!request_region(REDWOOD_IDE_CMD, 0x10, "IDE"))
if (!request_region(REDWOOD_IDE_CTRL, 2, "IDE")) {
release_region(REDWOOD_IDE_CMD, 0x10);	/* undo first reservation */
#ifdef CONFIG_REDWOOD_4
mtdcr(DCRN_DCRXICR, 0x40000000); /* set dcrx internal arbiter */
/* add RE & OEN to value set by boot rom */
mtdcr(DCRN_BRCR3, 0x407cfffe);
/* reconstruct phys addrs from EBIU config regs for CS2# */
reg = ((mfdcr(DCRN_BRCR2) & 0xff000000) >> 4) | 0xf0000000;
xilinx = reg | 0x00040000;
reg = reg | IDE_CMD_OFF;
ioaddr = (unsigned long)ioremap(reg, 0x10);
xilinx = (unsigned long)ioremap(xilinx, 0x10);
/* pulse the Xilinx mux/reset bit low, then high, with a 10 ms gap */
writew( i & ~0x8001, xilinx);
writew( 0, xilinx+7*2);
udelay(10*1000); /* 10 ms */
/* init xilinx control registers - enable ide mux, clear reset bit */
writew( i | 0x8001, xilinx);
writew( 0, xilinx+7*2);
#else /* CONFIG_REDWOOD_6 */
ioaddr = (unsigned long) ioremap(REDWOOD_IDE_CMD, 0x10);
/* all task-file registers are reached through the same mapping */
for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
hw->io_ports[i] = ioaddr;
hw->io_ports[IDE_CONTROL_OFFSET] =
(unsigned long) ioremap(REDWOOD_IDE_CTRL, 2);
/* use DMA channel 2 for IDE DMA operations */
/*Word Mode psc(11-12)=00,pwc(13-18)=000110, phc(19-21)=010, 22=1, 30=1 ---- 0xCB02*/
dma_ch.mode =DMA_MODE_READ; /* xfer from peripheral to mem */
dma_ch.buffer_enable = 0;
dma_ch.tce_enable = 0;
dma_ch.etd_output = 0;
dma_ch.pl = EXTERNAL_PERIPHERAL; /* no op */
dma_ch.pwidth = PW_16;
dma_ch.psc = 0; /* set the max setup cycles */
dma_ch.pwc = 6; /* set the max wait cycles */
dma_ch.phc = 2; /* set the max hold cycles */
dma_ch.cp = PRIORITY_LOW;
dma_ch.int_enable = 0;
dma_ch.ch_enable = 0; /* No chaining */
dma_ch.tcd_disable = 1; /* No chaining */
/*Line Mode psc(11-12)=00,pwc(13-18)=000001, phc(19-21)=010, 22=1, 30=1 ---- 0x2B02*/
dma_ch.mode =DMA_MODE_MM_DEVATSRC; /* xfer from peripheral to mem */
dma_ch.buffer_enable = 0;
dma_ch.tce_enable = 0;
dma_ch.etd_output = 0;
dma_ch.pl = EXTERNAL_PERIPHERAL; /* no op */
dma_ch.pwidth = PW_64; /* Line mode on stbs */
dma_ch.psc = 0; /* set the max setup cycles */
dma_ch.pwc = 1; /* set the max wait cycles */
dma_ch.phc = 2; /* set the max hold cycles */
dma_ch.cp = PRIORITY_LOW;
dma_ch.int_enable = 0;
dma_ch.ch_enable = 0; /* No chaining */
dma_ch.tcd_disable = 1; /* No chaining */
if (hw_init_dma_channel(IDE_DMACH, &dma_ch) != DMA_STATUS_GOOD)
disable_dma_interrupt(IDE_DMACH);
/* init CIC control reg to enable IDE interface PIO mode */
mtdcr(DCRN_CICCR, (mfdcr(DCRN_CICCR) & 0xffff7bff) | 0x0003);
/*
 * init CIC select2 reg to connect external DMA port 3 to internal
 */
/* FIXME: EXT_DMA_3 is out of bounds for map_dma_port. */
/* map_dma_port(IDE_DMACH,EXT_DMA_3,DMA_CHAN_2); */
mtdcr(DCRN_DMAS2, (mfdcr(DCRN_DMAS2) & 0xfffffff0) | 0x00000002);
/* Verified BRCR7 already set per manual. */
/* install per-hwif tuning and DMA method pointers */
hwif = &ide_hwifs[index];
hwif->tuneproc = &redwood_ide_tune_drive;
hwif->drives[0].autotune = 1;
#ifdef CONFIG_BLK_DEV_IDEDMA
hwif->ide_dma_off = &redwood_ide_dma_off;
hwif->ide_dma_off_quietly = &redwood_ide_dma_off_quietly;
hwif->ide_dma_host_off = &redwood_ide_dma_off_quietly;
hwif->ide_dma_on = &redwood_ide_dma_on;
hwif->ide_dma_host_on = &redwood_ide_dma_on;
hwif->ide_dma_check = &redwood_ide_dma_check;
hwif->ide_dma_read = &redwood_ide_dma_read;
hwif->ide_dma_write = &redwood_ide_dma_write;
hwif->ide_dma_begin = &redwood_ide_dma_begin;
hwif->ide_dma_end = &redwood_ide_dma_end;
hwif->ide_dma_test_irq = &redwood_ide_dma_test_irq;
hwif->ide_dma_verbose = &redwood_ide_dma_verbose;
hwif->speedproc = &redwood_ide_tune_chipset;
memcpy(hwif->io_ports, hw->io_ports, sizeof (hw->io_ports));
ibm4xx_ide_spinup(index);