2 * linux/drivers/ide/ide-taskfile.c Version 0.38 March 05, 2003
4 * Copyright (C) 2000-2002 Michael Cornwell <cornwell@acm.org>
5 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
6 * Copyright (C) 2001-2002 Klaus Smolin
7 * IBM Storage Technology Division
9 * The big the bad and the ugly.
11 * Problems to be fixed because of BH interface or the lack thereof.
13 * Fill me in stupid !!!
16 * General refers to the Controller and Driver "pair".
18 * Under the context of Linux it generally refers to an interrupt handler.
19 * However, it correctly describes the 'HOST'
21 * The amount of data needed to be transferred as predefined in the
22 * setup of the device.
24 * The 'DATA BLOCK' associated with the 'DATA HANDLER', and can be as
25 * small as a single sector or as large as the entire command block
29 #include <linux/config.h>
30 #define __NO_VERSION__
31 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/string.h>
34 #include <linux/kernel.h>
35 #include <linux/timer.h>
37 #include <linux/interrupt.h>
38 #include <linux/major.h>
39 #include <linux/errno.h>
40 #include <linux/genhd.h>
41 #include <linux/blkpg.h>
42 #include <linux/slab.h>
43 #include <linux/pci.h>
44 #include <linux/delay.h>
45 #include <linux/hdreg.h>
46 #include <linux/ide.h>
48 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
52 #include <asm/bitops.h>
/* Compile-time debug switch for this file (0 = disabled). */
54 #define DEBUG_TASKFILE 0 /* unset when fixed */
/* Debug trace macro; as visible here it expands to an unconditional printk. */
57 #define DTF(x...) printk(x)
/* Byte offset into rq->buffer of the next not-yet-transferred sector. */
65 #define task_rq_offset(rq) \
66 (((rq)->nr_sectors - (rq)->current_nr_sectors) * SECTOR_SIZE)
69 * for now, taskfile requests are special :/
71 * However, upon the creation of the atapi version of packet_command
72 * data-phase ISR plus its own diagnostics and extensions for direct access
73 * (ioctl,read,write,rip,stream -- atapi), the kmap/kunmap for PIO will
/*
 * Map the current sector of a request for PIO and return a pointer to it.
 * NOTE(review): this listing has elided lines (77-78 missing) -- the
 * condition selecting between the two return paths is not visible here.
 */
76 inline char *task_map_rq (struct request *rq, unsigned long *flags)
79 return ide_map_buffer(rq, flags);
80 return rq->buffer + task_rq_offset(rq);
/* Undo task_map_rq(): release the buffer mapping taken for PIO.
 * NOTE(review): guarding condition (if any) elided from this listing. */
83 inline void task_unmap_rq (struct request *rq, char *buf, unsigned long *flags)
86 ide_unmap_buffer(buf, flags);
/*
 * Read the low 24 LBA bits from the taskfile registers:
 * sector number = bits 0-7, low cylinder = 8-15, high cylinder = 16-23.
 */
89 inline u32 task_read_24 (ide_drive_t *drive)
91 return (HWIF(drive)->INB(IDE_HCYL_REG)<<16) |
92 (HWIF(drive)->INB(IDE_LCYL_REG)<<8) |
93 HWIF(drive)->INB(IDE_SECTOR_REG);
96 EXPORT_SYMBOL(task_read_24);
/*
 * Byte-swap a buffer of 16-bit words in place; wcount is a word count.
 * NOTE(review): the loop header and the declaration of 'p' are elided in
 * this listing -- only the unrolled swap of two consecutive words remains.
 */
98 static void ata_bswap_data (void *buffer, int wcount)
103 *p = *p << 8 | *p >> 8; p++;
104 *p = *p << 8 | *p >> 8; p++;
/* PIO data-in wrapper: read wcount words via the hwif, then byte-swap.
 * NOTE(review): the condition guarding the swap (presumably drive->bswap)
 * is elided from this listing -- confirm against the full source. */
109 void taskfile_input_data (ide_drive_t *drive, void *buffer, u32 wcount)
111 HWIF(drive)->ata_input_data(drive, buffer, wcount);
113 ata_bswap_data(buffer, wcount);
116 EXPORT_SYMBOL(taskfile_input_data);
/*
 * PIO data-out wrapper.  The visible swap/write/swap-back sequence plus a
 * second plain write suggests an if/else on byte-swapping, but the
 * branching lines are elided from this listing -- do not assume order.
 */
118 void taskfile_output_data (ide_drive_t *drive, void *buffer, u32 wcount)
121 ata_bswap_data(buffer, wcount);
122 HWIF(drive)->ata_output_data(drive, buffer, wcount);
123 ata_bswap_data(buffer, wcount);
125 HWIF(drive)->ata_output_data(drive, buffer, wcount);
129 EXPORT_SYMBOL(taskfile_output_data);
/*
 * Issue WIN_IDENTIFY (disks) or WIN_PIDENTIFY (other media) through the
 * raw taskfile interface, filling buf with the identify page.
 * Returns ide_raw_taskfile()'s result.  (The 'else' line between the two
 * command assignments is elided from this listing.)
 */
131 int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
134 memset(&args, 0, sizeof(ide_task_t));
135 args.tfRegister[IDE_NSECTOR_OFFSET] = 0x01;
136 if (drive->media == ide_disk)
137 args.tfRegister[IDE_COMMAND_OFFSET] = WIN_IDENTIFY;
139 args.tfRegister[IDE_COMMAND_OFFSET] = WIN_PIDENTIFY;
140 args.command_type = ide_cmd_type_parser(&args);
141 return ide_raw_taskfile(drive, &args, buf);
144 EXPORT_SYMBOL(taskfile_lib_get_identify);
146 #ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
/* Dump the taskfile and HOB (high-order-byte) register images to the log. */
147 void debug_taskfile (ide_drive_t *drive, ide_task_t *args)
149 printk(KERN_INFO "%s: ", drive->name);
150 // printk("TF.0=x%02x ", args->tfRegister[IDE_DATA_OFFSET]);
151 printk("TF.1=x%02x ", args->tfRegister[IDE_FEATURE_OFFSET]);
152 printk("TF.2=x%02x ", args->tfRegister[IDE_NSECTOR_OFFSET]);
153 printk("TF.3=x%02x ", args->tfRegister[IDE_SECTOR_OFFSET]);
154 printk("TF.4=x%02x ", args->tfRegister[IDE_LCYL_OFFSET]);
155 printk("TF.5=x%02x ", args->tfRegister[IDE_HCYL_OFFSET]);
156 printk("TF.6=x%02x ", args->tfRegister[IDE_SELECT_OFFSET]);
157 printk("TF.7=x%02x\n", args->tfRegister[IDE_COMMAND_OFFSET]);
158 printk(KERN_INFO "%s: ", drive->name);
159 // printk("HTF.0=x%02x ", args->hobRegister[IDE_DATA_OFFSET_HOB]);
160 printk("HTF.1=x%02x ", args->hobRegister[IDE_FEATURE_OFFSET_HOB]);
161 printk("HTF.2=x%02x ", args->hobRegister[IDE_NSECTOR_OFFSET_HOB]);
162 printk("HTF.3=x%02x ", args->hobRegister[IDE_SECTOR_OFFSET_HOB]);
163 printk("HTF.4=x%02x ", args->hobRegister[IDE_LCYL_OFFSET_HOB]);
164 printk("HTF.5=x%02x ", args->hobRegister[IDE_HCYL_OFFSET_HOB]);
165 printk("HTF.6=x%02x ", args->hobRegister[IDE_SELECT_OFFSET_HOB]);
166 printk("HTF.7=x%02x\n", args->hobRegister[IDE_CONTROL_OFFSET_HOB]);
168 #endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
/*
 * Program the drive's taskfile registers (HOB bytes first for LBA48) and
 * dispatch the command: PIO commands get task->handler installed via
 * ide_execute_command() (with an optional prehandler for data-out);
 * DMA commands are handed to hwif->ide_dma_read/write.
 * NOTE(review): this listing elides many lines (else branches, returns,
 * closing braces) -- the control flow below is only partially visible.
 */
170 ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
172 ide_hwif_t *hwif = HWIF(drive);
173 task_struct_t *taskfile = (task_struct_t *) task->tfRegister;
174 hob_struct_t *hobfile = (hob_struct_t *) task->hobRegister;
175 u8 HIHI = (drive->addressing == 1) ? 0xE0 : 0xEF;
177 #ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
/* NOTE(review): the leading 'void' makes this a *declaration*, not a
 * call -- debug_taskfile() never actually runs here.  Likely a bug. */
178 void debug_taskfile(drive, task);
179 #endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
181 /* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
182 if (IDE_CONTROL_REG) {
184 hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
186 SELECT_MASK(drive, 0);
/* LBA48: write the high-order bytes first, then the classic taskfile. */
188 if (drive->addressing == 1) {
189 hwif->OUTB(hobfile->feature, IDE_FEATURE_REG);
190 hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
191 hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
192 hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
193 hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
196 hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
197 hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
198 hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
199 hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
200 hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);
202 hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG);
203 if (task->handler != NULL) {
204 ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
205 if (task->prehandler != NULL)
206 return task->prehandler(drive, task->rq);
209 /* for dma commands we don't set the handler */
211 if (blk_fs_request(task->rq) && drive->using_dma) {
212 if (rq_data_dir(task->rq) == READ) {
213 if (hwif->ide_dma_read(drive))
216 if (hwif->ide_dma_write(drive))
220 if (!drive->using_dma && (task->handler == NULL))
223 switch(taskfile->command) {
224 case WIN_WRITEDMA_ONCE:
226 case WIN_WRITEDMA_EXT:
227 hwif->ide_dma_write(drive);
229 case WIN_READDMA_ONCE:
231 case WIN_READDMA_EXT:
232 case WIN_IDENTIFY_DMA:
233 hwif->ide_dma_read(drive);
236 if (task->handler == NULL)
242 switch(taskfile->command) {
243 case WIN_WRITEDMA_ONCE:
245 case WIN_WRITEDMA_EXT:
246 if (drive->using_dma && !(hwif->ide_dma_write(drive)))
248 case WIN_READDMA_ONCE:
250 case WIN_READDMA_EXT:
251 case WIN_IDENTIFY_DMA:
252 if (drive->using_dma && !(hwif->ide_dma_read(drive)))
261 EXPORT_SYMBOL(do_rw_taskfile);
264 * Error reporting, in human readable form (luxurious, but a memory hog).
/*
 * Decode a status byte into log text; if ERR is set (and not BUSY), read
 * and decode the error register too, then print the failing LBA/CHS
 * address where possible.  Returns the error-register value read.
 * NOTE(review): declarations (err, flags, sectors) and several closing
 * braces are elided from this listing.
 */
266 u8 taskfile_dump_status (ide_drive_t *drive, const char *msg, u8 stat)
268 ide_hwif_t *hwif = HWIF(drive);
272 local_irq_set(flags);
273 printk("%s: %s: status=0x%02x", drive->name, msg, stat);
274 #if FANCY_STATUS_DUMPS
276 if (stat & BUSY_STAT) {
279 if (stat & READY_STAT) printk("DriveReady ");
280 if (stat & WRERR_STAT) printk("DeviceFault ");
281 if (stat & SEEK_STAT) printk("SeekComplete ");
282 if (stat & DRQ_STAT) printk("DataRequest ");
283 if (stat & ECC_STAT) printk("CorrectedError ");
284 if (stat & INDEX_STAT) printk("Index ");
285 if (stat & ERR_STAT) printk("Error ");
288 #endif /* FANCY_STATUS_DUMPS */
/* Error register is only meaningful when ERR set and BUSY clear. */
290 if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
291 err = hwif->INB(IDE_ERROR_REG);
292 printk("%s: %s: error=0x%02x", drive->name, msg, err);
293 #if FANCY_STATUS_DUMPS
294 if (drive->media == ide_disk)
298 if (err & ABRT_ERR) printk("DriveStatusError ");
299 if (err & ICRC_ERR) printk("Bad%s", (err & ABRT_ERR) ? "CRC " : "Sector ");
300 if (err & ECC_ERR) printk("UncorrectableError ");
301 if (err & ID_ERR) printk("SectorIdNotFound ");
302 if (err & TRK0_ERR) printk("TrackZeroNotFound ");
303 if (err & MARK_ERR) printk("AddrMarkNotFound ");
305 if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR ||
306 (err & (ECC_ERR|ID_ERR|MARK_ERR))) {
/* LBA48: read low 24 bits, raise HOB, read high 24 bits. */
307 if (drive->addressing == 1) {
310 u32 low = task_read_24(drive);
311 hwif->OUTB(0x80, IDE_CONTROL_REG);
312 high = task_read_24(drive);
313 sectors = ((u64)high << 24) | low;
314 printk(", LBAsect=%lld", sectors);
316 u8 cur = hwif->INB(IDE_SELECT_REG);
317 u8 low = hwif->INB(IDE_LCYL_REG);
318 u8 high = hwif->INB(IDE_HCYL_REG);
319 u8 sect = hwif->INB(IDE_SECTOR_REG);
322 printk(", LBAsect=%d", (u32)
323 ((cur&0xf)<<24)|(high<<16)|
326 printk(", CHS=%d/%d/%d",
331 if (HWGROUP(drive)->rq)
332 printk(", sector=%lu",
333 HWGROUP(drive)->rq->sector);
336 #endif /* FANCY_STATUS_DUMPS */
339 local_irq_restore(flags);
343 EXPORT_SYMBOL(taskfile_dump_status);
346 * Clean up after success/failure of an explicit taskfile operation.
/*
 * Copy the drive's result registers back into the caller's ide_task_t
 * (including HOB bytes for LBA48-capable drives), run the posthandler,
 * and complete the request under io_request_lock.
 * NOTE(review): declarations of rq/args/flags are elided in this listing.
 */
348 void ide_end_taskfile (ide_drive_t *drive, u8 stat, u8 err)
350 ide_hwif_t *hwif = HWIF(drive);
354 task_ioreg_t command;
356 spin_lock_irqsave(&io_request_lock, flags);
357 rq = HWGROUP(drive)->rq;
358 spin_unlock_irqrestore(&io_request_lock, flags);
359 args = (ide_task_t *) rq->special;
361 command = args->tfRegister[IDE_COMMAND_OFFSET];
364 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
366 if (args->tf_in_flags.b.data) {
367 u16 data = hwif->INW(IDE_DATA_REG);
368 args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF;
369 args->hobRegister[IDE_DATA_OFFSET_HOB] = (data >> 8) & 0xFF;
371 args->tfRegister[IDE_ERROR_OFFSET] = err;
372 args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
373 args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
374 args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
375 args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
376 args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG);
377 args->tfRegister[IDE_STATUS_OFFSET] = stat;
/* LBA48-capable and enabled: raise HOB bit and read the high bytes. */
378 if ((drive->id->command_set_2 & 0x0400) &&
379 (drive->id->cfs_enable_2 & 0x0400) &&
380 (drive->addressing == 1)) {
381 hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG_HOB);
382 args->hobRegister[IDE_FEATURE_OFFSET_HOB] = hwif->INB(IDE_FEATURE_REG);
383 args->hobRegister[IDE_NSECTOR_OFFSET_HOB] = hwif->INB(IDE_NSECTOR_REG);
384 args->hobRegister[IDE_SECTOR_OFFSET_HOB] = hwif->INB(IDE_SECTOR_REG);
385 args->hobRegister[IDE_LCYL_OFFSET_HOB] = hwif->INB(IDE_LCYL_REG);
386 args->hobRegister[IDE_HCYL_OFFSET_HOB] = hwif->INB(IDE_HCYL_REG);
390 /* taskfile_settings_update(drive, args, command); */
392 if (args->posthandler != NULL)
393 args->posthandler(drive, args);
396 spin_lock_irqsave(&io_request_lock, flags);
397 blkdev_dequeue_request(rq);
398 HWGROUP(drive)->rq = NULL;
399 end_that_request_last(rq);
400 spin_unlock_irqrestore(&io_request_lock, flags);
403 EXPORT_SYMBOL(ide_end_taskfile);
406 * try_to_flush_leftover_data() is invoked in response to a drive
407 * unexpectedly having its DRQ_STAT bit set. As an alternative to
408 * resetting the drive, this routine tries to clear the condition
409 * by reading a sector's worth of data from the drive. Of course,
410 * this may not help if the drive is *waiting* for data from *us*.
/* NOTE(review): the loop around the 16-word chunked reads and the local
 * 'buffer' declaration are elided from this listing. */
412 void task_try_to_flush_leftover_data (ide_drive_t *drive)
414 int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
416 if (drive->media != ide_disk)
420 unsigned int wcount = (i > 16) ? 16 : i;
422 taskfile_input_data(drive, buffer, wcount);
426 EXPORT_SYMBOL(task_try_to_flush_leftover_data);
429 * taskfile_error() takes action based on the error returned by the drive.
/*
 * Classify the failure, bump rq->errors accordingly, and decide between
 * ending the request, resetting the drive, or scheduling a recalibrate.
 * NOTE(review): declarations (hwif, rq, err) and many branch bodies /
 * returns are elided from this listing.
 */
431 ide_startstop_t taskfile_error (ide_drive_t *drive, const char *msg, u8 stat)
437 err = taskfile_dump_status(drive, msg, stat);
438 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
442 /* retry only "normal" I/O: */
443 if (rq->cmd == IDE_DRIVE_TASKFILE) {
445 ide_end_taskfile(drive, stat, err);
448 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
449 /* other bits are useless when BUSY */
450 rq->errors |= ERROR_RESET;
452 if (drive->media != ide_disk)
454 if (stat & ERR_STAT) {
455 /* err has different meaning on cdrom and tape */
456 if (err == ABRT_ERR) {
457 if (drive->select.b.lba &&
458 (hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY))
459 /* some newer drives don't
460 * support WIN_SPECIFY
463 } else if ((err & BAD_CRC) == BAD_CRC) {
464 /* UDMA crc error -- just retry the operation */
466 } else if (err & (BBD_ERR | ECC_ERR)) {
467 /* retries won't help these */
468 rq->errors = ERROR_MAX;
469 } else if (err & TRK0_ERR) {
470 /* help it find track zero */
471 rq->errors |= ERROR_RECAL;
/* Unexpected leftover data on a read: drain it rather than reset. */
475 if ((stat & DRQ_STAT) && rq_data_dir(rq) != WRITE)
476 task_try_to_flush_leftover_data(drive);
478 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) {
480 hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);
482 if (rq->errors >= ERROR_MAX) {
483 DRIVER(drive)->end_request(drive, 0);
485 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
487 return ide_do_reset(drive);
489 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
490 drive->special.b.recalibrate = 1;
496 EXPORT_SYMBOL(taskfile_error);
499 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
/* On success commit mult_req -> mult_count; on failure zero both,
 * schedule a recalibrate, and dump the status. */
501 ide_startstop_t set_multmode_intr (ide_drive_t *drive)
503 ide_hwif_t *hwif = HWIF(drive);
506 if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
507 drive->mult_count = drive->mult_req;
509 drive->mult_req = drive->mult_count = 0;
510 drive->special.b.recalibrate = 1;
511 (void) ide_dump_status(drive, "set_multmode", stat);
516 EXPORT_SYMBOL(set_multmode_intr);
519 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
/* Poll BUSY away (bounded by 'retries', declared on an elided line);
 * OK status ends the command, ERR/DRQ escalates to the error handler,
 * otherwise re-arm this handler and wait for another interrupt. */
521 ide_startstop_t set_geometry_intr (ide_drive_t *drive)
523 ide_hwif_t *hwif = HWIF(drive);
527 while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
530 if (OK_STAT(stat, READY_STAT, BAD_STAT))
533 if (stat & (ERR_STAT|DRQ_STAT))
534 return DRIVER(drive)->error(drive, "set_geometry_intr", stat);
536 if (HWGROUP(drive)->handler != NULL)
538 ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
542 EXPORT_SYMBOL(set_geometry_intr);
545 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
/* Bad status goes to the driver's error handler; otherwise done. */
547 ide_startstop_t recal_intr (ide_drive_t *drive)
549 ide_hwif_t *hwif = HWIF(drive);
552 if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
553 return DRIVER(drive)->error(drive, "recal_intr", stat);
557 EXPORT_SYMBOL(recal_intr);
560 * Handler for commands without a data phase
/* Check ending status; on failure report via the error handler (which
 * ends the drive command), otherwise finish via ide_end_drive_cmd(). */
562 ide_startstop_t task_no_data_intr (ide_drive_t *drive)
564 ide_task_t *args = HWGROUP(drive)->rq->special;
565 ide_hwif_t *hwif = HWIF(drive);
569 if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
570 DTF("%s: command opcode 0x%02x\n", drive->name,
571 args->tfRegister[IDE_COMMAND_OFFSET]);
572 return DRIVER(drive)->error(drive, "task_no_data_intr", stat);
573 /* calls ide_end_drive_cmd */
576 ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
581 EXPORT_SYMBOL(task_no_data_intr);
584 * Handler for command with PIO data-in phase, READ
587 * FIXME before 2.4 enable ...
588 * DATA integrity issue upon error. <andre@linux-ide.org>
/*
 * Per-interrupt handler for single-sector PIO reads: verify status, read
 * one sector into the mapped request buffer, complete the request when
 * the sector count runs out, otherwise re-arm for the next interrupt.
 * NOTE(review): declarations (stat, pBuf, flags), returns, and several
 * closing braces are elided from this listing.
 */
590 ide_startstop_t task_in_intr (ide_drive_t *drive)
592 struct request *rq = HWGROUP(drive)->rq;
593 ide_hwif_t *hwif = HWIF(drive);
598 if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),DATA_READY,BAD_R_STAT)) {
599 if (stat & (ERR_STAT|DRQ_STAT)) {
601 DTF("%s: attempting to recover last " \
602 "sector counter status=0x%02x\n",
605 * Expect a BUG BOMB if we attempt to rewind the
606 * offset in the BH aka PAGE in the current BLOCK
607 * segment. This is different than the HOST segment.
611 rq->current_nr_sectors++;
612 return DRIVER(drive)->error(drive, "task_in_intr", stat);
614 if (!(stat & BUSY_STAT)) {
615 DTF("task_in_intr to Soon wait for next interrupt\n");
616 if (HWGROUP(drive)->handler == NULL)
617 ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
624 * Holding point for a brain dump of a thought :-/
627 if (!OK_STAT(stat,DRIVE_READY,drive->bad_wstat)) {
628 DTF("%s: READ attempting to recover last " \
629 "sector counter status=0x%02x\n",
631 rq->current_nr_sectors++;
632 return DRIVER(drive)->error(drive, "task_in_intr", stat);
634 if (!rq->current_nr_sectors)
635 if (!DRIVER(drive)->end_request(drive, 1))
638 if (--rq->current_nr_sectors <= 0)
639 if (!DRIVER(drive)->end_request(drive, 1))
643 pBuf = task_map_rq(rq, &flags);
644 DTF("Read: %p, rq->current_nr_sectors: %d, stat: %02x\n",
645 pBuf, (int) rq->current_nr_sectors, stat);
646 taskfile_input_data(drive, pBuf, SECTOR_WORDS);
647 task_unmap_rq(rq, pBuf, &flags);
649 * FIXME :: We really can not legally get a new page/bh
650 * regardless, if this is the end of our segment.
651 * BH walking or segment can only be updated after we have a good
652 * hwif->INB(IDE_STATUS_REG); return.
654 if (--rq->current_nr_sectors <= 0)
655 if (!DRIVER(drive)->end_request(drive, 1))
658 * ERM, it is technically legal to leave/exit here but it makes
659 * a mess of the code ...
661 if (HWGROUP(drive)->handler == NULL)
662 ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
666 EXPORT_SYMBOL(task_in_intr);
669 * Handler for command with Read Multiple
/*
 * Like task_in_intr() but transfers up to drive->mult_count sectors per
 * interrupt.  On error the sector counter is rewound by mult_count as a
 * pessimistic "all of this chunk was bad" assumption.
 * NOTE(review): declarations (stat, pBuf, flags, nsect) and several
 * lines/braces are elided from this listing.
 */
671 ide_startstop_t task_mulin_intr (ide_drive_t *drive)
673 ide_hwif_t *hwif = HWIF(drive);
674 struct request *rq = HWGROUP(drive)->rq;
676 unsigned int msect = drive->mult_count;
681 if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),DATA_READY,BAD_R_STAT)) {
682 if (stat & (ERR_STAT|DRQ_STAT)) {
684 rq->current_nr_sectors += drive->mult_count;
686 * NOTE: could rewind beyond beginning :-/
689 printk("%s: MULTI-READ assume all data " \
690 "transfered is bad status=0x%02x\n",
693 return DRIVER(drive)->error(drive, "task_mulin_intr", stat);
695 /* no data yet, so wait for another interrupt */
696 if (HWGROUP(drive)->handler == NULL)
697 ide_set_handler(drive, &task_mulin_intr, WAIT_WORSTCASE, NULL);
702 nsect = rq->current_nr_sectors;
705 pBuf = task_map_rq(rq, &flags);
706 DTF("Multiread: %p, nsect: %d, msect: %d, " \
707 " rq->current_nr_sectors: %d\n",
708 pBuf, nsect, msect, rq->current_nr_sectors);
709 taskfile_input_data(drive, pBuf, nsect * SECTOR_WORDS);
710 task_unmap_rq(rq, pBuf, &flags);
712 rq->current_nr_sectors -= nsect;
715 * FIXME :: We really can not legally get a new page/bh
716 * regardless, if this is the end of our segment.
717 * BH walking or segment can only be updated after we have a
718 * good hwif->INB(IDE_STATUS_REG); return.
720 if (!rq->current_nr_sectors) {
721 if (!DRIVER(drive)->end_request(drive, 1))
725 if (HWGROUP(drive)->handler == NULL)
726 ide_set_handler(drive, &task_mulin_intr, WAIT_WORSTCASE, NULL);
730 EXPORT_SYMBOL(task_mulin_intr);
733 * VERIFY ME before 2.4 ... unexpected race is possible based on details
734 * RMK with 74LS245/373/374 TTL buffer logic because of passthrough.
/*
 * Prehandler for PIO WRITE: wait for DRQ, then push the first sector so
 * the drive can start raising data-phase interrupts.
 * NOTE(review): declarations (pBuf, flags) and the returns are elided
 * from this listing.
 */
736 ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
740 ide_startstop_t startstop;
742 if (ide_wait_stat(&startstop, drive, DATA_READY,
743 drive->bad_wstat, WAIT_DRQ)) {
744 printk(KERN_ERR "%s: no DRQ after issuing WRITE%s\n",
746 drive->addressing ? "_EXT" : "");
749 /* For Write_sectors we need to stuff the first sector */
750 pBuf = task_map_rq(rq, &flags);
751 taskfile_output_data(drive, pBuf, SECTOR_WORDS);
752 rq->current_nr_sectors--;
753 task_unmap_rq(rq, pBuf, &flags);
757 EXPORT_SYMBOL(pre_task_out_intr);
760 * Handler for command with PIO data-out phase WRITE
762 * WOOHOO this is a CORRECT STATE DIAGRAM NOW, <andre@linux-ide.org>
/*
 * Per-interrupt handler for single-sector PIO writes: verify status,
 * complete the request when done, otherwise push the next sector when
 * the drive asserts DRQ, and re-arm for the following interrupt.
 * NOTE(review): declarations (stat, pBuf, flags) and several lines are
 * elided from this listing.
 */
764 ide_startstop_t task_out_intr (ide_drive_t *drive)
766 ide_hwif_t *hwif = HWIF(drive);
767 struct request *rq = HWGROUP(drive)->rq;
772 if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), DRIVE_READY, drive->bad_wstat)) {
773 DTF("%s: WRITE attempting to recover last " \
774 "sector counter status=0x%02x\n",
776 rq->current_nr_sectors++;
777 return DRIVER(drive)->error(drive, "task_out_intr", stat);
780 * Safe to update request for partial completions.
781 * We have a good STATUS CHECK!!!
783 if (!rq->current_nr_sectors)
784 if (!DRIVER(drive)->end_request(drive, 1))
786 if ((rq->current_nr_sectors==1) ^ (stat & DRQ_STAT)) {
787 rq = HWGROUP(drive)->rq;
788 pBuf = task_map_rq(rq, &flags);
789 DTF("write: %p, rq->current_nr_sectors: %d\n",
790 pBuf, (int) rq->current_nr_sectors);
791 taskfile_output_data(drive, pBuf, SECTOR_WORDS);
792 task_unmap_rq(rq, pBuf, &flags);
794 rq->current_nr_sectors--;
796 if (HWGROUP(drive)->handler == NULL)
797 ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
801 EXPORT_SYMBOL(task_out_intr);
803 #undef ALTERNATE_STATE_DIAGRAM_MULTI_OUT
/*
 * Prehandler for WRITE MULTIPLE: wait for DRQ after command issue, then
 * (default path) poll drive_is_ready() briefly and jump straight into
 * the data handler.  The ALTERNATE_STATE_DIAGRAM_MULTI_OUT variant,
 * which stuffs the first chunk here, is compiled out by the #undef.
 * NOTE(review): several declarations, returns, and loop bodies are
 * elided from this listing.
 */
805 ide_startstop_t pre_task_mulout_intr (ide_drive_t *drive, struct request *rq)
807 #ifdef ALTERNATE_STATE_DIAGRAM_MULTI_OUT
808 ide_hwif_t *hwif = HWIF(drive);
810 unsigned int nsect = 0, msect = drive->mult_count;
813 #endif /* ALTERNATE_STATE_DIAGRAM_MULTI_OUT */
815 ide_task_t *args = rq->special;
816 ide_startstop_t startstop;
820 * assign private copy for multi-write
822 memcpy(&HWGROUP(drive)->wrq, rq, sizeof(struct request));
825 if (ide_wait_stat(&startstop, drive, DATA_READY,
826 drive->bad_wstat, WAIT_DRQ)) {
827 printk(KERN_ERR "%s: no DRQ after issuing %s\n",
829 drive->addressing ? "MULTWRITE_EXT" : "MULTWRITE");
832 #ifdef ALTERNATE_STATE_DIAGRAM_MULTI_OUT
835 nsect = rq->current_nr_sectors;
838 pBuf = task_map_rq(rq, &flags);
839 DTF("Pre-Multiwrite: %p, nsect: %d, msect: %d, " \
840 "rq->current_nr_sectors: %ld\n",
841 pBuf, nsect, msect, rq->current_nr_sectors);
843 taskfile_output_data(drive, pBuf, nsect * SECTOR_WORDS);
844 task_unmap_rq(rq, pBuf, &flags);
845 rq->current_nr_sectors -= nsect;
846 if (!rq->current_nr_sectors) {
847 if (!DRIVER(drive)->end_request(drive, 1))
849 stat = hwif->INB(IDE_STATUS_REG);
856 #else /* ! ALTERNATE_STATE_DIAGRAM_MULTI_OUT */
857 if (!(drive_is_ready(drive))) {
859 for (i=0; i<100; i++) {
860 if (drive_is_ready(drive))
866 * WARNING :: if the drive as not acked good status we may not
867 * move the DATA-TRANSFER T-Bar as BSY != 0. <andre@linux-ide.org>
869 return args->handler(drive);
870 #endif /* ALTERNATE_STATE_DIAGRAM_MULTI_OUT */
873 EXPORT_SYMBOL(pre_task_mulout_intr);
876 * FIXME before enabling in 2.4 ... DATA integrity issue upon error.
879 * Handler for command write multiple
880 * Called directly from execute_drive_cmd for the first bunch of sectors,
881 * afterwards only by the ISR
/*
 * Multi-sector PIO write handler.  The first branch handles the final
 * interrupt (current_nr_sectors == 0); the second validates status before
 * each chunk; then up to mult_count sectors are pushed and the handler
 * re-armed.  NOTE(review): declarations (pBuf, flags, nsect), returns,
 * and closing braces are elided from this listing.
 */
883 ide_startstop_t task_mulout_intr (ide_drive_t *drive)
885 ide_hwif_t *hwif = HWIF(drive);
886 u8 stat = hwif->INB(IDE_STATUS_REG);
887 struct request *rq = HWGROUP(drive)->rq;
889 ide_startstop_t startstop = ide_stopped;
890 unsigned int msect = drive->mult_count;
895 * (ks/hs): Handle last IRQ on multi-sector transfer,
896 * occurs after all data was sent in this chunk
898 if (rq->current_nr_sectors == 0) {
899 if (stat & (ERR_STAT|DRQ_STAT)) {
901 rq->current_nr_sectors += drive->mult_count;
903 * NOTE: could rewind beyond beginning :-/
906 printk(KERN_ERR "%s: MULTI-WRITE assume all data " \
907 "transfered is bad status=0x%02x\n",
910 return DRIVER(drive)->error(drive, "task_mulout_intr", stat);
913 DRIVER(drive)->end_request(drive, 1);
917 * DON'T be lazy code the above and below togather !!!
919 if (!OK_STAT(stat,DATA_READY,BAD_R_STAT)) {
920 if (stat & (ERR_STAT|DRQ_STAT)) {
922 rq->current_nr_sectors += drive->mult_count;
924 * NOTE: could rewind beyond beginning :-/
927 printk(KERN_ERR "%s: MULTI-WRITE assume all data " \
928 "transfered is bad status=0x%02x\n",
931 return DRIVER(drive)->error(drive, "task_mulout_intr", stat);
933 /* no data yet, so wait for another interrupt */
934 if (HWGROUP(drive)->handler == NULL)
935 ide_set_handler(drive, &task_mulout_intr, WAIT_WORSTCASE, NULL);
939 #ifndef ALTERNATE_STATE_DIAGRAM_MULTI_OUT
/* Clear any pending handler/timer before we transfer synchronously. */
940 if (HWGROUP(drive)->handler != NULL) {
941 unsigned long lflags;
942 spin_lock_irqsave(&io_request_lock, lflags);
943 HWGROUP(drive)->handler = NULL;
944 del_timer(&HWGROUP(drive)->timer);
945 spin_unlock_irqrestore(&io_request_lock, lflags);
947 #endif /* ALTERNATE_STATE_DIAGRAM_MULTI_OUT */
950 nsect = rq->current_nr_sectors;
953 pBuf = task_map_rq(rq, &flags);
954 DTF("Multiwrite: %p, nsect: %d, msect: %d, " \
955 "rq->current_nr_sectors: %ld\n",
956 pBuf, nsect, msect, rq->current_nr_sectors);
958 taskfile_output_data(drive, pBuf, nsect * SECTOR_WORDS);
959 task_unmap_rq(rq, pBuf, &flags);
960 rq->current_nr_sectors -= nsect;
962 * FIXME :: We really can not legally get a new page/bh
963 * regardless, if this is the end of our segment.
964 * BH walking or segment can only be updated after we
965 * have a good hwif->INB(IDE_STATUS_REG); return.
967 if (!rq->current_nr_sectors) {
968 if (!DRIVER(drive)->end_request(drive, 1))
974 if (HWGROUP(drive)->handler == NULL)
975 ide_set_handler(drive, &task_mulout_intr, WAIT_WORSTCASE, NULL);
979 EXPORT_SYMBOL(task_mulout_intr);
981 /* Called internally to figure out the type of command being issued */
982 //ide_pre_handler_t * ide_pre_handler_parser (task_struct_t *taskfile, hob_struct_t *hobfile)
/* Map a taskfile command opcode to the prehandler that primes the data
 * phase (multi-write vs single-write), or NULL on an elided fallthrough.
 * NOTE(review): several case labels and the default/return are elided. */
983 ide_pre_handler_t * ide_pre_handler_parser (struct hd_drive_task_hdr *taskfile, struct hd_drive_hob_hdr *hobfile)
985 switch(taskfile->command) {
986 /* IDE_DRIVE_TASK_RAW_WRITE */
987 case CFA_WRITE_MULTI_WO_ERASE:
988 // case WIN_WRITE_LONG:
989 // case WIN_WRITE_LONG_ONCE:
991 case WIN_MULTWRITE_EXT:
992 return &pre_task_mulout_intr;
994 /* IDE_DRIVE_TASK_OUT */
996 // case WIN_WRITE_ONCE:
998 case WIN_WRITE_VERIFY:
999 case WIN_WRITE_BUFFER:
1000 case CFA_WRITE_SECT_WO_ERASE:
1001 case WIN_DOWNLOAD_MICROCODE:
1002 return &pre_task_out_intr;
1003 /* IDE_DRIVE_TASK_OUT */
1005 if (taskfile->feature == SMART_WRITE_LOG_SECTOR)
1006 return &pre_task_out_intr;
1008 // case WIN_WRITEDMA_ONCE:
1009 case WIN_WRITEDMA_QUEUED:
1010 case WIN_WRITEDMA_EXT:
1011 case WIN_WRITEDMA_QUEUED_EXT:
1012 /* IDE_DRIVE_TASK_OUT */
1019 EXPORT_SYMBOL(ide_pre_handler_parser);
1021 /* Called internally to figure out the type of command being issued */
1022 //ide_handler_t * ide_handler_parser (task_struct_t *taskfile, hob_struct_t *hobfile)
/* Map a taskfile command opcode (and, for SMART, the feature byte) to the
 * appropriate interrupt handler.  NOTE(review): many case labels and the
 * DMA/default returns are elided from this listing. */
1023 ide_handler_t * ide_handler_parser (struct hd_drive_task_hdr *taskfile, struct hd_drive_hob_hdr *hobfile)
1025 switch(taskfile->command) {
1028 case CFA_TRANSLATE_SECTOR:
1029 case WIN_READ_BUFFER:
1031 // case WIN_READ_ONCE:
1033 return &task_in_intr;
1034 case WIN_SECURITY_DISABLE:
1035 case WIN_SECURITY_ERASE_UNIT:
1036 case WIN_SECURITY_SET_PASS:
1037 case WIN_SECURITY_UNLOCK:
1038 case WIN_DOWNLOAD_MICROCODE:
1039 case CFA_WRITE_SECT_WO_ERASE:
1040 case WIN_WRITE_BUFFER:
1041 case WIN_WRITE_VERIFY:
1043 // case WIN_WRITE_ONCE:
1045 return &task_out_intr;
1046 // case WIN_READ_LONG:
1047 // case WIN_READ_LONG_ONCE:
1049 case WIN_MULTREAD_EXT:
1050 return &task_mulin_intr;
1051 // case WIN_WRITE_LONG:
1052 // case WIN_WRITE_LONG_ONCE:
1053 case CFA_WRITE_MULTI_WO_ERASE:
1055 case WIN_MULTWRITE_EXT:
1056 return &task_mulout_intr;
/* SMART subcommands select direction via the feature register. */
1058 switch(taskfile->feature) {
1059 case SMART_READ_VALUES:
1060 case SMART_READ_THRESHOLDS:
1061 case SMART_READ_LOG_SECTOR:
1062 return &task_in_intr;
1063 case SMART_WRITE_LOG_SECTOR:
1064 return &task_out_intr;
1066 return &task_no_data_intr;
1068 case CFA_REQ_EXT_ERROR_CODE:
1069 case CFA_ERASE_SECTORS:
1071 // case WIN_VERIFY_ONCE:
1072 case WIN_VERIFY_EXT:
1074 return &task_no_data_intr;
1076 return &set_geometry_intr;
1078 // case WIN_RESTORE:
1082 case WIN_FLUSH_CACHE:
1083 case WIN_FLUSH_CACHE_EXT:
1084 case WIN_STANDBYNOW1:
1085 case WIN_STANDBYNOW2:
1089 case WIN_CHECKPOWERMODE1:
1090 case WIN_CHECKPOWERMODE2:
1091 case WIN_GETMEDIASTATUS:
1092 case WIN_MEDIAEJECT:
1093 return &task_no_data_intr;
1095 return &set_multmode_intr;
1096 case WIN_READ_NATIVE_MAX:
1098 case WIN_READ_NATIVE_MAX_EXT:
1099 case WIN_SET_MAX_EXT:
1100 case WIN_SECURITY_ERASE_PREPARE:
1101 case WIN_SECURITY_FREEZE_LOCK:
1103 case WIN_DOORUNLOCK:
1104 case WIN_SETFEATURES:
1105 return &task_no_data_intr;
1106 case DISABLE_SEAGATE:
1107 case EXABYTE_ENABLE_NEST:
1108 return &task_no_data_intr;
1109 #ifdef CONFIG_BLK_DEV_IDEDMA
1111 // case WIN_READDMA_ONCE:
1112 case WIN_IDENTIFY_DMA:
1113 case WIN_READDMA_QUEUED:
1114 case WIN_READDMA_EXT:
1115 case WIN_READDMA_QUEUED_EXT:
1117 // case WIN_WRITEDMA_ONCE:
1118 case WIN_WRITEDMA_QUEUED:
1119 case WIN_WRITEDMA_EXT:
1120 case WIN_WRITEDMA_QUEUED_EXT:
1124 case WIN_DEVICE_RESET:
1125 case WIN_QUEUED_SERVICE:
1132 EXPORT_SYMBOL(ide_handler_parser);
/* Select a posthandler for commands that update cached drive settings
 * (geometry, recalibrate, multiple mode).  NOTE(review): the return
 * statements for each case are elided from this listing. */
1134 ide_post_handler_t * ide_post_handler_parser (struct hd_drive_task_hdr *taskfile, struct hd_drive_hob_hdr *hobfile)
1136 switch(taskfile->command) {
1137 case WIN_SPECIFY: /* set_geometry_intr */
1138 case WIN_RESTORE: /* recal_intr */
1139 case WIN_SETMULT: /* set_multmode_intr */
1145 EXPORT_SYMBOL(ide_post_handler_parser);
1147 /* Called by ioctl to figure out the type of command being issued */
/*
 * Fill in args->prehandler/handler/posthandler and classify the command
 * into an IDE_DRIVE_TASK_* data-direction category for the ioctl path.
 * Side effect: for SMART commands the LCYL/HCYL "password" registers are
 * stuffed here.  NOTE(review): numerous case labels and returns are
 * elided from this listing; the final default is IDE_DRIVE_TASK_INVALID.
 */
1148 int ide_cmd_type_parser (ide_task_t *args)
1151 task_struct_t *taskfile = (task_struct_t *) args->tfRegister;
1152 hob_struct_t *hobfile = (hob_struct_t *) args->hobRegister;
1154 args->prehandler = ide_pre_handler_parser(taskfile, hobfile);
1155 args->handler = ide_handler_parser(taskfile, hobfile);
1156 args->posthandler = ide_post_handler_parser(taskfile, hobfile);
1158 switch(args->tfRegister[IDE_COMMAND_OFFSET]) {
1161 return IDE_DRIVE_TASK_IN;
1162 case CFA_TRANSLATE_SECTOR:
1164 // case WIN_READ_ONCE:
1166 case WIN_READ_BUFFER:
1167 return IDE_DRIVE_TASK_IN;
1169 // case WIN_WRITE_ONCE:
1171 case WIN_WRITE_VERIFY:
1172 case WIN_WRITE_BUFFER:
1173 case CFA_WRITE_SECT_WO_ERASE:
1174 case WIN_DOWNLOAD_MICROCODE:
1175 return IDE_DRIVE_TASK_RAW_WRITE;
1176 // case WIN_READ_LONG:
1177 // case WIN_READ_LONG_ONCE:
1179 case WIN_MULTREAD_EXT:
1180 return IDE_DRIVE_TASK_IN;
1181 // case WIN_WRITE_LONG:
1182 // case WIN_WRITE_LONG_ONCE:
1183 case CFA_WRITE_MULTI_WO_ERASE:
1185 case WIN_MULTWRITE_EXT:
1186 return IDE_DRIVE_TASK_RAW_WRITE;
1187 case WIN_SECURITY_DISABLE:
1188 case WIN_SECURITY_ERASE_UNIT:
1189 case WIN_SECURITY_SET_PASS:
1190 case WIN_SECURITY_UNLOCK:
1191 return IDE_DRIVE_TASK_OUT;
/* SMART: load the magic LCYL/HCYL values required by the protocol. */
1193 args->tfRegister[IDE_LCYL_OFFSET] = SMART_LCYL_PASS;
1194 args->tfRegister[IDE_HCYL_OFFSET] = SMART_HCYL_PASS;
1195 switch(args->tfRegister[IDE_FEATURE_OFFSET]) {
1196 case SMART_READ_VALUES:
1197 case SMART_READ_THRESHOLDS:
1198 case SMART_READ_LOG_SECTOR:
1199 return IDE_DRIVE_TASK_IN;
1200 case SMART_WRITE_LOG_SECTOR:
1201 return IDE_DRIVE_TASK_OUT;
1203 return IDE_DRIVE_TASK_NO_DATA;
1205 #ifdef CONFIG_BLK_DEV_IDEDMA
1207 // case WIN_READDMA_ONCE:
1208 case WIN_IDENTIFY_DMA:
1209 case WIN_READDMA_QUEUED:
1210 case WIN_READDMA_EXT:
1211 case WIN_READDMA_QUEUED_EXT:
1212 return IDE_DRIVE_TASK_IN;
1214 // case WIN_WRITEDMA_ONCE:
1215 case WIN_WRITEDMA_QUEUED:
1216 case WIN_WRITEDMA_EXT:
1217 case WIN_WRITEDMA_QUEUED_EXT:
1218 return IDE_DRIVE_TASK_RAW_WRITE;
1220 case WIN_SETFEATURES:
1221 switch(args->tfRegister[IDE_FEATURE_OFFSET]) {
1222 case SETFEATURES_EN_8BIT:
1223 case SETFEATURES_EN_WCACHE:
1224 return IDE_DRIVE_TASK_NO_DATA;
1225 case SETFEATURES_XFER:
1226 return IDE_DRIVE_TASK_SET_XFER;
1227 case SETFEATURES_DIS_DEFECT:
1228 case SETFEATURES_EN_APM:
1229 case SETFEATURES_DIS_MSN:
1230 case SETFEATURES_DIS_RETRY:
1231 case SETFEATURES_EN_AAM:
1232 case SETFEATURES_RW_LONG:
1233 case SETFEATURES_SET_CACHE:
1234 case SETFEATURES_DIS_RLA:
1235 case SETFEATURES_EN_RI:
1236 case SETFEATURES_EN_SI:
1237 case SETFEATURES_DIS_RPOD:
1238 case SETFEATURES_DIS_WCACHE:
1239 case SETFEATURES_EN_DEFECT:
1240 case SETFEATURES_DIS_APM:
1241 case SETFEATURES_EN_ECC:
1242 case SETFEATURES_EN_MSN:
1243 case SETFEATURES_EN_RETRY:
1244 case SETFEATURES_EN_RLA:
1245 case SETFEATURES_PREFETCH:
1246 case SETFEATURES_4B_RW_LONG:
1247 case SETFEATURES_DIS_AAM:
1248 case SETFEATURES_EN_RPOD:
1249 case SETFEATURES_DIS_RI:
1250 case SETFEATURES_DIS_SI:
1252 return IDE_DRIVE_TASK_NO_DATA;
1255 case CFA_REQ_EXT_ERROR_CODE:
1256 case CFA_ERASE_SECTORS:
1258 // case WIN_VERIFY_ONCE:
1259 case WIN_VERIFY_EXT:
1264 case WIN_FLUSH_CACHE:
1265 case WIN_FLUSH_CACHE_EXT:
1266 case WIN_STANDBYNOW1:
1267 case WIN_STANDBYNOW2:
1271 case DISABLE_SEAGATE:
1272 case WIN_CHECKPOWERMODE1:
1273 case WIN_CHECKPOWERMODE2:
1274 case WIN_GETMEDIASTATUS:
1275 case WIN_MEDIAEJECT:
1277 case WIN_READ_NATIVE_MAX:
1279 case WIN_READ_NATIVE_MAX_EXT:
1280 case WIN_SET_MAX_EXT:
1281 case WIN_SECURITY_ERASE_PREPARE:
1282 case WIN_SECURITY_FREEZE_LOCK:
1283 case EXABYTE_ENABLE_NEST:
1285 case WIN_DOORUNLOCK:
1286 return IDE_DRIVE_TASK_NO_DATA;
1289 case WIN_DEVICE_RESET:
1290 case WIN_QUEUED_SERVICE:
1293 return IDE_DRIVE_TASK_INVALID;
1297 EXPORT_SYMBOL(ide_cmd_type_parser);
1300 * This function is intended to be used prior to invoking ide_do_drive_cmd().
1302 void ide_init_drive_taskfile (struct request *rq)
1304 memset(rq, 0, sizeof(*rq));
1305 rq->cmd = IDE_DRIVE_TASK_NO_DATA;
1308 EXPORT_SYMBOL(ide_init_drive_taskfile);
/*
 * Build a taskfile request and submit it synchronously through
 * ide_do_drive_cmd().  The sector count comes from data_size, or --
 * when the command transfers data and the registers are authoritative
 * -- from the (hob|tf) NSECTOR register pair.
 * NOTE(review): the local 'struct request rq' declaration and several
 * brace/else lines are elided in this view of the file.
 */
int ide_diag_taskfile (ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
	ide_init_drive_taskfile(&rq);
	rq.cmd = IDE_DRIVE_TASKFILE;
	/*
	 * (ks) We currently transfer only whole sectors.
	 * This is sufficient for now.  But it would be great
	 * if we could find a solution to transfer any size,
	 * to support special commands like READ LONG.
	 */
	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
		/* high byte of the count lives in the HOB register (LBA48) */
		rq.current_nr_sectors = rq.nr_sectors = (args->hobRegister[IDE_NSECTOR_OFFSET_HOB] << 8) | args->tfRegister[IDE_NSECTOR_OFFSET];
		/* rq.hard_cur_sectors */
		/* explicit byte count supplied by the caller (whole sectors) */
		rq.current_nr_sectors = rq.nr_sectors = data_size / SECTOR_SIZE;
		/* rq.hard_cur_sectors */
	if (args->tf_out_flags.all == 0) {
		/*
		 * clean up kernel settings for driver sanity, regardless.
		 * except for discrete diag services.
		 */
		args->posthandler = ide_post_handler_parser(
				(struct hd_drive_task_hdr *) args->tfRegister,
				(struct hd_drive_hob_hdr *) args->hobRegister);
	/* queue the request and sleep until it completes */
	return ide_do_drive_cmd(drive, &rq, ide_wait);
/*
 * Alternate (conditionally-compiled) implementation: inserts the
 * request at the tail of the drive's queue under io_request_lock,
 * kicks ide_do_request() and blocks on a completion until serviced.
 * NOTE(review): the declaration of 'rq' is elided in this view; the
 * visible lines mix 'rq->' and 'rq.' accesses -- presumably an
 * artifact of elided lines, but verify against the full source.
 */
int ide_diag_taskfile (ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	unsigned int major = HWIF(drive)->major;
	struct list_head *queue_head = &drive->queue.queue_head;
	DECLARE_COMPLETION(wait);	/* signalled when the request is done */

	/* the pdc4030 firmware interface cannot run these special commands */
	if (HWIF(drive)->chipset == ide_pdc4030 && buf != NULL)
		return -ENOSYS;	/* special drive cmds not supported */

	memset(rq, 0, sizeof(*rq));
	rq->cmd = IDE_DRIVE_TASKFILE;

	/*
	 * (ks) We currently transfer only whole sectors.
	 * This is sufficient for now.  But it would be great
	 * if we could find a solution to transfer any size,
	 * to support special commands like READ LONG.
	 */
	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
		if (data_size == 0) {
			/*
			 * No byte count given: derive it from the NSECTOR
			 * registers.
			 * NOTE(review): .b.low is loaded from the HOB (high)
			 * byte and .b.high from the low byte -- this looks
			 * swapped relative to the sibling implementation
			 * above ((hob << 8) | tf); confirm against the full
			 * source before relying on it.
			 */
			ata_nsector_t nsector;
			nsector.b.low = args->hobRegister[IDE_NSECTOR_OFFSET_HOB];
			nsector.b.high = args->tfRegister[IDE_NSECTOR_OFFSET];
			rq.nr_sectors = nsector.all;
			rq.nr_sectors = data_size / SECTOR_SIZE;
		rq.current_nr_sectors = rq.nr_sectors;
		// rq.hard_cur_sectors = rq.nr_sectors;

	if (args->tf_out_flags.all == 0) {
		/*
		 * clean up kernel settings for driver sanity, regardless.
		 * except for discrete diag services.
		 */
		args->posthandler = ide_post_handler_parser(
				(struct hd_drive_task_hdr *) args->tfRegister,
				(struct hd_drive_hob_hdr *) args->hobRegister);

	rq->rq_status = RQ_ACTIVE;
	rq->rq_dev = MKDEV(major,(drive->select.b.unit)<<PARTN_BITS);
	rq->waiting = &wait;

	/* append at the tail of the drive's queue and start processing */
	spin_lock_irqsave(&io_request_lock, flags);
	queue_head = queue_head->prev;
	list_add(&rq->queue, queue_head);
	ide_do_request(hwgroup, 0);
	spin_unlock_irqrestore(&io_request_lock, flags);

	wait_for_completion(&wait);	/* wait for it to be serviced */
	return rq->errors ? -EIO : 0;	/* return -EIO if errors */
1413 EXPORT_SYMBOL(ide_diag_taskfile);
1415 int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
1417 return ide_diag_taskfile(drive, args, 0, buf);
1420 EXPORT_SYMBOL(ide_raw_taskfile);
1422 #ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
1423 char * ide_ioctl_verbose (unsigned int cmd)
1428 char * ide_task_cmd_verbose (u8 task)
1432 #endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
1434 #define MAX_DMA (256*SECTOR_WORDS)
1436 ide_startstop_t flagged_taskfile(ide_drive_t *, ide_task_t *);
1437 ide_startstop_t flagged_task_no_data_intr(ide_drive_t *);
1438 ide_startstop_t flagged_task_in_intr(ide_drive_t *);
1439 ide_startstop_t flagged_task_mulin_intr(ide_drive_t *);
1440 ide_startstop_t flagged_pre_task_out_intr(ide_drive_t *, struct request *);
1441 ide_startstop_t flagged_task_out_intr(ide_drive_t *);
1442 ide_startstop_t flagged_pre_task_mulout_intr(ide_drive_t *, struct request *);
1443 ide_startstop_t flagged_task_mulout_intr(ide_drive_t *);
/*
 * HDIO_DRIVE_TASKFILE ioctl: copy an ide_task_request_t (optionally
 * followed by an out-buffer and an in-buffer) from user space, run the
 * command with the handlers appropriate for the requested data phase,
 * then copy the resulting registers and any in-data back to the user.
 * PIO is temporarily forced to 16-bit for the duration of the command.
 * NOTE(review): error/cleanup paths, 'break' statements and some case
 * labels (TASKFILE_OUT, TASKFILE_IN) are elided in this view.
 */
int ide_taskfile_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
	ide_task_request_t *req_task;
	task_ioreg_t *argsptr = args.tfRegister;
	task_ioreg_t *hobsptr = args.hobRegister;
	int tasksize = sizeof(struct ide_task_request_s);
	/* remember current PIO width; restored before returning */
	u8 io_32bit = drive->io_32bit;
//	printk("IDE Taskfile ...\n");
	req_task = kmalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	memset(req_task, 0, tasksize);
	if (copy_from_user(req_task, (void *) arg, tasksize)) {
	taskout = (int) req_task->out_size;
	taskin = (int) req_task->in_size;
		/* out-data follows the request header in the user buffer */
		int outtotal = tasksize;
		outbuf = kmalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
		memset(outbuf, 0, taskout);
		if (copy_from_user(outbuf, (void *)arg + outtotal, taskout)) {
		/* in-data follows header + out-data in the user buffer */
		int intotal = tasksize + taskout;
		inbuf = kmalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
		memset(inbuf, 0, taskin);
		if (copy_from_user(inbuf, (void *)arg + intotal , taskin)) {
	/* translate the user request into an ide_task_t */
	memset(&args, 0, sizeof(ide_task_t));
	memcpy(argsptr, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
	memcpy(hobsptr, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE);
	args.tf_in_flags = req_task->in_flags;
	args.tf_out_flags = req_task->out_flags;
	args.data_phase = req_task->data_phase;
	args.command_type = req_task->req_cmd;
#ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
	DTF("%s: ide_ioctl_cmd %s: ide_task_cmd %s\n",
		ide_ioctl_verbose(cmd),
		ide_task_cmd_verbose(args.tfRegister[IDE_COMMAND_OFFSET]));
#endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
	/* force 16-bit PIO while the command runs */
	drive->io_32bit = 0;
	switch(req_task->data_phase) {
		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
		case TASKFILE_IN_OUT:
			/*
			 * NOTE(review): as visible here the second call
			 * overwrites 'err' from the first; conditional
			 * compilation lines appear to be elided in this view
			 * -- confirm against the full source.
			 */
			args.prehandler = &pre_task_out_intr;
			args.handler = &task_out_intr;
			args.posthandler = NULL;
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			args.prehandler = NULL;
			args.handler = &task_in_intr;
			args.posthandler = NULL;
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
		case TASKFILE_MULTI_OUT:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
			/* flagged taskfile requests use the flagged handlers */
			if (args.tf_out_flags.all != 0) {
				args.prehandler = &flagged_pre_task_mulout_intr;
				args.handler = &flagged_task_mulout_intr;
				args.prehandler = &pre_task_mulout_intr;
				args.handler = &task_mulout_intr;
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			/* (TASKFILE_OUT: single-sector PIO write) */
			if (args.tf_out_flags.all != 0) {
				args.prehandler = &flagged_pre_task_out_intr;
				args.handler = &flagged_task_out_intr;
				args.prehandler = &pre_task_out_intr;
				args.handler = &task_out_intr;
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
		case TASKFILE_MULTI_IN:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
			if (args.tf_out_flags.all != 0) {
				args.handler = &flagged_task_mulin_intr;
				args.handler = &task_mulin_intr;
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			/* (TASKFILE_IN: single-sector PIO read) */
			if (args.tf_out_flags.all != 0) {
				args.handler = &flagged_task_in_intr;
				args.handler = &task_in_intr;
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
		case TASKFILE_NO_DATA:
			if (args.tf_out_flags.all != 0) {
				args.handler = &flagged_task_no_data_intr;
				args.handler = &task_no_data_intr;
			err = ide_diag_taskfile(drive, &args, 0, NULL);
	/* copy resulting registers and flags back into the user request */
	memcpy(req_task->io_ports, &(args.tfRegister), HDIO_DRIVE_TASK_HDR_SIZE);
	memcpy(req_task->hob_ports, &(args.hobRegister), HDIO_DRIVE_HOB_HDR_SIZE);
	req_task->in_flags = args.tf_in_flags;
	req_task->out_flags = args.tf_out_flags;
	if (copy_to_user((void *)arg, req_task, tasksize)) {
		int outtotal = tasksize;
		if (copy_to_user((void *)arg+outtotal, outbuf, taskout)) {
		int intotal = tasksize + taskout;
		if (copy_to_user((void *)arg+intotal, inbuf, taskin)) {
//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);
	/* restore the caller's PIO width */
	drive->io_32bit = io_32bit;
1643 EXPORT_SYMBOL(ide_taskfile_ioctl);
/*
 * Issue a simple drive command (IDE_DRIVE_CMD protocol) and wait for it
 * to finish.  'buf' receives 4 status bytes followed by 'sectors'
 * sectors of data and is zeroed up-front.
 * NOTE(review): the local request declaration and the lines loading
 * cmd/nsect/feature into the request are elided in this view.
 */
int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
	/* 4 status bytes + requested sectors of data */
	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
	ide_init_drive_cmd(&rq);
	return ide_do_drive_cmd(drive, &rq, ide_wait);
1662 EXPORT_SYMBOL(ide_wait_cmd);
 * FIXME: this needs to be mapped onto a taskfile. <andre@linux-ide.org>
/*
 * HDIO_DRIVE_CMD ioctl: args[] from user space holds
 * [command, sector/nsect, feature, sector count].  A NULL arg just
 * flushes the queue with an empty drive command.  Data returned by the
 * command (argsize = 4 status bytes + data) is copied back to user
 * space.  SETFEATURES_XFER requests additionally re-tune the transfer
 * rate afterwards.
 * NOTE(review): error paths, kmalloc failure handling and the set_transfer
 * else-branches are elided in this view of the file.
 */
int ide_cmd_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
	u8 args[4], *argbuf = args;
	if (NULL == (void *) arg) {
		/* no argument block: issue an empty command to drain */
		ide_init_drive_cmd(&rq);
		return ide_do_drive_cmd(drive, &rq, ide_wait);
	if (copy_from_user(args, (void *)arg, 4))
	/* mirror the legacy cmd bytes into a taskfile for the parsers */
	memset(&tfargs, 0, sizeof(ide_task_t));
	tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
	tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
	tfargs.tfRegister[IDE_SECTOR_OFFSET] = args[1];
	tfargs.tfRegister[IDE_LCYL_OFFSET] = 0x00;
	tfargs.tfRegister[IDE_HCYL_OFFSET] = 0x00;
	tfargs.tfRegister[IDE_SELECT_OFFSET] = 0x00;
	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];
	/* 4 status bytes + args[3] sectors of data */
	argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
	argbuf = kmalloc(argsize, GFP_KERNEL);
	memcpy(argbuf, args, 4);
	if (set_transfer(drive, &tfargs)) {
		xfer_rate = args[1];
		if (ide_ata66_check(drive, &tfargs))
	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);
	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	if (copy_to_user((void *)arg, argbuf, argsize))
	/*
	 * NOTE(review): this is the body of an alternate (conditionally
	 * compiled) ide_cmd_ioctl variant whose opening lines are elided in
	 * this view.  It routes the legacy command through the taskfile
	 * machinery (ide_raw_taskfile) instead of ide_wait_cmd.
	 */
	u8 args[4], *argbuf = args;
	if (NULL == (void *) arg) {
		/* no argument block: issue an empty command to drain */
		ide_init_drive_cmd(&rq);
		return ide_do_drive_cmd(drive, &rq, ide_wait);
	if (copy_from_user(args, (void *)arg, 4))
	/* mirror the legacy cmd bytes into a taskfile */
	memset(&tfargs, 0, sizeof(ide_task_t));
	tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
	tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
	tfargs.tfRegister[IDE_SECTOR_OFFSET] = args[1];
	tfargs.tfRegister[IDE_LCYL_OFFSET] = 0x00;
	tfargs.tfRegister[IDE_HCYL_OFFSET] = 0x00;
	tfargs.tfRegister[IDE_SELECT_OFFSET] = 0x00;
	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];
	/*
	 * NOTE(review): unlike the sibling variant, argsize here does NOT
	 * include the 4 status bytes, yet 'argbuf + 4' is later copied with
	 * 'argsize' bytes -- that reads 4 bytes past the allocation, and
	 * both copy_to_user calls target the same user offset.  Elided
	 * conditional-compilation lines may explain this; confirm against
	 * the full source.
	 */
	argsize = (SECTOR_WORDS * 4 * args[3]);
	argbuf = kmalloc(argsize, GFP_KERNEL);
	if (set_transfer(drive, &tfargs)) {
		xfer_rate = args[1];
		if (ide_ata66_check(drive, &tfargs))
	tfargs.command_type = ide_cmd_type_parser(&tfargs);
	err = ide_raw_taskfile(drive, &tfargs, argbuf);
	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	/* report the resulting register values back in args[] */
	args[0] = tfargs.tfRegister[IDE_COMMAND_OFFSET];
	args[1] = tfargs.tfRegister[IDE_FEATURE_OFFSET];
	args[2] = tfargs.tfRegister[IDE_NSECTOR_OFFSET];
	if (copy_to_user((void *)arg, argbuf, 4))
	if (argbuf != NULL) {
		if (copy_to_user((void *)arg, argbuf + 4, argsize))
1786 EXPORT_SYMBOL(ide_cmd_ioctl);
/*
 * Issue an IDE_DRIVE_TASK request (7 taskfile bytes supplied in 'buf')
 * and wait for completion.
 * NOTE(review): the local request declaration and the line attaching
 * 'buf' to the request are elided in this view.
 */
int ide_wait_cmd_task (ide_drive_t *drive, u8 *buf)
	ide_init_drive_cmd(&rq);
	rq.cmd = IDE_DRIVE_TASK;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
1798 EXPORT_SYMBOL(ide_wait_cmd_task);
 * FIXME: this needs to be mapped onto a taskfile. <andre@linux-ide.org>
/*
 * HDIO_DRIVE_TASK ioctl: copy 7 taskfile bytes from user space, run
 * them through ide_wait_cmd_task(), and copy the results back.
 * NOTE(review): the declarations of 'err' and 'argsize' are elided in
 * this view -- presumably argsize covers the 7 taskfile bytes; verify.
 */
int ide_task_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
	u8 args[7], *argbuf = args;
	if (copy_from_user(args, (void *)arg, 7))
	err = ide_wait_cmd_task(drive, argbuf);
	if (copy_to_user((void *)arg, argbuf, argsize))
1817 EXPORT_SYMBOL(ide_task_ioctl);
1820 * NOTICE: This is additions from IBM to provide a discrete interface,
1821 * for selective taskregister access operations. Nice JOB Klaus!!!
1822 * Glad to be able to work and co-develop this with you and IBM.
/*
 * Issue a "flagged" taskfile: only the registers whose tf_out_flags
 * bits are set are written to the hardware, HOB bytes first, then the
 * command is started with the handler chosen by the caller.
 * NOTE(review): the declaration of 'status' and several closing braces
 * are elided in this view of the file.
 */
ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
	ide_hwif_t *hwif = HWIF(drive);
	task_struct_t *taskfile = (task_struct_t *) task->tfRegister;
	hob_struct_t *hobfile = (hob_struct_t *) task->hobRegister;
#ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
	/*
	 * NOTE(review): with the leading 'void' this line is a declaration,
	 * not a call -- looks like a latent bug in the debug path; confirm.
	 */
	void debug_taskfile(drive, task);
#endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
	/*
	 * (ks) Check taskfile in/out flags.
	 * If set, then execute as it is defined.
	 * If not set, then define default settings.
	 * The default values are:
	 *	write and read all taskfile registers (except data)
	 *	write and read the hob registers (sector,nsector,lcyl,hcyl)
	 */
	if (task->tf_out_flags.all == 0) {
		task->tf_out_flags.all = IDE_TASKFILE_STD_OUT_FLAGS;
		if (drive->addressing == 1)
			task->tf_out_flags.all |= (IDE_HOB_STD_OUT_FLAGS << 8);
	if (task->tf_in_flags.all == 0) {
		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	SELECT_MASK(drive, 0);
	/* drive must not be busy before we touch the registers */
	status = hwif->INB(IDE_STATUS_REG);
	if (status & 0x80) {
		printk("flagged_taskfile -> Bad status. Status = %02x. wait 100 usec ...\n", status);
		status = hwif->INB(IDE_STATUS_REG);
		printk("flagged_taskfile -> Status = %02x\n", status);
	if (task->tf_out_flags.b.data) {
		u16 data = taskfile->data + (hobfile->data << 8);
		hwif->OUTW(data, IDE_DATA_REG);
	/* (ks) send hob registers first */
	if (task->tf_out_flags.b.nsector_hob)
		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
	if (task->tf_out_flags.b.sector_hob)
		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl_hob)
		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl_hob)
		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
	/* (ks) Send now the standard registers */
	if (task->tf_out_flags.b.error_feature)
		hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
	/* refers to number of sectors to transfer */
	if (task->tf_out_flags.b.nsector)
		hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
	/* refers to sector offset or start sector */
	if (task->tf_out_flags.b.sector)
		hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl)
		hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl)
		hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);
	/*
	 * (ks) In the flagged taskfile approach, we will use all specified
	 * registers and the register value will not be changed, except the
	 * select bit (master/slave) in the drive_head register.  We must make
	 * sure that the desired drive is selected.
	 */
	hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG);
	switch(task->data_phase) {
		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
			hwif->ide_dma_write(drive);
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			hwif->ide_dma_read(drive);
	/* PIO phases require an interrupt handler */
	if (task->handler == NULL)
	/* Issue the command */
	ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
	/* write phases must prime the first sector before the drive IRQs */
	if (task->prehandler != NULL)
		return task->prehandler(drive, HWGROUP(drive)->rq);
1935 EXPORT_SYMBOL(flagged_taskfile);
/*
 * Interrupt handler for flagged commands with no data phase: check the
 * status, report errors through the driver error path, and complete
 * the command.
 * NOTE(review): the 'u8 stat' declaration and closing braces are
 * elided in this view of the file.
 */
ide_startstop_t flagged_task_no_data_intr (ide_drive_t *drive)
	ide_hwif_t *hwif = HWIF(drive);
	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT)) {
		if (stat & ERR_STAT) {
			return DRIVER(drive)->error(drive, "flagged_task_no_data_intr", stat);
		/*
		 * (ks) Unexpected ATA data phase detected.
		 * This should not happen.  But, it can !
		 * I am not sure which function is best to clean up
		 * this situation.  I choose: ide_error(...)
		 */
		return DRIVER(drive)->error(drive, "flagged_task_no_data_intr (unexpected phase)", stat);
	/* command finished cleanly; complete it */
	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
1963 * Handler for command with PIO data-in phase
/*
 * Handler for flagged commands with a PIO data-in phase: read one
 * sector per interrupt, re-arm until all sectors have arrived, then
 * wait for the drive to go non-busy and complete the command.
 */
ide_startstop_t flagged_task_in_intr (ide_drive_t *drive)
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat = hwif->INB(IDE_STATUS_REG);
	struct request *rq = HWGROUP(drive)->rq;
	/* an interrupt with nothing left to read means protocol confusion */
	if (rq->current_nr_sectors == 0)
		return DRIVER(drive)->error(drive, "flagged_task_in_intr (no data requested)", stat);
	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
		if (stat & ERR_STAT) {
			return DRIVER(drive)->error(drive, "flagged_task_in_intr", stat);
		/*
		 * (ks) Unexpected ATA data phase detected.
		 * This should not happen.  But, it can !
		 * I am not sure which function is best to clean up
		 * this situation.  I choose: ide_error(...)
		 */
		return DRIVER(drive)->error(drive, "flagged_task_in_intr (unexpected data phase)", stat);
	/* destination = buffer offset of the next unread sector */
	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
	DTF("Read - rq->current_nr_sectors: %d, status: %02x\n", (int) rq->current_nr_sectors, stat);
	taskfile_input_data(drive, pBuf, SECTOR_WORDS);
	if (--rq->current_nr_sectors != 0) {
		/*
		 * (ks) We don't know which command was executed.
		 * So, we wait the 'WORSTCASE' value.
		 */
		ide_set_handler(drive, &flagged_task_in_intr, WAIT_WORSTCASE, NULL);
	/*
	 * (ks) Last sector was transferred, wait until drive is ready.
	 * This can take up to 10 usec.  We will wait max 50 us.
	 */
	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
	ide_end_drive_cmd (drive, stat, hwif->INB(IDE_ERROR_REG));
/*
 * Handler for flagged READ MULTIPLE commands: read up to mult_count
 * sectors per interrupt, re-arm until the request is drained, then
 * wait for non-busy and complete.
 */
ide_startstop_t flagged_task_mulin_intr (ide_drive_t *drive)
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat = hwif->INB(IDE_STATUS_REG);
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int msect, nsect;
	/* an interrupt with nothing left to read means protocol confusion */
	if (rq->current_nr_sectors == 0)
		return DRIVER(drive)->error(drive, "flagged_task_mulin_intr (no data requested)", stat);
	/* multimode must have been configured on the drive */
	msect = drive->mult_count;
		return DRIVER(drive)->error(drive, "flagged_task_mulin_intr (multimode not set)", stat);
	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
		if (stat & ERR_STAT) {
			return DRIVER(drive)->error(drive, "flagged_task_mulin_intr", stat);
		/*
		 * (ks) Unexpected ATA data phase detected.
		 * This should not happen.  But, it can !
		 * I am not sure which function is best to clean up
		 * this situation.  I choose: ide_error(...)
		 */
		return DRIVER(drive)->error(drive, "flagged_task_mulin_intr (unexpected data phase)", stat);
	/* read whichever is smaller: a full burst or what remains */
	nsect = (rq->current_nr_sectors > msect) ? msect : rq->current_nr_sectors;
	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
	DTF("Multiread: %p, nsect: %d , rq->current_nr_sectors: %ld\n",
		pBuf, nsect, rq->current_nr_sectors);
	taskfile_input_data(drive, pBuf, nsect * SECTOR_WORDS);
	rq->current_nr_sectors -= nsect;
	if (rq->current_nr_sectors != 0) {
		/*
		 * (ks) We don't know which command was executed.
		 * So, we wait the 'WORSTCASE' value.
		 */
		ide_set_handler(drive, &flagged_task_mulin_intr, WAIT_WORSTCASE, NULL);
	/*
	 * (ks) Last sector was transferred, wait until drive is ready.
	 * This can take up to 10 usec.  We will wait max 50 us.
	 */
	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
	ide_end_drive_cmd (drive, stat, hwif->INB(IDE_ERROR_REG));
2072 * Pre handler for command with PIO data-out phase
/*
 * Pre-handler for flagged commands with a PIO data-out phase: wait for
 * DRQ after the command was issued, then prime the drive with the
 * first sector (the drive only interrupts after receiving data).
 */
ide_startstop_t flagged_pre_task_out_intr (ide_drive_t *drive, struct request *rq)
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat = hwif->INB(IDE_STATUS_REG);
	ide_startstop_t startstop;
	if (!rq->current_nr_sectors) {
		return DRIVER(drive)->error(drive, "flagged_pre_task_out_intr (write data not specified)", stat);
	if (ide_wait_stat(&startstop, drive, DATA_READY,
			BAD_W_STAT, WAIT_DRQ)) {
		printk(KERN_ERR "%s: No DRQ bit after issuing write command.\n", drive->name);
	/* hand the first sector to the drive */
	taskfile_output_data(drive, rq->buffer, SECTOR_WORDS);
	--rq->current_nr_sectors;
/*
 * Interrupt handler for flagged PIO data-out commands: after each
 * sector is accepted, either complete the request or push the next
 * sector and re-arm.
 */
ide_startstop_t flagged_task_out_intr (ide_drive_t *drive)
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat = hwif->INB(IDE_STATUS_REG);
	struct request *rq = HWGROUP(drive)->rq;
	if (!OK_STAT(stat, DRIVE_READY, BAD_W_STAT))
		return DRIVER(drive)->error(drive, "flagged_task_out_intr", stat);
	/* all sectors written: complete the command */
	if (!rq->current_nr_sectors) {
		ide_end_drive_cmd (drive, stat, hwif->INB(IDE_ERROR_REG));
	/* more to write, so the drive must be asking for data */
	if (!OK_STAT(stat, DATA_READY, BAD_W_STAT)) {
		/*
		 * (ks) Unexpected ATA data phase detected.
		 * This should not happen.  But, it can !
		 * I am not sure which function is best to clean up
		 * this situation.  I choose: ide_error(...)
		 */
		return DRIVER(drive)->error(drive, "flagged_task_out_intr (unexpected data phase)", stat);
	/* source = buffer offset of the next unwritten sector */
	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
	DTF("Write - rq->current_nr_sectors: %d, status: %02x\n",
		(int) rq->current_nr_sectors, stat);
	taskfile_output_data(drive, pBuf, SECTOR_WORDS);
	--rq->current_nr_sectors;
	/*
	 * (ks) We don't know which command was executed.
	 * So, we wait the 'WORSTCASE' value.
	 */
	ide_set_handler(drive, &flagged_task_out_intr, WAIT_WORSTCASE, NULL);
/*
 * Pre-handler for flagged WRITE MULTIPLE commands: wait for DRQ, then
 * prime the drive with the first burst (up to mult_count sectors).
 */
ide_startstop_t flagged_pre_task_mulout_intr (ide_drive_t *drive, struct request *rq)
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat = hwif->INB(IDE_STATUS_REG);
	ide_startstop_t startstop;
	unsigned int msect, nsect;
	if (!rq->current_nr_sectors)
		return DRIVER(drive)->error(drive, "flagged_pre_task_mulout_intr (write data not specified)", stat);
	/* multimode must have been configured on the drive */
	msect = drive->mult_count;
		return DRIVER(drive)->error(drive, "flagged_pre_task_mulout_intr (multimode not set)", stat);
	if (ide_wait_stat(&startstop, drive, DATA_READY,
			BAD_W_STAT, WAIT_DRQ)) {
		printk(KERN_ERR "%s: No DRQ bit after issuing write command.\n", drive->name);
	/* write whichever is smaller: a full burst or what remains */
	nsect = (rq->current_nr_sectors > msect) ? msect : rq->current_nr_sectors;
	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
	DTF("Multiwrite: %p, nsect: %d , rq->current_nr_sectors: %ld\n",
		pBuf, nsect, rq->current_nr_sectors);
	taskfile_output_data(drive, pBuf, nsect * SECTOR_WORDS);
	rq->current_nr_sectors -= nsect;
/*
 * Interrupt handler for flagged WRITE MULTIPLE commands: after each
 * burst is accepted, either complete the request or push the next
 * burst and re-arm.
 */
ide_startstop_t flagged_task_mulout_intr (ide_drive_t *drive)
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat = hwif->INB(IDE_STATUS_REG);
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int msect, nsect;
	/* multimode must have been configured on the drive */
	msect = drive->mult_count;
		return DRIVER(drive)->error(drive, "flagged_task_mulout_intr (multimode not set)", stat);
	if (!OK_STAT(stat, DRIVE_READY, BAD_W_STAT))
		return DRIVER(drive)->error(drive, "flagged_task_mulout_intr", stat);
	/* all sectors written: complete the command */
	if (!rq->current_nr_sectors) {
		ide_end_drive_cmd (drive, stat, hwif->INB(IDE_ERROR_REG));
	/* more to write, so the drive must be asking for data */
	if (!OK_STAT(stat, DATA_READY, BAD_W_STAT)) {
		/*
		 * (ks) Unexpected ATA data phase detected.
		 * This should not happen.  But, it can !
		 * I am not sure which function is best to clean up
		 * this situation.  I choose: ide_error(...)
		 */
		return DRIVER(drive)->error(drive, "flagged_task_mulout_intr (unexpected data phase)", stat);
	/* write whichever is smaller: a full burst or what remains */
	nsect = (rq->current_nr_sectors > msect) ? msect : rq->current_nr_sectors;
	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
	DTF("Multiwrite: %p, nsect: %d , rq->current_nr_sectors: %ld\n",
		pBuf, nsect, rq->current_nr_sectors);
	taskfile_output_data(drive, pBuf, nsect * SECTOR_WORDS);
	rq->current_nr_sectors -= nsect;
	/*
	 * (ks) We don't know which command was executed.
	 * So, we wait the 'WORSTCASE' value.
	 */
	ide_set_handler(drive, &flagged_task_mulout_intr, WAIT_WORSTCASE, NULL);
2218 * Beginning of Taskfile OPCODE Library and feature sets.
2221 #ifdef CONFIG_PKT_TASK_IOCTL
/*
 * Packet-command (ATAPI) taskfile ioctl skeleton, compiled only under
 * CONFIG_PKT_TASK_IOCTL.
 * NOTE(review): most of this function (including the declaration of
 * 'req_task' and the case bodies) is elided in this view of the file;
 * only the data-phase dispatch labels are visible.
 */
int pkt_taskfile_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
	switch(req_task->data_phase) {
		case TASKFILE_P_OUT_DMAQ:
		case TASKFILE_P_IN_DMAQ:
		case TASKFILE_P_OUT_DMA:
		case TASKFILE_P_IN_DMA:
		case TASKFILE_P_OUT:
2238 EXPORT_SYMBOL(pkt_taskfile_ioctl);
2240 #endif /* CONFIG_PKT_TASK_IOCTL */