2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
7 * 2_by_8 routines added by Simon Munton
11 * $Id: cfi_cmdset_0002.c,v 1.62 2003/01/24 23:30:13 dwmw2 Exp $
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/sched.h>
20 #include <asm/byteorder.h>
22 #include <linux/errno.h>
23 #include <linux/slab.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/mtd/map.h>
27 #include <linux/mtd/cfi.h>
29 #define AMD_BOOTLOC_BUG
/* Forward declarations for the MTD operations implemented below. */
31 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
32 static int cfi_amdstd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
33 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
34 static int cfi_amdstd_erase_onesize(struct mtd_info *, struct erase_info *);
35 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
36 static void cfi_amdstd_sync (struct mtd_info *);
37 static int cfi_amdstd_suspend (struct mtd_info *);
38 static void cfi_amdstd_resume (struct mtd_info *);
39 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41 static void cfi_amdstd_destroy(struct mtd_info *);
/* Public entry point (registered via inter_module) and its helper. */
43 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
44 static struct mtd_info *cfi_amdstd_setup (struct map_info *);
/* Chip-driver record for the AMD/Fujitsu (0x0002) command set. Uses
 * old GNU-style named initializers; probe is NULL because this driver
 * is entered through cfi_cmdset_0002(), not by direct probing. */
47 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
48 probe: NULL, /* Not usable directly */
49 destroy: cfi_amdstd_destroy,
50 name: "cfi_cmdset_0002",
/*
 * cfi_cmdset_0002 - attach entry point for the AMD/Fujitsu command set.
 * Reads the Amd/Fujitsu extended query table (when in CFI mode), works
 * around pre-1.1 CFI tables whose boot-block location field is broken
 * (AMD_BOOTLOC_BUG), derives the unlock cycle addresses from the device
 * type, seeds per-chip timeout values from the CFI query data, and then
 * hands off to cfi_amdstd_setup() to build the mtd_info.
 */
54 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
56 struct cfi_private *cfi = map->fldrv_priv;
57 unsigned char bootloc;
/* Query-byte address scaling: interleave * device width bytes apart. */
58 int ofs_factor = cfi->interleave * cfi->device_type;
61 __u32 base = cfi->chips[0].start;
63 if (cfi->cfi_mode==CFI_MODE_CFI){
/* Extended query table address: primary (P_ADR) or alternate (A_ADR). */
64 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
/* Enter CFI Query mode; read the table's major/minor version chars. */
66 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
68 major = cfi_read_query(map, base + (adr+3)*ofs_factor);
69 minor = cfi_read_query(map, base + (adr+4)*ofs_factor);
71 printk(KERN_NOTICE " Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
/* Reset to read mode, then the autoselect sequence to fetch the
 * JEDEC manufacturer and device IDs. */
73 cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
75 cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
76 cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
77 cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
78 cfi->mfr = cfi_read_query(map, base);
79 cfi->id = cfi_read_query(map, base + ofs_factor);
81 /* Wheee. Bring me the head of someone at AMD. */
82 #ifdef AMD_BOOTLOC_BUG
83 if (((major << 8) | minor) < 0x3131) {
84 /* CFI version 1.0 => don't trust bootloc */
86 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
87 bootloc = 3; /* top boot */
89 bootloc = 2; /* bottom boot */
/* Re-enter query mode and read bootloc from the extended table. */
94 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
95 bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor);
/* Broken top-boot tables list erase regions in reverse order;
 * swap them back into ascending-address order. */
97 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
98 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
100 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
101 int j = (cfi->cfiq->NumEraseRegions-1)-i;
104 swap = cfi->cfiq->EraseRegionInfo[i];
105 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
106 cfi->cfiq->EraseRegionInfo[j] = swap;
/* Unlock cycle addresses depend on device width (and bus mode). */
109 switch (cfi->device_type) {
110 case CFI_DEVICETYPE_X8:
111 cfi->addr_unlock1 = 0x555;
112 cfi->addr_unlock2 = 0x2aa;
114 case CFI_DEVICETYPE_X16:
115 cfi->addr_unlock1 = 0xaaa;
116 if (map->buswidth == cfi->interleave) {
117 /* X16 chip(s) in X8 mode */
118 cfi->addr_unlock2 = 0x555;
120 cfi->addr_unlock2 = 0x554;
123 case CFI_DEVICETYPE_X32:
124 cfi->addr_unlock1 = 0x1555;
125 cfi->addr_unlock2 = 0xaaa;
128 printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type);
/* CFI typical timeouts are log2-encoded; expand with 1 << value. */
133 for (i=0; i< cfi->numchips; i++) {
134 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
135 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
136 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
139 map->fldrv = &cfi_amdstd_chipdrv;
/* Put the chip back into array-read mode before building mtd_info. */
141 cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
142 return cfi_amdstd_setup(map);
/*
 * cfi_amdstd_setup - allocate and populate the mtd_info for this map.
 * Computes the total size, builds the erase-region table (replicated
 * once per chip), selects read/write/erase handlers for the bus width,
 * and wires up sync/suspend/resume. Error paths free whatever was
 * allocated before failing.
 */
145 static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
147 struct cfi_private *cfi = map->fldrv_priv;
148 struct mtd_info *mtd;
/* DevSize is a log2 per-chip byte count; scale by interleave. */
149 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
151 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
152 printk(KERN_NOTICE "number of %s chips: %d\n",
153 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
156 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
160 memset(mtd, 0, sizeof(*mtd));
162 mtd->type = MTD_NORFLASH;
163 /* Also select the correct geometry setup too */
164 mtd->size = devsize * cfi->numchips;
166 if (cfi->cfiq->NumEraseRegions == 1) {
167 /* No need to muck about with multiple erase sizes */
/* EraseRegionInfo word: high bits encode block size / 256,
 * low 16 bits encode block count minus one. */
168 mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
170 unsigned long offset = 0;
173 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
174 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
175 if (!mtd->eraseregions) {
176 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
180 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
181 unsigned long ernum, ersize;
182 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
183 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize ends up as the largest region's block size. */
185 if (mtd->erasesize < ersize) {
186 mtd->erasesize = ersize;
/* Replicate this region's geometry once per chip in the set. */
188 for (j=0; j<cfi->numchips; j++) {
189 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
190 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
191 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
193 offset += (ersize * ernum);
/* Sanity check: the regions must tile one chip exactly. */
195 if (offset != devsize) {
197 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
202 for (i=0; i<mtd->numeraseregions;i++){
203 printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
204 i,mtd->eraseregions[i].offset,
205 mtd->eraseregions[i].erasesize,
206 mtd->eraseregions[i].numblocks);
/* Choose handlers by bus width; unsupported widths bail out below. */
211 switch (CFIDEV_BUSWIDTH)
217 if (mtd->numeraseregions > 1)
218 mtd->erase = cfi_amdstd_erase_varsize;
/* One region containing exactly one block => chip-erase only part. */
221 if (((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1)
222 mtd->erase = cfi_amdstd_erase_chip;
224 mtd->erase = cfi_amdstd_erase_onesize;
225 mtd->read = cfi_amdstd_read;
226 mtd->write = cfi_amdstd_write;
230 printk(KERN_WARNING "Unsupported buswidth\n");
234 if (cfi->fast_prog) {
235 /* In cfi_amdstd_write() we frob the protection stuff
236 without paying any attention to the state machine.
237 This upsets in-progress erases. So we turn this flag
238 off for now till the code gets fixed. */
239 printk(KERN_NOTICE "cfi_cmdset_0002: Disabling fast programming due to code brokenness.\n");
244 /* does this chip have a secsi area? */
255 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
256 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
263 mtd->sync = cfi_amdstd_sync;
264 mtd->suspend = cfi_amdstd_suspend;
265 mtd->resume = cfi_amdstd_resume;
266 mtd->flags = MTD_CAP_NORFLASH;
267 map->fldrv = &cfi_amdstd_chipdrv;
268 mtd->name = map->name;
/* Error path: release partial allocations before failing. */
274 if(mtd->eraseregions)
275 kfree(mtd->eraseregions);
278 kfree(cfi->cmdset_priv);
/*
 * do_read_onechip - read 'len' bytes at chip-relative 'adr' from one
 * chip. Sleeps (uninterruptibly, but aborting if a signal is pending
 * after wakeup) until the chip reaches FL_READY, then copies straight
 * out of the mapping. Takes and releases chip->mutex itself.
 */
283 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
285 DECLARE_WAITQUEUE(wait, current);
286 unsigned long timeo = jiffies + HZ;
289 cfi_spin_lock(chip->mutex);
291 if (chip->state != FL_READY){
293 printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
295 set_current_state(TASK_UNINTERRUPTIBLE);
296 add_wait_queue(&chip->wq, &wait);
298 cfi_spin_unlock(chip->mutex);
301 remove_wait_queue(&chip->wq, &wait);
/* Bail out early if a signal arrived while we slept. */
303 if(signal_pending(current))
306 timeo = jiffies + HZ;
313 chip->state = FL_READY;
315 map->copy_from(map, buf, adr, len);
318 cfi_spin_unlock(chip->mutex);
/*
 * cfi_amdstd_read - MTD read entry point. Splits the request at chip
 * boundaries and delegates each piece to do_read_onechip().
 */
323 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
325 struct map_info *map = mtd->priv;
326 struct cfi_private *cfi = map->fldrv_priv;
331 /* ofs: offset within the first chip that the first read should start */
333 chipnum = (from >> cfi->chipshift);
334 ofs = from - (chipnum << cfi->chipshift);
340 unsigned long thislen;
342 if (chipnum >= cfi->numchips)
/* Clamp this piece to the end of the current chip. */
345 if ((len + ofs -1) >> cfi->chipshift)
346 thislen = (1<<cfi->chipshift) - ofs;
350 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * do_read_secsi_onechip - read from one chip's SecSi (security silicon)
 * sector. Waits for FL_READY, enters SecSi mode via the 0xAA/0x55/0x88
 * unlock sequence, copies the data, then exits back to array read mode.
 */
364 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
366 DECLARE_WAITQUEUE(wait, current);
367 unsigned long timeo = jiffies + HZ;
368 struct cfi_private *cfi = map->fldrv_priv;
371 cfi_spin_lock(chip->mutex);
373 if (chip->state != FL_READY){
375 printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
377 set_current_state(TASK_UNINTERRUPTIBLE);
378 add_wait_queue(&chip->wq, &wait);
380 cfi_spin_unlock(chip->mutex);
383 remove_wait_queue(&chip->wq, &wait);
/* Abort if a signal arrived while we slept. */
385 if(signal_pending(current))
388 timeo = jiffies + HZ;
395 chip->state = FL_READY;
/* Enter SecSi sector region mode. */
397 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
398 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
399 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
401 map->copy_from(map, buf, adr, len);
/* Exit SecSi mode: unlock cycles followed by 0x90 / 0x00. */
403 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
404 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
405 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
406 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
409 cfi_spin_unlock(chip->mutex);
/*
 * cfi_amdstd_secsi_read - MTD protection-register read entry point.
 * Splits the request across chips (8 SecSi bytes visible per chip)
 * and delegates to do_read_secsi_onechip().
 */
414 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
416 struct map_info *map = mtd->priv;
417 struct cfi_private *cfi = map->fldrv_priv;
423 /* ofs: offset within the first chip that the first read should start */
425 /* 8 secsi bytes per chip */
433 unsigned long thislen;
435 if (chipnum >= cfi->numchips)
/* Clamp to the 8-byte SecSi window of the current chip. */
438 if ((len + ofs -1) >> 3)
439 thislen = (1<<3) - ofs;
443 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * do_write_oneword - program a single bus-width word at 'adr'.
 * Waits for FL_READY, issues the program command (short 0xA0 form when
 * 'fast' unlock-bypass mode is active, full 0xAA/0x55/0xA0 sequence
 * otherwise), then detects completion by polling the DQ6 toggle bit,
 * with DQ5 indicating the device's internal timeout was exceeded.
 */
457 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum, int fast)
459 unsigned long timeo = jiffies + HZ;
460 unsigned int oldstatus, status;
461 unsigned int dq6, dq5;
462 struct cfi_private *cfi = map->fldrv_priv;
463 DECLARE_WAITQUEUE(wait, current);
466 cfi_spin_lock(chip->mutex);
468 if (chip->state != FL_READY) {
470 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", chip->state);
472 set_current_state(TASK_UNINTERRUPTIBLE);
473 add_wait_queue(&chip->wq, &wait);
475 cfi_spin_unlock(chip->mutex);
478 remove_wait_queue(&chip->wq, &wait);
480 printk(KERN_DEBUG "Wake up to write:\n");
/* Abort if a signal arrived while we slept. */
481 if(signal_pending(current))
484 timeo = jiffies + HZ;
489 chip->state = FL_WRITING;
493 if (fast) { /* Unlock bypass */
494 cfi_send_gen_cmd(0xA0, 0, chip->start, map, cfi, cfi->device_type, NULL);
497 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
498 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
499 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
502 cfi_write(map, datum, adr);
/* Drop the lock for the typical programming time before polling. */
504 cfi_spin_unlock(chip->mutex);
505 cfi_udelay(chip->word_write_time);
506 cfi_spin_lock(chip->mutex);
508 /* Polling toggle bits instead of reading back many times
509 This ensures that write operation is really completed,
510 or tells us why it failed. */
513 timeo = jiffies + (HZ/1000) + 1; /* setting timeout to 1ms for now */
/* DQ6 toggles on consecutive reads while programming is in progress. */
515 oldstatus = cfi_read(map, adr);
516 status = cfi_read(map, adr);
518 while( (status & dq6) != (oldstatus & dq6) &&
519 (status & dq5) != dq5 &&
520 !time_after(jiffies, timeo) ) {
522 if (need_resched()) {
523 cfi_spin_unlock(chip->mutex);
525 cfi_spin_lock(chip->mutex);
529 oldstatus = cfi_read( map, adr );
530 status = cfi_read( map, adr );
533 if( (status & dq6) != (oldstatus & dq6) ) {
534 /* The programming didn't stop?? */
535 if( (status & dq5) == dq5 ) {
536 /* When DQ5 raises, we must check once again
537 if DQ6 is still toggling. If not, the operation
538 completed OK. Otherwise, reset the chip. */
539 oldstatus = cfi_read(map, adr);
540 status = cfi_read(map, adr);
542 if ( (oldstatus & 0x00FF) == (status & 0x00FF) ) {
543 printk(KERN_WARNING "Warning: DQ5 raised while program operation was in progress, however operation completed OK\n" );
545 /* DQ5 is active so we can do a reset and stop the operation */
546 cfi_write(map, CMD(0xF0), chip->start);
547 printk(KERN_WARNING "Internal flash device timeout occurred or write operation was performed while flash was programming.\n" );
550 printk(KERN_WARNING "Waiting for write to complete timed out in do_write_oneword.\n");
552 chip->state = FL_READY;
554 cfi_spin_unlock(chip->mutex);
561 chip->state = FL_READY;
563 cfi_spin_unlock(chip->mutex);
/*
 * cfi_amdstd_write - MTD write entry point. Handles an unaligned head
 * (read-modify-write of one bus word), the aligned middle (optionally
 * in AMD unlock-bypass fast mode, entered and exited per chip), and an
 * unaligned tail, crossing chip boundaries as needed.
 */
568 static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
570 struct map_info *map = mtd->priv;
571 struct cfi_private *cfi = map->fldrv_priv;
574 unsigned long ofs, chipstart;
580 chipnum = to >> cfi->chipshift;
581 ofs = to - (chipnum << cfi->chipshift);
582 chipstart = cfi->chips[chipnum].start;
584 /* If it's not bus-aligned, do the first byte write */
585 if (ofs & (CFIDEV_BUSWIDTH-1)) {
586 unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
587 int i = ofs - bus_ofs;
/* Read-modify-write: merge the new bytes into the existing word. */
592 map->copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
593 while (len && i < CFIDEV_BUSWIDTH)
594 tmp_buf[i++] = buf[n++], len--;
596 if (cfi_buswidth_is_2()) {
597 datum = *(__u16*)tmp_buf;
598 } else if (cfi_buswidth_is_4()) {
599 datum = *(__u32*)tmp_buf;
601 return -EINVAL; /* should never happen, but be safe */
604 ret = do_write_oneword(map, &cfi->chips[chipnum],
/* Advance to the next chip if the head write reached a boundary. */
613 if (ofs >> cfi->chipshift) {
616 if (chipnum == cfi->numchips)
621 if (cfi->fast_prog) {
622 /* Go into unlock bypass mode */
623 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
624 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
625 cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
628 /* We are now aligned, write as much as possible */
629 while(len >= CFIDEV_BUSWIDTH) {
632 if (cfi_buswidth_is_1()) {
634 } else if (cfi_buswidth_is_2()) {
635 datum = *(__u16*)buf;
636 } else if (cfi_buswidth_is_4()) {
637 datum = *(__u32*)buf;
641 ret = do_write_oneword(map, &cfi->chips[chipnum],
642 ofs, datum, cfi->fast_prog);
/* On error, cancel bypass mode before returning. */
645 /* Get out of unlock bypass mode */
646 cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
647 cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
652 ofs += CFIDEV_BUSWIDTH;
653 buf += CFIDEV_BUSWIDTH;
654 (*retlen) += CFIDEV_BUSWIDTH;
655 len -= CFIDEV_BUSWIDTH;
/* Chip boundary crossed: leave bypass on the old chip and
 * re-enter it on the next one. */
657 if (ofs >> cfi->chipshift) {
659 /* Get out of unlock bypass mode */
660 cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
661 cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
666 if (chipnum == cfi->numchips)
668 chipstart = cfi->chips[chipnum].start;
670 /* Go into unlock bypass mode for next set of chips */
671 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
672 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
673 cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
679 /* Get out of unlock bypass mode */
680 cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
681 cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
684 /* Write the trailing bytes if any */
685 if (len & (CFIDEV_BUSWIDTH-1)) {
/* Read-modify-write for the final partial bus word. */
690 map->copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
692 tmp_buf[i++] = buf[n++];
694 if (cfi_buswidth_is_2()) {
695 datum = *(__u16*)tmp_buf;
696 } else if (cfi_buswidth_is_4()) {
697 datum = *(__u32*)tmp_buf;
699 return -EINVAL; /* should never happen, but be safe */
702 ret = do_write_oneword(map, &cfi->chips[chipnum],
/*
 * do_erase_chip - issue a full chip-erase (0x80 ... 0x10 sequence) and
 * wait for completion by polling the DQ6 toggle bit, with DQ5 as the
 * exceeded-timing indicator. Sleeps while the erase runs and honours
 * erase-suspend requests from other contexts.
 */
713 static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
715 unsigned int oldstatus, status;
716 unsigned int dq6, dq5;
717 unsigned long timeo = jiffies + HZ;
719 struct cfi_private *cfi = map->fldrv_priv;
720 DECLARE_WAITQUEUE(wait, current);
723 cfi_spin_lock(chip->mutex);
725 if (chip->state != FL_READY){
726 set_current_state(TASK_UNINTERRUPTIBLE);
727 add_wait_queue(&chip->wq, &wait);
729 cfi_spin_unlock(chip->mutex);
732 remove_wait_queue(&chip->wq, &wait);
/* Abort if a signal arrived while we slept. */
734 if(signal_pending(current))
737 timeo = jiffies + HZ;
742 chip->state = FL_ERASING;
744 /* Handle devices with one erase region, that only implement
745 * the chip erase command.
/* Full AMD chip-erase command sequence. */
748 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
749 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
750 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
751 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
752 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
753 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
754 timeo = jiffies + (HZ*20);
755 adr = cfi->addr_unlock1;
757 /* Wait for the end of programming/erasure by using the toggle method.
758 * As long as there is a programming procedure going on, bit 6 of the last
759 * written byte is toggling its state with each consecutive read.
760 * The toggling stops as soon as the procedure is completed.
762 * If the process has gone on for too long, bit 5 gets set.
763 * After bit 5 is set you can kill the operation by sending a reset
764 * command to the chip.
769 oldstatus = cfi_read(map, adr);
770 status = cfi_read(map, adr);
771 while( ((status & dq6) != (oldstatus & dq6)) &&
772 ((status & dq5) != dq5) &&
773 !time_after(jiffies, timeo)) {
776 /* an initial short sleep */
777 cfi_spin_unlock(chip->mutex);
778 schedule_timeout(HZ/100);
779 cfi_spin_lock(chip->mutex);
781 if (chip->state != FL_ERASING) {
782 /* Someone's suspended the erase. Sleep */
783 set_current_state(TASK_UNINTERRUPTIBLE);
784 add_wait_queue(&chip->wq, &wait);
786 cfi_spin_unlock(chip->mutex);
787 printk("erase suspended. Sleeping\n");
790 remove_wait_queue(&chip->wq, &wait);
792 if (signal_pending(current))
795 timeo = jiffies + (HZ*2); /* FIXME */
796 cfi_spin_lock(chip->mutex);
800 /* Busy wait for 1/10 of a millisecond */
803 ((status & dq6) != (oldstatus & dq6)) &&
804 ((status & dq5) != dq5);
807 /* Latency issues. Drop the lock, wait a while and retry */
808 cfi_spin_unlock(chip->mutex);
812 cfi_spin_lock(chip->mutex);
813 oldstatus = cfi_read(map, adr);
814 status = cfi_read(map, adr);
816 oldstatus = cfi_read(map, adr);
817 status = cfi_read(map, adr);
819 if ((status & dq6) != (oldstatus & dq6)) {
820 /* The erasing didn't stop?? */
821 if ((status & dq5) == dq5) {
822 /* dq5 is active so we can do a reset and stop the erase */
823 cfi_write(map, CMD(0xF0), chip->start);
825 chip->state = FL_READY;
827 cfi_spin_unlock(chip->mutex);
828 printk("waiting for erase to complete timed out.\n");
833 chip->state = FL_READY;
835 cfi_spin_unlock(chip->mutex);
/*
 * do_erase_oneblock - erase one sector at chip-relative 'adr'
 * (unlock, 0x80, unlock again, 0x30 at the sector) and wait for
 * completion via DQ6 toggle / DQ5 timeout polling, sleeping while the
 * erase runs and honouring erase-suspend requests.
 */
840 static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
842 unsigned int oldstatus, status;
843 unsigned int dq6, dq5;
844 unsigned long timeo = jiffies + HZ;
845 struct cfi_private *cfi = map->fldrv_priv;
846 DECLARE_WAITQUEUE(wait, current);
849 cfi_spin_lock(chip->mutex);
851 if (chip->state != FL_READY){
852 set_current_state(TASK_UNINTERRUPTIBLE);
853 add_wait_queue(&chip->wq, &wait);
855 cfi_spin_unlock(chip->mutex);
858 remove_wait_queue(&chip->wq, &wait);
/* Abort if a signal arrived while we slept. */
860 if(signal_pending(current))
863 timeo = jiffies + HZ;
868 chip->state = FL_ERASING;
/* Sector-erase command sequence; 0x30 goes to the target sector. */
872 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
873 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
874 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
875 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
876 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
877 cfi_write(map, CMD(0x30), adr);
879 timeo = jiffies + (HZ*20);
881 /* Wait for the end of programming/erasure by using the toggle method.
882 * As long as there is a programming procedure going on, bit 6 of the last
883 * written byte is toggling its state with each consecutive read.
884 * The toggling stops as soon as the procedure is completed.
886 * If the process has gone on for too long, bit 5 gets set.
887 * After bit 5 is set you can kill the operation by sending a reset
888 * command to the chip.
893 oldstatus = cfi_read(map, adr);
894 status = cfi_read(map, adr);
895 while( ((status & dq6) != (oldstatus & dq6)) &&
896 ((status & dq5) != dq5) &&
897 !time_after(jiffies, timeo)) {
900 /* an initial short sleep */
901 cfi_spin_unlock(chip->mutex);
902 schedule_timeout(HZ/100);
903 cfi_spin_lock(chip->mutex);
905 if (chip->state != FL_ERASING) {
906 /* Someone's suspended the erase. Sleep */
907 set_current_state(TASK_UNINTERRUPTIBLE);
908 add_wait_queue(&chip->wq, &wait);
910 cfi_spin_unlock(chip->mutex);
911 printk(KERN_DEBUG "erase suspended. Sleeping\n");
914 remove_wait_queue(&chip->wq, &wait);
916 if (signal_pending(current))
919 timeo = jiffies + (HZ*2); /* FIXME */
920 cfi_spin_lock(chip->mutex);
924 /* Busy wait for 1/10 of a millisecond */
927 ((status & dq6) != (oldstatus & dq6)) &&
928 ((status & dq5) != dq5);
931 /* Latency issues. Drop the lock, wait a while and retry */
932 cfi_spin_unlock(chip->mutex);
936 cfi_spin_lock(chip->mutex);
937 oldstatus = cfi_read(map, adr);
938 status = cfi_read(map, adr);
940 oldstatus = cfi_read(map, adr);
941 status = cfi_read(map, adr);
943 if( (status & dq6) != (oldstatus & dq6) )
945 /* The erasing didn't stop?? */
946 if( ( status & dq5 ) == dq5 )
948 /* When DQ5 raises, we must check once again if DQ6 is toggling.
949 If not, the erase has been completed OK. Otherwise, reset the chip. */
950 oldstatus = cfi_read( map, adr );
951 status = cfi_read( map, adr );
953 if( ( oldstatus & 0x00FF ) == ( status & 0x00FF ) )
955 printk( "Warning: DQ5 raised while erase operation was in progress, but erase completed OK\n" );
959 /* DQ5 is active so we can do a reset and stop the erase */
960 cfi_write(map, CMD(0xF0), chip->start);
961 printk( KERN_WARNING "Internal flash device timeout occured or write operation was performed while flash was erasing\n" );
966 printk( "Waiting for erase to complete timed out in do_erase_oneblock.\n");
968 chip->state = FL_READY;
970 cfi_spin_unlock(chip->mutex);
977 chip->state = FL_READY;
979 cfi_spin_unlock(chip->mutex);
/*
 * cfi_amdstd_erase_varsize - erase handler for devices with multiple
 * erase-region geometries. Validates that the request is aligned to
 * the region-local block size at both ends, then erases block by
 * block, tracking region and chip crossings as it goes.
 */
983 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
985 struct map_info *map = mtd->priv;
986 struct cfi_private *cfi = map->fldrv_priv;
987 unsigned long adr, len;
988 int chipnum, ret = 0;
990 struct mtd_erase_region_info *regions = mtd->eraseregions;
992 if (instr->addr > mtd->size)
995 if ((instr->len + instr->addr) > mtd->size)
998 /* Check that both start and end of the requested erase are
999 * aligned with the erasesize at the appropriate addresses.
1004 /* Skip all erase regions which are ended before the start of
1005 the requested erase. Actually, to save on the calculations,
1006 we skip to the first erase region which starts after the
1007 start of the requested erase, and then go back one.
1010 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
1014 /* OK, now i is pointing at the erase region in which this
1015 erase request starts. Check the start of the requested
1016 erase range is aligned with the erase size which is in
1020 if (instr->addr & (regions[i].erasesize-1))
1023 /* Remember the erase region we start on */
1026 /* Next, check that the end of the requested erase is aligned
1027 * with the erase region at that address.
1030 while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
1033 /* As before, drop back one to point at the region in which
1034 the address actually falls
1038 if ((instr->addr + instr->len) & (regions[i].erasesize-1))
1041 chipnum = instr->addr >> cfi->chipshift;
1042 adr = instr->addr - (chipnum << cfi->chipshift);
1048 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
1053 adr += regions[i].erasesize;
1054 len -= regions[i].erasesize;
/* Move to the next region when we pass the end of this one; the
 * comparison is modulo chip size since adr is chip-relative. */
1056 if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
1059 if (adr >> cfi->chipshift) {
1063 if (chipnum >= cfi->numchips)
/* Report completion through the MTD async-erase callback. */
1068 instr->state = MTD_ERASE_DONE;
1069 if (instr->callback)
1070 instr->callback(instr);
/*
 * cfi_amdstd_erase_onesize - erase handler for devices with a single
 * uniform erase size. Validates alignment and bounds, then erases one
 * block at a time, advancing to the next chip when a boundary is hit.
 */
1075 static int cfi_amdstd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
1077 struct map_info *map = mtd->priv;
1078 struct cfi_private *cfi = map->fldrv_priv;
1079 unsigned long adr, len;
1080 int chipnum, ret = 0;
1082 if (instr->addr & (mtd->erasesize - 1))
1085 if (instr->len & (mtd->erasesize -1))
1088 if ((instr->len + instr->addr) > mtd->size)
1091 chipnum = instr->addr >> cfi->chipshift;
1092 adr = instr->addr - (chipnum << cfi->chipshift);
1096 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
1101 adr += mtd->erasesize;
1102 len -= mtd->erasesize;
/* Crossed into the next chip: restart the chip-relative address. */
1104 if (adr >> cfi->chipshift) {
1108 if (chipnum >= cfi->numchips)
/* Report completion through the MTD async-erase callback. */
1113 instr->state = MTD_ERASE_DONE;
1114 if (instr->callback)
1115 instr->callback(instr);
/*
 * cfi_amdstd_erase_chip - erase handler for parts that only support
 * whole-chip erase: the request must start at 0 and cover the device
 * exactly. NOTE(review): only chips[0] is erased here — presumably
 * such configurations are single-chip; confirm against callers.
 */
1120 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1122 struct map_info *map = mtd->priv;
1123 struct cfi_private *cfi = map->fldrv_priv;
1126 if (instr->addr != 0)
1129 if (instr->len != mtd->size)
1132 ret = do_erase_chip(map, &cfi->chips[0]);
/* Report completion through the MTD async-erase callback. */
1136 instr->state = MTD_ERASE_DONE;
1137 if (instr->callback)
1138 instr->callback(instr);
/*
 * cfi_amdstd_sync - wait until every chip is idle. Idle chips are
 * parked in FL_SYNCING; busy chips are waited on before the scan
 * continues. Afterwards all parked chips are restored to their
 * previous state.
 */
1143 static void cfi_amdstd_sync (struct mtd_info *mtd)
1145 struct map_info *map = mtd->priv;
1146 struct cfi_private *cfi = map->fldrv_priv;
1148 struct flchip *chip;
1150 DECLARE_WAITQUEUE(wait, current);
1152 for (i=0; !ret && i<cfi->numchips; i++) {
1153 chip = &cfi->chips[i];
1156 cfi_spin_lock(chip->mutex);
1158 switch(chip->state) {
1162 case FL_JEDEC_QUERY:
1163 chip->oldstate = chip->state;
1164 chip->state = FL_SYNCING;
1165 /* No need to wake_up() on this state change -
1166 * as the whole point is that nobody can do anything
1167 * with the chip now anyway.
1170 cfi_spin_unlock(chip->mutex);
1174 /* Not an idle state */
1175 set_current_state(TASK_UNINTERRUPTIBLE);
1176 add_wait_queue(&chip->wq, &wait);
1178 cfi_spin_unlock(chip->mutex);
1182 remove_wait_queue(&chip->wq, &wait);
1188 /* Unlock the chips again */
1190 for (i--; i >=0; i--) {
1191 chip = &cfi->chips[i];
1193 cfi_spin_lock(chip->mutex);
/* Only restore chips this call actually put into FL_SYNCING. */
1195 if (chip->state == FL_SYNCING) {
1196 chip->state = chip->oldstate;
1199 cfi_spin_unlock(chip->mutex);
/*
 * cfi_amdstd_suspend - power-management suspend hook. Parks every idle
 * chip in FL_PM_SUSPENDED; if any chip is busy, the chips already
 * suspended by this call are unwound to their old state and an error
 * is returned.
 */
1204 static int cfi_amdstd_suspend(struct mtd_info *mtd)
1206 struct map_info *map = mtd->priv;
1207 struct cfi_private *cfi = map->fldrv_priv;
1209 struct flchip *chip;
1212 for (i=0; !ret && i<cfi->numchips; i++) {
1213 chip = &cfi->chips[i];
1215 cfi_spin_lock(chip->mutex);
1217 switch(chip->state) {
1221 case FL_JEDEC_QUERY:
1222 chip->oldstate = chip->state;
1223 chip->state = FL_PM_SUSPENDED;
1224 /* No need to wake_up() on this state change -
1225 * as the whole point is that nobody can do anything
1226 * with the chip now anyway.
1228 case FL_PM_SUSPENDED:
1235 cfi_spin_unlock(chip->mutex);
1238 /* Unlock the chips again */
1241 for (i--; i >=0; i--) {
1242 chip = &cfi->chips[i];
1244 cfi_spin_lock(chip->mutex);
/* Only restore chips this call actually suspended. */
1246 if (chip->state == FL_PM_SUSPENDED) {
1247 chip->state = chip->oldstate;
1250 cfi_spin_unlock(chip->mutex);
/*
 * cfi_amdstd_resume - power-management resume hook: return each
 * suspended chip to FL_READY and force it back into array-read mode
 * with an 0xF0 reset command.
 */
1257 static void cfi_amdstd_resume(struct mtd_info *mtd)
1259 struct map_info *map = mtd->priv;
1260 struct cfi_private *cfi = map->fldrv_priv;
1262 struct flchip *chip;
1264 for (i=0; i<cfi->numchips; i++) {
1266 chip = &cfi->chips[i];
1268 cfi_spin_lock(chip->mutex);
1270 if (chip->state == FL_PM_SUSPENDED) {
1271 chip->state = FL_READY;
1272 cfi_write(map, CMD(0xF0), chip->start);
1276 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1278 cfi_spin_unlock(chip->mutex);
/*
 * cfi_amdstd_destroy - driver teardown: free the command-set private
 * data and the erase-region table allocated in cfi_amdstd_setup().
 */
1282 static void cfi_amdstd_destroy(struct mtd_info *mtd)
1284 struct map_info *map = mtd->priv;
1285 struct cfi_private *cfi = map->fldrv_priv;
1286 kfree(cfi->cmdset_priv);
1289 kfree(mtd->eraseregions);
/* Key under which cfi_cmdset_0002() is exported via inter_module. */
1292 static char im_name[]="cfi_cmdset_0002";
/* Module init: export cfi_cmdset_0002() for the generic CFI probe code. */
1294 int __init cfi_amdstd_init(void)
1296 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
/* Module exit: withdraw the inter_module registration. */
1300 static void __exit cfi_amdstd_exit(void)
1302 inter_module_unregister(im_name);
1305 module_init(cfi_amdstd_init);
1306 module_exit(cfi_amdstd_exit);
1308 MODULE_LICENSE("GPL");
1309 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1310 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");