2 * MTD map driver for AMD compatible flash chips (non-CFI)
4 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
6 * $Id: amd_flash.c,v 1.19 2003/01/24 13:30:11 dwmw2 Exp $
8 * Copyright (c) 2001 Axis Communications AB
10 * This file is under GPL.
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/errno.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/mtd/map.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/flashchip.h>
26 /* There's no limit. It exists only to avoid realloc. */
27 #define MAX_AMD_CHIPS 8
/* Device bus width expressed in bytes per device (x8 = 1, x16 = 2, x32 = 4). */
29 #define DEVICE_TYPE_X8 (8 / 8)
30 #define DEVICE_TYPE_X16 (16 / 8)
31 #define DEVICE_TYPE_X32 (32 / 8)
/* Autoselect / command addresses, in device words (scaled by buswidth at use). */
34 #define ADDR_MANUFACTURER 0x0000
35 #define ADDR_DEVICE_ID 0x0001
36 #define ADDR_SECTOR_LOCK 0x0002
37 #define ADDR_HANDSHAKE 0x0003
38 #define ADDR_UNLOCK_1 0x0555
39 #define ADDR_UNLOCK_2 0x02AA
/* AMD-style command data values written during unlock/command cycles. */
42 #define CMD_UNLOCK_DATA_1 0x00AA
43 #define CMD_UNLOCK_DATA_2 0x0055
44 #define CMD_MANUFACTURER_UNLOCK_DATA 0x0090
45 #define CMD_UNLOCK_BYPASS_MODE 0x0020
46 #define CMD_PROGRAM_UNLOCK_DATA 0x00A0
47 #define CMD_RESET_DATA 0x00F0
48 #define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
49 #define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030
51 #define CMD_UNLOCK_SECTOR 0x0060
/* JEDEC manufacturer IDs recognised by this driver. */
54 #define MANUFACTURER_AMD 0x0001
55 #define MANUFACTURER_ATMEL 0x001F
56 #define MANUFACTURER_FUJITSU 0x0004
57 #define MANUFACTURER_ST 0x0020
58 #define MANUFACTURER_SST 0x00BF
59 #define MANUFACTURER_TOSHIBA 0x0098
/* AMD device IDs. */
62 #define AM29F800BB 0x2258
63 #define AM29F800BT 0x22D6
64 #define AM29LV800BB 0x225B
65 #define AM29LV800BT 0x22DA
66 #define AM29LV160DT 0x22C4
67 #define AM29LV160DB 0x2249
68 #define AM29BDS323D 0x22D1
69 #define AM29BDS643D 0x227E
/* Atmel device IDs. */
72 #define AT49xV16x 0x00C0
73 #define AT49xV16xT 0x00C2
/* Fujitsu device IDs. */
76 #define MBM29LV160TE 0x22C4
77 #define MBM29LV160BE 0x2249
78 #define MBM29LV800BB 0x225B
/* ST device IDs. */
81 #define M29W800T 0x00D7
82 #define M29W160DT 0x22C4
83 #define M29W160DB 0x2249
/* SST device IDs (defined but no table entries visible in this listing). */
86 #define SST39LF800 0x2781
87 #define SST39LF160 0x2782
/* Toshiba device IDs. */
90 #define TC58FVT160 0x00C2
91 #define TC58FVB160 0x0043
/* Per-map driver state, stored in map->fldrv_priv.  The trailing chips[]
 * array is sized at kmalloc time to hold one flchip per detected chip.
 * NOTE(review): this listing is sampled -- members referenced elsewhere
 * (interleave, device_type, numchips) are not visible here. */
95 struct amd_flash_private {
/* log2 of per-chip size; offsets are split into (chipnum, ofs) with it. */
99 unsigned long chipshift;
100 // const char *im_name;
/* Old-style flexible array member ([0]); modern C would use chips[]. */
101 struct flchip chips[0];
/* One entry of the supported-chip table used by amd_flash_probe().
 * NOTE(review): sampled listing -- mfr_id/dev_id/name/size members used by
 * probe_new_chip() are not visible here. */
104 struct amd_flash_info {
/* Number of valid entries in regions[] below (at most 4). */
109 const int numeraseregions;
110 const struct mtd_erase_region_info regions[4];
/* Forward declarations for the mtd_info operations implemented below. */
115 static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
117 static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
119 static int amd_flash_erase(struct mtd_info *, struct erase_info *);
120 static void amd_flash_sync(struct mtd_info *);
121 static int amd_flash_suspend(struct mtd_info *);
122 static void amd_flash_resume(struct mtd_info *);
123 static void amd_flash_destroy(struct mtd_info *);
124 static struct mtd_info *amd_flash_probe(struct map_info *map);
/* Chip-driver registration record.  Uses GNU old-style "label:" designated
 * initializers (pre-C99); modern code would write .probe = / .destroy =. */
127 static struct mtd_chip_driver amd_flash_chipdrv = {
128 probe: amd_flash_probe,
129 destroy: amd_flash_destroy,
/* Name under which this driver is registered with the MTD core. */
136 static const char im_name[] = "amd_flash";
/* Bus-width-dispatching read: returns 1, 2 or 4 bytes from the map as a
 * __u32 depending on map->buswidth.  NOTE(review): the fallback path for
 * other widths is not visible in this sampled listing. */
140 static inline __u32 wide_read(struct map_info *map, __u32 addr)
142 if (map->buswidth == 1) {
143 return map->read8(map, addr);
144 } else if (map->buswidth == 2) {
145 return map->read16(map, addr);
146 } else if (map->buswidth == 4) {
147 return map->read32(map, addr);
/* Bus-width-dispatching write: mirror of wide_read() for stores.
 * Widths other than 1/2/4 are silently ignored. */
153 static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
155 if (map->buswidth == 1) {
156 map->write8(map, val, addr);
157 } else if (map->buswidth == 2) {
158 map->write16(map, val, addr);
159 } else if (map->buswidth == 4) {
160 map->write32(map, val, addr);
/* Build the bus-wide command word for a single-device command value.
 * For two x16 chips interleaved on a 32-bit bus the command must appear
 * in both halves of the word; the duplication itself is in lines not
 * visible in this sampled listing. */
164 static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
166 const struct amd_flash_private *private = map->fldrv_priv;
167 if ((private->interleave == 2) &&
168 (private->device_type == DEVICE_TYPE_X16)) {
/* Issue the standard AMD two-cycle unlock sequence (AA to 0x555, 55 to
 * 0x2AA).  The command value is replicated into the upper 16 bits so both
 * chips of a 2-way interleave receive it; addresses are scaled by buswidth. */
175 static inline void send_unlock(struct map_info *map, unsigned long base)
177 wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
178 base + (map->buswidth * ADDR_UNLOCK_1));
179 wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
180 base + (map->buswidth * ADDR_UNLOCK_2));
/* Three-cycle command: unlock sequence, then the command byte written to
 * the ADDR_UNLOCK_1 location. */
183 static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
185 send_unlock(map, base);
186 wide_write(map, make_cmd(map, cmd),
187 base + (map->buswidth * ADDR_UNLOCK_1));
/* Like send_cmd(), but the final command cycle goes to an arbitrary
 * address -- used for the per-sector cycle of sector erase. */
190 static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
191 __u32 cmd, unsigned long addr)
193 send_unlock(map, base);
194 wide_write(map, make_cmd(map, cmd), addr);
/* DQ6 toggle-bit busy test: while a program/erase is in progress the chip
 * toggles DQ6 on successive reads, so two reads differing in that bit
 * means "busy".  With two x16 chips on a 32-bit bus, each 16-bit half is
 * checked independently. */
197 static inline int flash_is_busy(struct map_info *map, unsigned long addr,
201 if ((interleave == 2) && (map->buswidth == 4)) {
204 read1 = wide_read(map, addr);
205 read2 = wide_read(map, addr);
207 return (((read1 >> 16) & D6_MASK) !=
208 ((read2 >> 16) & D6_MASK)) ||
209 (((read1 & 0xffff) & D6_MASK) !=
210 ((read2 & 0xffff) & D6_MASK));
/* Non-interleaved case: a single pair of reads on the whole bus word. */
213 return ((wide_read(map, addr) & D6_MASK) !=
214 (wide_read(map, addr) & D6_MASK));
/* Lock or unlock one sector.  Address bit A6 selects the operation
 * (1 = unlock, 0 = lock); 0x40 is scaled by buswidth to land on A6 of the
 * device rather than of the byte-addressed map. */
217 static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
220 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
222 (sect_addr | (0x40 * map->buswidth)) :
223 (sect_addr & ~(0x40 * map->buswidth)) ;
225 __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
/* Reset first, then the documented 3-cycle sector-lock sequence. */
227 wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
228 wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
229 wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
230 wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
/* Query a sector's lock bit: enter autoselect mode, read the lock-status
 * word at ADDR_SECTOR_LOCK, then reset back to read-array mode. */
233 static inline int is_sector_locked(struct map_info *map,
234 unsigned long sect_addr)
238 wide_write(map, CMD_RESET_DATA, 0);
239 send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
241 /* status is 0x0000 for unlocked and 0x0001 for locked */
242 status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
243 wide_write(map, CMD_RESET_DATA, 0);
/* Walk every erase region, and for each sector overlapping [ofs, ofs+len)
 * perform the requested lock/unlock and verify the result via the
 * sector-lock status. */
247 static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
250 struct map_info *map;
251 struct mtd_erase_region_info *merip;
252 int eraseoffset, erasesize, eraseblocks;
259 /* Pass the whole chip through sector by sector and check for each
260 sector if the sector and the given interval overlap */
261 for(i = 0; i < mtd->numeraseregions; i++) {
262 merip = &mtd->eraseregions[i];
264 eraseoffset = merip->offset;
265 erasesize = merip->erasesize;
266 eraseblocks = merip->numblocks;
/* Region entirely before the requested range: skip it. */
268 if (ofs > eraseoffset + erasesize)
271 while (eraseblocks > 0) {
/* Half-open overlap test between this sector and [ofs, ofs+len). */
272 if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
273 unlock_sector(map, eraseoffset, is_unlock);
275 lock_status = is_sector_locked(map, eraseoffset);
277 if (is_unlock && lock_status) {
/* NOTE(review): "%xx" below is a format-string typo for "%x" --
 * it prints the length followed by a literal 'x'. */
278 printk("Cannot unlock sector at address %x length %xx\n",
279 eraseoffset, merip->erasesize);
281 } else if (!is_unlock && !lock_status) {
282 printk("Cannot lock sector at address %x length %x\n",
283 eraseoffset, merip->erasesize);
287 eraseoffset += erasesize;
/* mtd->unlock entry point: thin wrapper, is_unlock = 1. */
294 static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
296 return amd_flash_do_unlock(mtd, ofs, len, 1);
/* mtd->lock entry point: thin wrapper, is_unlock = 0. */
299 static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
301 return amd_flash_do_unlock(mtd, ofs, len, 0);
306 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
307 * matching table entry (-1 if not found or alias for already found chip).
309 static int probe_new_chip(struct mtd_info *mtd, __u32 base,
310 struct flchip *chips,
311 struct amd_flash_private *private,
312 const struct amd_flash_info *table, int table_size)
316 struct map_info *map = mtd->priv;
/* Temporary private so make_cmd()/wide_read() work before the real
 * private structure has been allocated. */
317 struct amd_flash_private temp;
320 temp.device_type = DEVICE_TYPE_X16; // Assume X16 (FIXME)
322 map->fldrv_priv = &temp;
324 /* Enter autoselect mode. */
325 send_cmd(map, base, CMD_RESET_DATA);
326 send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);
328 mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
329 dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));
/* On a 32-bit bus, identical IDs in both 16-bit halves indicate two
 * interleaved x16 devices answering together. */
331 if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
332 ((dev_id >> 16) == (dev_id & 0xffff))) {
339 for (i = 0; i < table_size; i++) {
340 if ((mfr_id == table[i].mfr_id) &&
341 (dev_id == table[i].dev_id)) {
345 /* Is this an alias for an already found chip?
346 * In that case that chip should be in
347 * autoselect mode now.
349 for (j = 0; j < private->numchips; j++) {
354 wide_read(map, chips[j].start +
359 wide_read(map, chips[j].start +
362 if (temp.interleave == 2) {
363 mfr_id_other &= 0xffff;
364 dev_id_other &= 0xffff;
/* Same IDs readable through an earlier chip's window => this
 * "chip" is an address alias, not new hardware. */
366 if ((mfr_id_other == mfr_id) &&
367 (dev_id_other == dev_id)) {
369 /* Exit autoselect mode. */
377 if (private->numchips == MAX_AMD_CHIPS) {
379 "%s: Too many flash chips "
380 "detected. Increase "
381 "MAX_AMD_CHIPS from %d.\n",
382 map->name, MAX_AMD_CHIPS);
/* Record the new chip as ready; its spinlock doubles as mutex. */
387 chips[private->numchips].start = base;
388 chips[private->numchips].state = FL_READY;
389 chips[private->numchips].mutex =
390 &chips[private->numchips]._spinlock;
394 printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
395 temp.interleave, (table[i].size)/(1024*1024),
396 table[i].name, base);
398 mtd->size += table[i].size * temp.interleave;
399 mtd->numeraseregions += table[i].numeraseregions;
405 /* Exit autoselect mode. */
406 send_cmd(map, base, CMD_RESET_DATA);
408 if (i == table_size) {
409 printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
410 "mfr id 0x%x, dev id 0x%x\n", map->name,
411 base, mfr_id, dev_id);
412 map->fldrv_priv = NULL;
/* Commit the geometry learned during probing to the caller's private. */
417 private->device_type = temp.device_type;
418 private->interleave = temp.interleave;
/* Main probe entry point: identify all chips behind the map, build the
 * mtd_info (size, erase regions, operation vectors) and the driver
 * private structure.  NOTE(review): this listing is sampled -- several
 * statements (error cleanup, loop bookkeeping) are not visible. */
425 static struct mtd_info *amd_flash_probe(struct map_info *map)
427 /* Keep this table on the stack so that it gets deallocated after the
/* Supported-chip table; entries use GNU label-style initializers. */
430 const struct amd_flash_info table[] = {
432 mfr_id: MANUFACTURER_AMD,
434 name: "AMD AM29LV160DT",
438 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
439 { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 },
440 { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 },
441 { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 }
444 mfr_id: MANUFACTURER_AMD,
446 name: "AMD AM29LV160DB",
450 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
451 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
452 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
453 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
456 mfr_id: MANUFACTURER_TOSHIBA,
458 name: "Toshiba TC58FVT160",
462 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
463 { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 },
464 { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 },
465 { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 }
468 mfr_id: MANUFACTURER_FUJITSU,
469 dev_id: MBM29LV160TE,
470 name: "Fujitsu MBM29LV160TE",
474 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
475 { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 },
476 { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 },
477 { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 }
480 mfr_id: MANUFACTURER_TOSHIBA,
482 name: "Toshiba TC58FVB160",
486 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
487 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
488 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
489 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
492 mfr_id: MANUFACTURER_FUJITSU,
493 dev_id: MBM29LV160BE,
494 name: "Fujitsu MBM29LV160BE",
498 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
499 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
500 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
501 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
504 mfr_id: MANUFACTURER_AMD,
506 name: "AMD AM29LV800BB",
510 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
511 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
512 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
513 { offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
516 mfr_id: MANUFACTURER_AMD,
518 name: "AMD AM29F800BB",
522 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
523 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
524 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
525 { offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
528 mfr_id: MANUFACTURER_AMD,
530 name: "AMD AM29LV800BT",
534 { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
535 { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 },
536 { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 },
537 { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 }
540 mfr_id: MANUFACTURER_AMD,
542 name: "AMD AM29F800BT",
546 { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
547 { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 },
548 { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 },
549 { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 }
/* NOTE(review): this entry is labelled AM29LV800BB but carries a
 * top-boot region layout, duplicating the earlier BB entry -- looks
 * suspicious; compare against the AMD datasheet. */
552 mfr_id: MANUFACTURER_AMD,
554 name: "AMD AM29LV800BB",
558 { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
559 { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 },
560 { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 },
561 { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 }
564 mfr_id: MANUFACTURER_FUJITSU,
565 dev_id: MBM29LV800BB,
566 name: "Fujitsu MBM29LV800BB",
570 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
571 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
572 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
573 { offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
576 mfr_id: MANUFACTURER_ST,
582 { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
583 { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 },
584 { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 },
585 { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 }
588 mfr_id: MANUFACTURER_ST,
590 name: "ST M29W160DT",
594 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
595 { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 },
596 { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 },
597 { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 }
600 mfr_id: MANUFACTURER_ST,
602 name: "ST M29W160DB",
606 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
607 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
608 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
609 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
612 mfr_id: MANUFACTURER_AMD,
614 name: "AMD AM29BDS323D",
618 { offset: 0x000000, erasesize: 0x10000, numblocks: 48 },
619 { offset: 0x300000, erasesize: 0x10000, numblocks: 15 },
620 { offset: 0x3f0000, erasesize: 0x02000, numblocks: 8 },
623 mfr_id: MANUFACTURER_AMD,
625 name: "AMD AM29BDS643D",
629 { offset: 0x000000, erasesize: 0x10000, numblocks: 96 },
630 { offset: 0x600000, erasesize: 0x10000, numblocks: 31 },
631 { offset: 0x7f0000, erasesize: 0x02000, numblocks: 8 },
634 mfr_id: MANUFACTURER_ATMEL,
636 name: "Atmel AT49xV16x",
640 { offset: 0x000000, erasesize: 0x02000, numblocks: 8 },
641 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
644 mfr_id: MANUFACTURER_ATMEL,
646 name: "Atmel AT49xV16xT",
650 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
651 { offset: 0x1F0000, erasesize: 0x02000, numblocks: 8 }
656 struct mtd_info *mtd;
/* Chip bookkeeping is gathered on the stack first, then copied into the
 * kmalloc'ed private structure once the chip count is known. */
657 struct flchip chips[MAX_AMD_CHIPS];
658 int table_pos[MAX_AMD_CHIPS];
659 struct amd_flash_private temp;
660 struct amd_flash_private *private;
667 mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL);
670 "%s: kmalloc failed for info structure\n", map->name);
673 memset(mtd, 0, sizeof(*mtd));
676 memset(&temp, 0, sizeof(temp));
678 printk("%s: Probing for AMD compatible flash...\n", map->name);
/* A recognised chip must exist at offset 0, otherwise give up. */
680 if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
681 sizeof(table)/sizeof(table[0])))
684 "%s: Found no AMD compatible device at location zero\n",
692 chips[0].state = FL_READY;
693 chips[0].mutex = &chips[0]._spinlock;
/* Derive chipshift = log2(per-chip size) by counting size's bits. */
695 for (size = mtd->size; size > 1; size >>= 1) {
698 switch (temp.interleave) {
707 /* Find out if there are any more chips in the map. */
708 for (base = (1 << temp.chipshift);
710 base += (1 << temp.chipshift)) {
711 int numchips = temp.numchips;
712 table_pos[numchips] = probe_new_chip(mtd, base, chips,
713 &temp, table, sizeof(table)/sizeof(table[0]));
716 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
717 mtd->numeraseregions, GFP_KERNEL);
718 if (!mtd->eraseregions) {
719 printk(KERN_WARNING "%s: Failed to allocate "
720 "memory for MTD erase region info\n", map->name);
722 map->fldrv_priv = NULL;
/* Flatten each chip's table regions into the mtd erase-region array,
 * scaling offsets/sizes by the interleave factor. */
728 for (i = 0; i < temp.numchips; i++) {
733 for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
734 mtd->eraseregions[reg_idx].offset = offset +
735 (table[table_pos[i]].regions[j].offset *
737 mtd->eraseregions[reg_idx].erasesize =
738 table[table_pos[i]].regions[j].erasesize *
740 mtd->eraseregions[reg_idx].numblocks =
741 table[table_pos[i]].regions[j].numblocks;
743 mtd->eraseregions[reg_idx].erasesize) {
745 mtd->eraseregions[reg_idx].erasesize;
747 dev_size += mtd->eraseregions[reg_idx].erasesize *
748 mtd->eraseregions[reg_idx].numblocks;
/* Publish the mtd operation vectors. */
753 mtd->type = MTD_NORFLASH;
754 mtd->flags = MTD_CAP_NORFLASH;
755 mtd->name = map->name;
756 mtd->erase = amd_flash_erase;
757 mtd->read = amd_flash_read;
758 mtd->write = amd_flash_write;
759 mtd->sync = amd_flash_sync;
760 mtd->suspend = amd_flash_suspend;
761 mtd->resume = amd_flash_resume;
762 mtd->lock = amd_flash_lock;
763 mtd->unlock = amd_flash_unlock;
/* Allocate private with the flexible chips[] tail sized for numchips. */
765 private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
766 temp.numchips), GFP_KERNEL);
769 "%s: kmalloc failed for private structure\n", map->name);
771 map->fldrv_priv = NULL;
774 memcpy(private, &temp, sizeof(temp));
775 memcpy(private->chips, chips,
776 sizeof(struct flchip) * private->numchips);
777 for (i = 0; i < private->numchips; i++) {
778 init_waitqueue_head(&private->chips[i].wq);
779 spin_lock_init(&private->chips[i]._spinlock);
782 map->fldrv_priv = private;
784 map->fldrv = &amd_flash_chipdrv;
/* Read len bytes at adr from a single chip.  Sleeps on the chip's wait
 * queue until the chip is FL_READY, then does a plain copy_from -- NOR
 * flash in read-array mode is directly readable. */
792 static inline int read_one_chip(struct map_info *map, struct flchip *chip,
793 loff_t adr, size_t len, u_char *buf)
795 DECLARE_WAITQUEUE(wait, current);
796 unsigned long timeo = jiffies + HZ;
799 spin_lock_bh(chip->mutex);
801 if (chip->state != FL_READY){
802 printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
803 map->name, chip->state);
/* Classic sleep pattern: set state before queueing, drop the lock,
 * schedule, then retry (lines between are not in this listing). */
804 set_current_state(TASK_UNINTERRUPTIBLE);
805 add_wait_queue(&chip->wq, &wait);
807 spin_unlock_bh(chip->mutex);
810 remove_wait_queue(&chip->wq, &wait);
812 if(signal_pending(current)) {
816 timeo = jiffies + HZ;
823 chip->state = FL_READY;
825 map->copy_from(map, buf, adr, len);
828 spin_unlock_bh(chip->mutex);
/* mtd->read entry point: bounds-check, split the request at chip
 * boundaries (chipshift) and delegate each piece to read_one_chip(). */
835 static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
836 size_t *retlen, u_char *buf)
838 struct map_info *map = mtd->priv;
839 struct amd_flash_private *private = map->fldrv_priv;
844 if ((from + len) > mtd->size) {
845 printk(KERN_WARNING "%s: read request past end of device "
846 "(0x%lx)\n", map->name, (unsigned long)from + len);
851 /* Offset within the first chip that the first read should start. */
852 chipnum = (from >> private->chipshift);
853 ofs = from - (chipnum << private->chipshift);
858 unsigned long this_len;
860 if (chipnum >= private->numchips) {
/* Clamp this segment to the end of the current chip. */
864 if ((len + ofs - 1) >> private->chipshift) {
865 this_len = (1 << private->chipshift) - ofs;
870 ret = read_one_chip(map, &private->chips[chipnum], ofs,
/* Program one bus-wide word at adr: wait for FL_READY, issue the AMD
 * program command, poll DQ6 until done, then read back and verify. */
889 static int write_one_word(struct map_info *map, struct flchip *chip,
890 unsigned long adr, __u32 datum)
892 unsigned long timeo = jiffies + HZ;
893 struct amd_flash_private *private = map->fldrv_priv;
894 DECLARE_WAITQUEUE(wait, current);
899 spin_lock_bh(chip->mutex);
901 if (chip->state != FL_READY){
902 printk("%s: waiting for chip to write, state = %d\n",
903 map->name, chip->state);
904 set_current_state(TASK_UNINTERRUPTIBLE);
905 add_wait_queue(&chip->wq, &wait);
907 spin_unlock_bh(chip->mutex);
910 remove_wait_queue(&chip->wq, &wait);
911 printk(KERN_INFO "%s: woke up to write\n", map->name);
912 if(signal_pending(current))
915 timeo = jiffies + HZ;
920 chip->state = FL_WRITING;
/* Unlock + A0 program command, then the data word itself. */
924 send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
925 wide_write(map, datum, adr);
/* Bounded DQ6-toggle poll; drop the lock when a reschedule is due so
 * we do not hog the CPU while the chip programs. */
928 while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
929 if (need_resched()) {
930 spin_unlock_bh(chip->mutex);
932 spin_lock_bh(chip->mutex);
937 printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
/* Read-back verification catches silent programming failures. */
942 if ((verify = wide_read(map, adr)) != datum) {
943 printk(KERN_WARNING "%s: write to 0x%lx failed. "
944 "datum = %x, verify = %x\n",
945 map->name, adr, datum, verify);
951 chip->state = FL_READY;
953 spin_unlock_bh(chip->mutex);
/* mtd->write entry point: classic three-phase word writer --
 * (1) read-modify-write an unaligned head, (2) stream whole bus words,
 * (3) read-modify-write an unaligned tail.  Crosses chip boundaries by
 * recomputing chipnum from chipshift. */
960 static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
961 size_t *retlen, const u_char *buf)
963 struct map_info *map = mtd->priv;
964 struct amd_flash_private *private = map->fldrv_priv;
968 unsigned long chipstart;
975 chipnum = to >> private->chipshift;
976 ofs = to - (chipnum << private->chipshift);
977 chipstart = private->chips[chipnum].start;
979 /* If it's not bus-aligned, do the first byte write. */
980 if (ofs & (map->buswidth - 1)) {
981 unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
982 int i = ofs - bus_ofs;
/* Merge the new bytes into the existing word before programming
 * (NOR flash can only clear bits, so we must write a full word). */
987 map->copy_from(map, tmp_buf,
988 bus_ofs + private->chips[chipnum].start,
990 while (len && i < map->buswidth)
991 tmp_buf[i++] = buf[n++], len--;
/* NOTE(review): tmp_buf is reinterpreted via pointer casts; assumes
 * tmp_buf is suitably aligned for __u16/__u32 -- confirm upstream. */
993 if (map->buswidth == 2) {
994 datum = *(__u16*)tmp_buf;
995 } else if (map->buswidth == 4) {
996 datum = *(__u32*)tmp_buf;
998 return -EINVAL; /* should never happen, but be safe */
1001 ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
/* Advance to the next chip if the head write crossed a boundary. */
1011 if (ofs >> private->chipshift) {
1014 if (chipnum == private->numchips) {
1020 /* We are now aligned, write as much as possible. */
1021 while(len >= map->buswidth) {
1024 if (map->buswidth == 1) {
1025 datum = *(__u8*)buf;
1026 } else if (map->buswidth == 2) {
1027 datum = *(__u16*)buf;
1028 } else if (map->buswidth == 4) {
1029 datum = *(__u32*)buf;
1034 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1040 ofs += map->buswidth;
1041 buf += map->buswidth;
1042 (*retlen) += map->buswidth;
1043 len -= map->buswidth;
1045 if (ofs >> private->chipshift) {
1048 if (chipnum == private->numchips) {
1051 chipstart = private->chips[chipnum].start;
/* Trailing partial word: same read-merge-write dance as the head. */
1055 if (len & (map->buswidth - 1)) {
1060 map->copy_from(map, tmp_buf,
1061 ofs + private->chips[chipnum].start,
1064 tmp_buf[i++] = buf[n++];
1067 if (map->buswidth == 2) {
1068 datum = *(__u16*)tmp_buf;
1069 } else if (map->buswidth == 4) {
1070 datum = *(__u32*)tmp_buf;
1072 return -EINVAL; /* should never happen, but be safe */
1075 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
/* Erase one sector of `size` bytes at adr: wait for FL_READY, issue the
 * 6-cycle sector-erase command, poll DQ6 with a 20 s timeout (sleeping,
 * erase-suspend aware), then verify the sector reads back as all 0xFF. */
1089 static inline int erase_one_block(struct map_info *map, struct flchip *chip,
1090 unsigned long adr, u_long size)
1092 unsigned long timeo = jiffies + HZ;
1093 struct amd_flash_private *private = map->fldrv_priv;
1094 DECLARE_WAITQUEUE(wait, current);
1097 spin_lock_bh(chip->mutex);
1099 if (chip->state != FL_READY){
1100 set_current_state(TASK_UNINTERRUPTIBLE);
1101 add_wait_queue(&chip->wq, &wait);
1103 spin_unlock_bh(chip->mutex);
1106 remove_wait_queue(&chip->wq, &wait);
1108 if (signal_pending(current)) {
1112 timeo = jiffies + HZ;
1117 chip->state = FL_ERASING;
/* 80h unlock followed by 30h written to the sector address. */
1121 send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
1122 send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
1124 timeo = jiffies + (HZ * 20);
/* Initial 1 s sleep: sector erase always takes far longer than that. */
1126 spin_unlock_bh(chip->mutex);
1127 schedule_timeout(HZ);
1128 spin_lock_bh(chip->mutex);
1130 while (flash_is_busy(map, adr, private->interleave)) {
1132 if (chip->state != FL_ERASING) {
1133 /* Someone's suspended the erase. Sleep */
1134 set_current_state(TASK_UNINTERRUPTIBLE);
1135 add_wait_queue(&chip->wq, &wait);
1137 spin_unlock_bh(chip->mutex);
1138 printk(KERN_INFO "%s: erase suspended. Sleeping\n",
1141 remove_wait_queue(&chip->wq, &wait);
1143 if (signal_pending(current)) {
1147 timeo = jiffies + (HZ*2); /* FIXME */
1148 spin_lock_bh(chip->mutex);
1152 /* OK Still waiting */
1153 if (time_after(jiffies, timeo)) {
1154 chip->state = FL_READY;
1155 spin_unlock_bh(chip->mutex);
1156 printk(KERN_WARNING "%s: waiting for erase to complete "
1157 "timed out.\n", map->name);
1163 /* Latency issues. Drop the lock, wait a while and retry */
1164 spin_unlock_bh(chip->mutex);
1171 spin_lock_bh(chip->mutex);
1174 /* Verify every single word */
/* Verification is byte-wide (read8) even on wider buses. */
1180 for (address = adr; address < (adr + size); address++) {
1181 if ((verify = map->read8(map, address)) != 0xFF) {
1187 chip->state = FL_READY;
1188 spin_unlock_bh(chip->mutex);
1190 "%s: verify error at 0x%x, size %ld.\n",
1191 map->name, address, size);
1199 chip->state = FL_READY;
1201 spin_unlock_bh(chip->mutex);
/* mtd->erase entry point: validate that the request is aligned to the
 * erase-region geometry at both ends, then erase block by block,
 * tracking the current erase region and the current chip. */
1208 static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
1210 struct map_info *map = mtd->priv;
1211 struct amd_flash_private *private = map->fldrv_priv;
1212 unsigned long adr, len;
1217 struct mtd_erase_region_info *regions = mtd->eraseregions;
1219 if (instr->addr > mtd->size) {
1223 if ((instr->len + instr->addr) > mtd->size) {
1227 /* Check that both start and end of the requested erase are
1228 * aligned with the erasesize at the appropriate addresses.
1233 /* Skip all erase regions which are ended before the start of
1234 the requested erase. Actually, to save on the calculations,
1235 we skip to the first erase region which starts after the
1236 start of the requested erase, and then go back one.
1239 while ((i < mtd->numeraseregions) &&
1240 (instr->addr >= regions[i].offset)) {
1245 /* OK, now i is pointing at the erase region in which this
1246 * erase request starts. Check the start of the requested
1247 * erase range is aligned with the erase size which is in
/* Power-of-two erasesize assumed by the mask test below. */
1251 if (instr->addr & (regions[i].erasesize-1)) {
1255 /* Remember the erase region we start on. */
1259 /* Next, check that the end of the requested erase is aligned
1260 * with the erase region at that address.
1263 while ((i < mtd->numeraseregions) &&
1264 ((instr->addr + instr->len) >= regions[i].offset)) {
1268 /* As before, drop back one to point at the region in which
1269 * the address actually falls.
1274 if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
1278 chipnum = instr->addr >> private->chipshift;
1279 adr = instr->addr - (chipnum << private->chipshift);
1285 ret = erase_one_block(map, &private->chips[chipnum], adr,
1286 regions[i].erasesize);
1292 adr += regions[i].erasesize;
1293 len -= regions[i].erasesize;
/* Advance to the next region when this one's last block is done
 * (comparison is modulo per-chip size since adr is chip-relative). */
1295 if ((adr % (1 << private->chipshift)) ==
1296 ((regions[i].offset + (regions[i].erasesize *
1297 regions[i].numblocks))
1298 % (1 << private->chipshift))) {
1302 if (adr >> private->chipshift) {
1305 if (chipnum >= private->numchips) {
/* Signal completion to the MTD core via the async callback. */
1311 instr->state = MTD_ERASE_DONE;
1312 if (instr->callback) {
1313 instr->callback(instr);
/* mtd->sync entry point: for every chip, either mark an idle chip
 * FL_SYNCING or sleep until a busy one finishes; afterwards restore the
 * saved state on each chip that was marked. */
1321 static void amd_flash_sync(struct mtd_info *mtd)
1323 struct map_info *map = mtd->priv;
1324 struct amd_flash_private *private = map->fldrv_priv;
1326 struct flchip *chip;
1328 DECLARE_WAITQUEUE(wait, current);
1330 for (i = 0; !ret && (i < private->numchips); i++) {
1331 chip = &private->chips[i];
1334 spin_lock_bh(chip->mutex);
1336 switch(chip->state) {
1340 case FL_JEDEC_QUERY:
1341 chip->oldstate = chip->state;
1342 chip->state = FL_SYNCING;
1343 /* No need to wake_up() on this state change -
1344 * as the whole point is that nobody can do anything
1345 * with the chip now anyway.
1348 spin_unlock_bh(chip->mutex);
1352 /* Not an idle state */
1353 set_current_state(TASK_UNINTERRUPTIBLE);
1354 add_wait_queue(&chip->wq, &wait);
1356 spin_unlock_bh(chip->mutex);
1360 remove_wait_queue(&chip->wq, &wait);
1366 /* Unlock the chips again */
1367 for (i--; i >= 0; i--) {
1368 chip = &private->chips[i];
1370 spin_lock_bh(chip->mutex);
1372 if (chip->state == FL_SYNCING) {
1373 chip->state = chip->oldstate;
1376 spin_unlock_bh(chip->mutex);
/* mtd->suspend: unimplemented stub -- only logs a message. */
1382 static int amd_flash_suspend(struct mtd_info *mtd)
1384 printk("amd_flash_suspend(): not implemented!\n");
/* mtd->resume: unimplemented stub -- only logs a message. */
1390 static void amd_flash_resume(struct mtd_info *mtd)
1392 printk("amd_flash_resume(): not implemented!\n");
/* Teardown hook called via amd_flash_chipdrv.destroy.  Fetches the
 * private structure; the freeing itself is in lines not visible in this
 * sampled listing. */
1397 static void amd_flash_destroy(struct mtd_info *mtd)
1399 struct map_info *map = mtd->priv;
1400 struct amd_flash_private *private = map->fldrv_priv;
/* Module init/exit: (un)register this chip driver with the MTD core. */
1404 int __init amd_flash_init(void)
1406 register_mtd_chip_driver(&amd_flash_chipdrv);
1410 void __exit amd_flash_exit(void)
1412 unregister_mtd_chip_driver(&amd_flash_chipdrv);
1415 module_init(amd_flash_init);
1416 module_exit(amd_flash_exit);
1418 MODULE_LICENSE("GPL");
1419 MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
1420 MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");