/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.180 2005/07/20 21:01:13 tpoynor Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        for (i=10; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
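        /* The low 16 bits of EraseRegionInfo hold (number of blocks - 1),
           so this forces the second region to 0x3e + 1 = 63 blocks. */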
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It appears the device IDs are common
         * as well.  This table picks all the cases where we
         * know that is so.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);
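        /* The 1.3 and later extended query table has variable-length
           fields, so we may have to re-read it with a larger buffer
           (see the need_more path below). */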

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 6;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI version 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i < cfi->numchips; i++) {
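                /* CFI stores typical operation times as log2 values:
                   microseconds for word/buffer writes, milliseconds for
                   block erase, hence the 1 << ... below. */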
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
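                /* Each EraseRegionInfo entry encodes its region as:
                   bits 0-15 = (number of blocks - 1),
                   bits 16-31 = block size in units of 256 bytes. */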
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                if (mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += 6;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI version 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
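                /* numparts is assumed to be a power of two here:
                   __ffs(numparts) is then log2(numparts), and each virtual
                   chip covers 1/numparts of the real chip's address space. */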
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
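        /* SR.7 (0x80) is the WSM ready bit; SR.0 (0x01) reports
           partition program/erase status on multi-partition chips. */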
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
                                       status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
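                /* Fall through: the status register says the chip is ready */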

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate, newstate;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

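/* In the non-XIP case we simply drop the chip lock around the busy wait
   so that other users of the chip can make progress in the meantime. */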
#define UDELAY(map, chip, adr, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;
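        /* point() hands out a direct pointer into the mapping, which is
           why fixup_use_point only installs it for linear maps. */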

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
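                /* 0xFF returns the chip to read array mode */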
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                                     unsigned long adr, map_word datum, int mode)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK, write_cmd;
        unsigned long timeo;
        int z, ret=0;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);
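        /* 0x40 is the single word program command; 0xC0 programs the
           protection (OTP) registers */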
        switch (mode) {
        case FL_WRITING:   write_cmd = CMD(0x40); break;
        case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
        default: return -EINVAL;
        }

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, mode);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
        map_write(map, write_cmd, adr);
        map_write(map, datum, adr);
        chip->state = mode;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map_bankwidth(map),
                                chip->word_write_time);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != mode) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        xip_enable(map, chip, adr);
                        printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                z++;
                UDELAY(map, chip, adr, 1);
        }
1285         if (!z) {
1286                 chip->word_write_time--;
1287                 if (!chip->word_write_time)
1288                         chip->word_write_time++;
1289         }
1290         if (z > 1) 
1291                 chip->word_write_time++;
1292
1293         /* Done and happy. */
1294         chip->state = FL_STATUS;
1295
1296         /* check for lock bit */
1297         if (map_word_bitsset(map, status, CMD(0x02))) {
1298                 /* clear status */
1299                 map_write(map, CMD(0x50), adr);
1300                 /* put back into read status register mode */
1301                 map_write(map, CMD(0x70), adr);
1302                 ret = -EROFS;
1303         }
1304
1305         xip_enable(map, chip, adr);
1306  out:   put_chip(map, chip, adr);
1307         spin_unlock(chip->mutex);
1308
1309         return ret;
1310 }
1311
1312
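/*
 * Write an arbitrary byte range by splitting it into an unaligned
 * head, whole bus-width words, and an unaligned tail.  Partial words
 * are padded with 0xFF so that the bytes we are not writing stay
 * untouched (a flash write can only clear bits to 0).
 */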
static int cfi_intelext_write_words(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

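/*
 * Program up to one write buffer (wbufsize bytes, block-aligned) in a
 * single operation: issue 0xE8 until the chip reports the buffer is
 * available, write the word count and the data, then 0xD0 to start
 * programming, and finally poll the status register for completion.
 */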
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret = 0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		UDELAY(map, chip, cmd_adr, 1);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr);

	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				cmd_adr, len,
				chip->buffer_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, cmd_adr, 1);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), cmd_adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

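/*
 * Buffered write entry point: align the start with word writes if
 * necessary, then feed do_write_buffer() one write-buffer-sized,
 * boundary-aligned chunk at a time, moving on to the next chip when
 * the offset crosses a chip boundary.
 */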
static int cfi_intelext_write_buffers(struct mtd_info *mtd, loff_t to,
				      size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}

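/*
 * Erase a single block: 0x20/0xD0 command pair, then a long poll (the
 * timeout is restarted if the erase was suspended and resumed).  On
 * completion the status register is decoded to distinguish command
 * sequence errors, locked blocks, low VPP, and plain erase failures;
 * a failed erase is retried a few times before giving up.
 */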
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*1000/2);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors and lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = MERGESTATUS(status);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				put_chip(map, chip, adr);
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
			ret = -EIO;
		}
	} else {
		xip_enable(map, chip, adr);
		ret = 0;
	}

 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

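/*
 * Erase a range of (possibly differently sized) blocks by handing
 * each block to do_erase_oneblock() via cfi_varsize_frob().
 */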
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

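/*
 * Wait for all chips to become idle and hold them in FL_SYNCING, then
 * release them again.  Any chip we fail to claim terminates the first
 * loop; the second loop only releases the chips we actually claimed.
 */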
static void cfi_intelext_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

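/*
 * Lock (0x60/0x01) or unlock (0x60/0xD0) one block, selected by the
 * thunk argument, then poll the status register until the chip is
 * ready again.  Chips that advertise instant individual block locking
 * skip the initial settling delay.
 */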
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported, there is
	 * no need to delay.
	 */
	if (!extp || !(extp->FeatureSupport & (1 << 5)))
		UDELAY(map, chip, adr, 1000000/HZ);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for lock/unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
				ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

#ifdef CONFIG_MTD_OTP

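/*
 * All OTP (protection register) operations share one callback type so
 * that cfi_intelext_otp_walk() can iterate over the register layout
 * once and apply read, write or lock actions uniformly.
 */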
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

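/*
 * Walk the factory or user protection registers described by the
 * Intel extended query data, scaling offsets and sizes for the flash
 * geometry (interleave and device type).  With a NULL action the walk
 * fills 'buf' with otp_info records instead of performing any I/O.
 */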
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* check before subtracting: len is
					   unsigned and must not wrap */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif

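/*
 * Power-management suspend: park every idle chip in FL_PM_SUSPENDED.
 * If any chip is busy (or has a pending suspended operation) we back
 * out, waking the chips already suspended, and return -EAGAIN.
 */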
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}

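/*
 * Force every chip back into array (read) mode so that a bootloader
 * stored in flash is readable after a soft reboot; called from the
 * reboot notifier and from the destroy path.
 */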
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_READY;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name_1[] = "cfi_cmdset_0001";
static char im_name_3[] = "cfi_cmdset_0003";

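/*
 * Chips reporting command set 0x0003 are handled by the same code, so
 * both inter_module names are registered against cfi_cmdset_0001().
 */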
static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");