/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * $Id: mtdconcat.c,v 1.1.1.1 2005/04/11 02:50:25 jack Exp $
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>
20 * Our storage structure:
21 * Subdev points to an array of pointers to struct mtd_info objects
22 * which is allocated along with this structure
28 struct mtd_info **subdev;
/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 * (Valid because struct mtd_concat has its mtd member first.)
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))
/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
51 static int concat_read (struct mtd_info *mtd, loff_t from, size_t len,
52 size_t *retlen, u_char *buf)
54 struct mtd_concat *concat = CONCAT(mtd);
60 for(i = 0; i < concat->num_subdev; i++)
62 struct mtd_info *subdev = concat->subdev[i];
65 if (from >= subdev->size)
72 if (from + len > subdev->size)
73 size = subdev->size - from;
77 err = subdev->read(subdev, from, size, &retsize, buf);
95 static int concat_write (struct mtd_info *mtd, loff_t to, size_t len,
96 size_t *retlen, const u_char *buf)
98 struct mtd_concat *concat = CONCAT(mtd);
102 if (!(mtd->flags & MTD_WRITEABLE))
107 for(i = 0; i < concat->num_subdev; i++)
109 struct mtd_info *subdev = concat->subdev[i];
110 size_t size, retsize;
112 if (to >= subdev->size)
119 if (to + len > subdev->size)
120 size = subdev->size - to;
124 if (!(subdev->flags & MTD_WRITEABLE))
127 err = subdev->write(subdev, to, size, &retsize, buf);
145 static void concat_erase_callback (struct erase_info *instr)
147 wake_up((wait_queue_head_t *)instr->priv);
150 static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
153 wait_queue_head_t waitq;
154 DECLARE_WAITQUEUE(wait, current);
157 * This code was stol^H^H^H^Hinspired by mtdchar.c
159 init_waitqueue_head(&waitq);
162 erase->callback = concat_erase_callback;
163 erase->priv = (unsigned long)&waitq;
166 * FIXME: Allow INTERRUPTIBLE. Which means
167 * not having the wait_queue head on the stack.
169 err = mtd->erase(mtd, erase);
172 set_current_state(TASK_UNINTERRUPTIBLE);
173 add_wait_queue(&waitq, &wait);
174 if (erase->state != MTD_ERASE_DONE && erase->state != MTD_ERASE_FAILED)
176 remove_wait_queue(&waitq, &wait);
177 set_current_state(TASK_RUNNING);
179 err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
184 static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
186 struct mtd_concat *concat = CONCAT(mtd);
187 struct mtd_info *subdev;
190 struct erase_info *erase;
192 if (!(mtd->flags & MTD_WRITEABLE))
195 if(instr->addr > concat->mtd.size)
198 if(instr->len + instr->addr > concat->mtd.size)
202 * Check for proper erase block alignment of the to-be-erased area.
203 * It is easier to do this based on the super device's erase
204 * region info rather than looking at each particular sub-device
207 if (!concat->mtd.numeraseregions)
208 { /* the easy case: device has uniform erase block size */
209 if(instr->addr & (concat->mtd.erasesize - 1))
211 if(instr->len & (concat->mtd.erasesize - 1))
215 { /* device has variable erase size */
216 struct mtd_erase_region_info *erase_regions = concat->mtd.eraseregions;
219 * Find the erase region where the to-be-erased area begins:
221 for(i = 0; i < concat->mtd.numeraseregions &&
222 instr->addr >= erase_regions[i].offset; i++)
227 * Now erase_regions[i] is the region in which the
228 * to-be-erased area begins. Verify that the starting
229 * offset is aligned to this region's erase size:
231 if (instr->addr & (erase_regions[i].erasesize-1))
235 * now find the erase region where the to-be-erased area ends:
237 for(; i < concat->mtd.numeraseregions &&
238 (instr->addr + instr->len) >= erase_regions[i].offset ; ++i)
242 * check if the ending offset is aligned to this region's erase size
244 if ((instr->addr + instr->len) & (erase_regions[i].erasesize-1))
248 /* make a local copy of instr to avoid modifying the caller's struct */
249 erase = kmalloc(sizeof(struct erase_info),GFP_KERNEL);
258 * find the subdevice where the to-be-erased area begins, adjust
259 * starting offset to be relative to the subdevice start
261 for(i = 0; i < concat->num_subdev; i++)
263 subdev = concat->subdev[i];
264 if(subdev->size <= erase->addr)
265 erase->addr -= subdev->size;
269 if(i >= concat->num_subdev) /* must never happen since size */
270 BUG(); /* limit has been verified above */
272 /* now do the erase: */
274 for(;length > 0; i++) /* loop for all subevices affected by this request */
276 subdev = concat->subdev[i]; /* get current subdevice */
278 /* limit length to subdevice's size: */
279 if(erase->addr + length > subdev->size)
280 erase->len = subdev->size - erase->addr;
284 if (!(subdev->flags & MTD_WRITEABLE))
289 length -= erase->len;
290 if ((err = concat_dev_erase(subdev, erase)))
292 if(err == -EINVAL) /* sanity check: must never happen since */
293 BUG(); /* block alignment has been checked above */
297 * erase->addr specifies the offset of the area to be
298 * erased *within the current subdevice*. It can be
299 * non-zero only the first time through this loop, i.e.
300 * for the first subdevice where blocks need to be erased.
301 * All the following erases must begin at the start of the
302 * current subdevice, i.e. at offset zero.
310 instr->state = MTD_ERASE_DONE;
312 instr->callback(instr);
316 static int concat_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
318 struct mtd_concat *concat = CONCAT(mtd);
319 int i, err = -EINVAL;
321 if ((len + ofs) > mtd->size)
324 for(i = 0; i < concat->num_subdev; i++)
326 struct mtd_info *subdev = concat->subdev[i];
329 if (ofs >= subdev->size)
336 if (ofs + len > subdev->size)
337 size = subdev->size - ofs;
341 err = subdev->lock(subdev, ofs, size);
357 static int concat_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
359 struct mtd_concat *concat = CONCAT(mtd);
362 if ((len + ofs) > mtd->size)
365 for(i = 0; i < concat->num_subdev; i++)
367 struct mtd_info *subdev = concat->subdev[i];
370 if (ofs >= subdev->size)
377 if (ofs + len > subdev->size)
378 size = subdev->size - ofs;
382 err = subdev->unlock(subdev, ofs, size);
398 static void concat_sync(struct mtd_info *mtd)
400 struct mtd_concat *concat = CONCAT(mtd);
403 for(i = 0; i < concat->num_subdev; i++)
405 struct mtd_info *subdev = concat->subdev[i];
406 subdev->sync(subdev);
410 static int concat_suspend(struct mtd_info *mtd)
412 struct mtd_concat *concat = CONCAT(mtd);
415 for(i = 0; i < concat->num_subdev; i++)
417 struct mtd_info *subdev = concat->subdev[i];
418 if((rc = subdev->suspend(subdev)) < 0)
424 static void concat_resume(struct mtd_info *mtd)
426 struct mtd_concat *concat = CONCAT(mtd);
429 for(i = 0; i < concat->num_subdev; i++)
431 struct mtd_info *subdev = concat->subdev[i];
432 subdev->resume(subdev);
437 * This function constructs a virtual MTD device by concatenating
438 * num_devs MTD devices. A pointer to the new device object is
439 * stored to *new_dev upon success. This function does _not_
440 * register any devices: this is the caller's responsibility.
442 struct mtd_info *mtd_concat_create(
443 struct mtd_info *subdev[], /* subdevices to concatenate */
444 int num_devs, /* number of subdevices */
445 char *name) /* name for the new device */
449 struct mtd_concat *concat;
450 u_int32_t max_erasesize, curr_erasesize;
451 int num_erase_region;
453 printk(KERN_NOTICE "Concatenating MTD devices:\n");
454 for(i = 0; i < num_devs; i++)
455 printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
456 printk(KERN_NOTICE "into device \"%s\"\n", name);
458 /* allocate the device structure */
459 size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
460 concat = kmalloc (size, GFP_KERNEL);
463 printk ("memory allocation error while creating concatenated device \"%s\"\n",
467 memset(concat, 0, size);
468 concat->subdev = (struct mtd_info **)(concat + 1);
471 * Set up the new "super" device's MTD object structure, check for
472 * incompatibilites between the subdevices.
474 concat->mtd.type = subdev[0]->type;
475 concat->mtd.flags = subdev[0]->flags;
476 concat->mtd.size = subdev[0]->size;
477 concat->mtd.erasesize = subdev[0]->erasesize;
478 concat->mtd.oobblock = subdev[0]->oobblock;
479 concat->mtd.oobsize = subdev[0]->oobsize;
480 concat->mtd.ecctype = subdev[0]->ecctype;
481 concat->mtd.eccsize = subdev[0]->eccsize;
483 concat->subdev[0] = subdev[0];
485 for(i = 1; i < num_devs; i++)
487 if(concat->mtd.type != subdev[i]->type)
490 printk ("Incompatible device type on \"%s\"\n", subdev[i]->name);
493 if(concat->mtd.flags != subdev[i]->flags)
495 * Expect all flags except MTD_WRITEABLE to be equal on
498 if((concat->mtd.flags ^ subdev[i]->flags) & ~MTD_WRITEABLE)
501 printk ("Incompatible device flags on \"%s\"\n", subdev[i]->name);
504 else /* if writeable attribute differs, make super device writeable */
505 concat->mtd.flags |= subdev[i]->flags & MTD_WRITEABLE;
507 concat->mtd.size += subdev[i]->size;
508 if(concat->mtd.oobblock != subdev[i]->oobblock ||
509 concat->mtd.oobsize != subdev[i]->oobsize ||
510 concat->mtd.ecctype != subdev[i]->ecctype ||
511 concat->mtd.eccsize != subdev[i]->eccsize)
514 printk ("Incompatible OOB or ECC data on \"%s\"\n", subdev[i]->name);
517 concat->subdev[i] = subdev[i];
521 concat->num_subdev = num_devs;
522 concat->mtd.name = name;
525 * NOTE: for now, we do not provide any readv()/writev() methods
526 * because they are messy to implement and they are not
527 * used to a great extent anyway.
529 concat->mtd.erase = concat_erase;
530 concat->mtd.read = concat_read;
531 concat->mtd.write = concat_write;
532 concat->mtd.sync = concat_sync;
533 concat->mtd.lock = concat_lock;
534 concat->mtd.unlock = concat_unlock;
535 concat->mtd.suspend = concat_suspend;
536 concat->mtd.resume = concat_resume;
540 * Combine the erase block size info of the subdevices:
542 * first, walk the map of the new device and see how
543 * many changes in erase size we have
545 max_erasesize = curr_erasesize = subdev[0]->erasesize;
546 num_erase_region = 1;
547 for(i = 0; i < num_devs; i++)
549 if(subdev[i]->numeraseregions == 0)
550 { /* current subdevice has uniform erase size */
551 if(subdev[i]->erasesize != curr_erasesize)
552 { /* if it differs from the last subdevice's erase size, count it */
554 curr_erasesize = subdev[i]->erasesize;
555 if(curr_erasesize > max_erasesize)
556 max_erasesize = curr_erasesize;
560 { /* current subdevice has variable erase size */
562 for(j = 0; j < subdev[i]->numeraseregions; j++)
563 { /* walk the list of erase regions, count any changes */
564 if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
567 curr_erasesize = subdev[i]->eraseregions[j].erasesize;
568 if(curr_erasesize > max_erasesize)
569 max_erasesize = curr_erasesize;
575 if(num_erase_region == 1)
577 * All subdevices have the same uniform erase size.
580 concat->mtd.erasesize = curr_erasesize;
581 concat->mtd.numeraseregions = 0;
585 * erase block size varies across the subdevices: allocate
586 * space to store the data describing the variable erase regions
588 struct mtd_erase_region_info *erase_region_p;
589 u_int32_t begin, position;
591 concat->mtd.erasesize = max_erasesize;
592 concat->mtd.numeraseregions = num_erase_region;
593 concat->mtd.eraseregions = erase_region_p = kmalloc (
594 num_erase_region * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
598 printk ("memory allocation error while creating erase region list"
599 " for device \"%s\"\n", name);
604 * walk the map of the new device once more and fill in
605 * in erase region info:
607 curr_erasesize = subdev[0]->erasesize;
608 begin = position = 0;
609 for(i = 0; i < num_devs; i++)
611 if(subdev[i]->numeraseregions == 0)
612 { /* current subdevice has uniform erase size */
613 if(subdev[i]->erasesize != curr_erasesize)
615 * fill in an mtd_erase_region_info structure for the area
616 * we have walked so far:
618 erase_region_p->offset = begin;
619 erase_region_p->erasesize = curr_erasesize;
620 erase_region_p->numblocks = (position - begin) / curr_erasesize;
623 curr_erasesize = subdev[i]->erasesize;
626 position += subdev[i]->size;
629 { /* current subdevice has variable erase size */
631 for(j = 0; j < subdev[i]->numeraseregions; j++)
632 { /* walk the list of erase regions, count any changes */
633 if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
635 erase_region_p->offset = begin;
636 erase_region_p->erasesize = curr_erasesize;
637 erase_region_p->numblocks = (position - begin) / curr_erasesize;
640 curr_erasesize = subdev[i]->eraseregions[j].erasesize;
643 position += subdev[i]->eraseregions[j].numblocks * curr_erasesize;
647 /* Now write the final entry */
648 erase_region_p->offset = begin;
649 erase_region_p->erasesize = curr_erasesize;
650 erase_region_p->numblocks = (position - begin) / curr_erasesize;
657 * This function destroys an MTD object obtained from concat_mtd_devs()
660 void mtd_concat_destroy(struct mtd_info *mtd)
662 struct mtd_concat *concat = CONCAT(mtd);
663 if(concat->mtd.numeraseregions)
664 kfree(concat->mtd.eraseregions);
669 EXPORT_SYMBOL(mtd_concat_create);
670 EXPORT_SYMBOL(mtd_concat_destroy);
673 MODULE_LICENSE("GPL");
674 MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
675 MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");