[PATCH] device-mapper: add dm_find_md
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3fb8039..8175a2a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -131,6 +131,8 @@ static ctl_table raid_root_table[] = {
 
 static struct block_device_operations md_fops;
 
+static int start_readonly;
+
 /*
  * Enables to iterate over all existing md arrays
  * all_mddevs_lock protects this list.
@@ -330,18 +332,46 @@ static void free_disk_sb(mdk_rdev_t * rdev)
 static int super_written(struct bio *bio, unsigned int bytes_done, int error)
 {
        mdk_rdev_t *rdev = bio->bi_private;
+       mddev_t *mddev = rdev->mddev;
        if (bio->bi_size)
                return 1;
 
        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
-               md_error(rdev->mddev, rdev);
+               md_error(mddev, rdev);
 
-       if (atomic_dec_and_test(&rdev->mddev->pending_writes))
-               wake_up(&rdev->mddev->sb_wait);
+       if (atomic_dec_and_test(&mddev->pending_writes))
+               wake_up(&mddev->sb_wait);
        bio_put(bio);
        return 0;
 }
 
+static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
+{
+       struct bio *bio2 = bio->bi_private;
+       mdk_rdev_t *rdev = bio2->bi_private;
+       mddev_t *mddev = rdev->mddev;
+       if (bio->bi_size)
+               return 1;
+
+       if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+           error == -EOPNOTSUPP) {
+               unsigned long flags;
+               /* barriers don't appear to be supported :-( */
+               set_bit(BarriersNotsupp, &rdev->flags);
+               mddev->barriers_work = 0;
+               spin_lock_irqsave(&mddev->write_lock, flags);
+               bio2->bi_next = mddev->biolist;
+               mddev->biolist = bio2;
+               spin_unlock_irqrestore(&mddev->write_lock, flags);
+               wake_up(&mddev->sb_wait);
+               bio_put(bio);
+               return 0;
+       }
+       bio_put(bio2);
+       bio->bi_private = rdev;
+       return super_written(bio, bytes_done, error);
+}
+
 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
                   sector_t sector, int size, struct page *page)
 {
@@ -350,16 +380,54 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
+        *
+        * As we might need to resubmit the request if BIO_RW_BARRIER
+        * causes EOPNOTSUPP, we allocate a spare bio...
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
+       int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
 
        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
+       bio->bi_rw = rw;
+
        atomic_inc(&mddev->pending_writes);
-       submit_bio((1<<BIO_RW)|(1<<BIO_RW_SYNC), bio);
+       if (!test_bit(BarriersNotsupp, &rdev->flags)) {
+               struct bio *rbio;
+               rw |= (1<<BIO_RW_BARRIER);
+               rbio = bio_clone(bio, GFP_NOIO);
+               rbio->bi_private = bio;
+               rbio->bi_end_io = super_written_barrier;
+               submit_bio(rw, rbio);
+       } else
+               submit_bio(rw, bio);
+}
+
+void md_super_wait(mddev_t *mddev)
+{
+       /* Wait for all superblock writes that were scheduled to complete.
+        * If any had to be deferred because a BARRIER failed, resubmit them.
+        */
+       DEFINE_WAIT(wq);
+       for(;;) {
+               prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
+               if (atomic_read(&mddev->pending_writes)==0)
+                       break;
+               while (mddev->biolist) {
+                       struct bio *bio;
+                       spin_lock_irq(&mddev->write_lock);
+                       bio = mddev->biolist;
+                       mddev->biolist = bio->bi_next;
+                       bio->bi_next = NULL;
+                       spin_unlock_irq(&mddev->write_lock);
+                       submit_bio(bio->bi_rw, bio);
+               }
+               schedule();
+       }
+       finish_wait(&mddev->sb_wait, &wq);
 }
 
 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
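
The barrier handling above is transparent to callers: a caller queues superblock
writes with md_super_write() and then calls md_super_wait(), which resubmits,
without the barrier flag, any bio that super_written_barrier() parked on
mddev->biolist because the device rejected BIO_RW_BARRIER with -EOPNOTSUPP.
A minimal sketch of the calling pattern (this is the shape md_update_sb() takes
later in this patch):

	ITERATE_RDEV(mddev, rdev, tmp)
		if (!test_bit(Faulty, &rdev->flags))
			md_super_write(mddev, rdev,
				       rdev->sb_offset << 1, rdev->sb_size,
				       rdev->sb_page);
	md_super_wait(mddev);	/* retries any barrier-failed writes, then
				 * blocks until pending_writes hits zero */
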
@@ -610,7 +678,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
        mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
 
        rdev->raid_disk = -1;
-       rdev->in_sync = 0;
+       rdev->flags = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
@@ -671,21 +739,19 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                return 0;
 
        if (mddev->level != LEVEL_MULTIPATH) {
-               rdev->faulty = 0;
-               rdev->flags = 0;
                desc = sb->disks + rdev->desc_nr;
 
                if (desc->state & (1<<MD_DISK_FAULTY))
-                       rdev->faulty = 1;
+                       set_bit(Faulty, &rdev->flags);
                else if (desc->state & (1<<MD_DISK_SYNC) &&
                         desc->raid_disk < mddev->raid_disks) {
-                       rdev->in_sync = 1;
+                       set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
-               rdev->in_sync = 1;
+               set_bit(In_sync, &rdev->flags);
        return 0;
 }
 
@@ -698,7 +764,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;
-       char nm[20];
 
        /* make rdev->sb match mddev data..
         *
@@ -712,7 +778,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
         */
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;
-       unsigned int fixdesc=0;
 
        rdev->sb_size = MD_SB_BYTES;
 
@@ -761,33 +826,26 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        ITERATE_RDEV(mddev,rdev2,tmp) {
                mdp_disk_t *d;
                int desc_nr;
-               if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
+               if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
+                   && !test_bit(Faulty, &rdev2->flags))
                        desc_nr = rdev2->raid_disk;
                else
                        desc_nr = next_spare++;
-               if (desc_nr != rdev2->desc_nr) {
-                       fixdesc |= (1 << desc_nr);
-                       rdev2->desc_nr = desc_nr;
-                       if (rdev2->raid_disk >= 0) {
-                               sprintf(nm, "rd%d", rdev2->raid_disk);
-                               sysfs_remove_link(&mddev->kobj, nm);
-                       }
-                       sysfs_remove_link(&rdev2->kobj, "block");
-                       kobject_del(&rdev2->kobj);
-               }
+               rdev2->desc_nr = desc_nr;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
-               if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
+               if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
+                   && !test_bit(Faulty, &rdev2->flags))
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
-               if (rdev2->faulty) {
+               if (test_bit(Faulty, &rdev2->flags)) {
                        d->state = (1<<MD_DISK_FAULTY);
                        failed++;
-               } else if (rdev2->in_sync) {
+               } else if (test_bit(In_sync, &rdev2->flags)) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        d->state |= (1<<MD_DISK_SYNC);
                        active++;
@@ -800,25 +858,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                if (test_bit(WriteMostly, &rdev2->flags))
                        d->state |= (1<<MD_DISK_WRITEMOSTLY);
        }
-       if (fixdesc)
-               ITERATE_RDEV(mddev,rdev2,tmp)
-                       if (fixdesc & (1<<rdev2->desc_nr)) {
-                               snprintf(rdev2->kobj.name, KOBJ_NAME_LEN, "dev%d",
-                                        rdev2->desc_nr);
-                               /* kobject_add gets a ref on the parent, so
-                                * we have to drop the one we already have
-                                */
-                               kobject_add(&rdev2->kobj);
-                               kobject_put(rdev->kobj.parent);
-                               sysfs_create_link(&rdev2->kobj,
-                                                 &rdev2->bdev->bd_disk->kobj,
-                                                 "block");
-                               if (rdev2->raid_disk >= 0) {
-                                       sprintf(nm, "rd%d", rdev2->raid_disk);
-                                       sysfs_create_link(&mddev->kobj,
-                                                         &rdev2->kobj, nm);
-                               }
-                       }
        /* now set the "removed" and "faulty" bits on any missing devices */
        for (i=0 ; i < mddev->raid_disks ; i++) {
                mdp_disk_t *d = &sb->disks[i];
@@ -975,7 +1014,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
        struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
 
        rdev->raid_disk = -1;
-       rdev->in_sync = 0;
+       rdev->flags = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->patch_version = 0;
@@ -989,7 +1028,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->size = le64_to_cpu(sb->size)/2;
                mddev->events = le64_to_cpu(sb->events);
                mddev->bitmap_offset = 0;
-               mddev->default_bitmap_offset = 0;
                mddev->default_bitmap_offset = 1024;
                
                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
@@ -1027,22 +1065,19 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
                switch(role) {
                case 0xffff: /* spare */
-                       rdev->faulty = 0;
                        break;
                case 0xfffe: /* faulty */
-                       rdev->faulty = 1;
+                       set_bit(Faulty, &rdev->flags);
                        break;
                default:
-                       rdev->in_sync = 1;
-                       rdev->faulty = 0;
+                       set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = role;
                        break;
                }
-               rdev->flags = 0;
                if (sb->devflags & WriteMostly1)
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
-               rdev->in_sync = 1;
+               set_bit(In_sync, &rdev->flags);
 
        return 0;
 }
@@ -1086,9 +1121,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        
        ITERATE_RDEV(mddev,rdev2,tmp) {
                i = rdev2->desc_nr;
-               if (rdev2->faulty)
+               if (test_bit(Faulty, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
-               else if (rdev2->in_sync)
+               else if (test_bit(In_sync, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
@@ -1146,6 +1181,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 {
        mdk_rdev_t *same_pdev;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+       struct kobject *ko;
 
        if (rdev->mddev) {
                MD_BUG();
@@ -1174,17 +1210,22 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
                if (find_rdev_nr(mddev, rdev->desc_nr))
                        return -EBUSY;
        }
+       bdevname(rdev->bdev,b);
+       if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
+               return -ENOMEM;
                        
        list_add(&rdev->same_set, &mddev->disks);
        rdev->mddev = mddev;
-       printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
+       printk(KERN_INFO "md: bind<%s>\n", b);
 
-       rdev->kobj.k_name = NULL;
-       snprintf(rdev->kobj.name, KOBJ_NAME_LEN, "dev%d", rdev->desc_nr);
        rdev->kobj.parent = &mddev->kobj;
        kobject_add(&rdev->kobj);
 
-       sysfs_create_link(&rdev->kobj, &rdev->bdev->bd_disk->kobj, "block");
+       if (rdev->bdev->bd_part)
+               ko = &rdev->bdev->bd_part->kobj;
+       else
+               ko = &rdev->bdev->bd_disk->kobj;
+       sysfs_create_link(&rdev->kobj, ko, "block");
        return 0;
 }
 
@@ -1327,7 +1368,8 @@ static void print_rdev(mdk_rdev_t *rdev)
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
                bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
-               rdev->faulty, rdev->in_sync, rdev->desc_nr);
+               test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
+               rdev->desc_nr);
        if (rdev->sb_loaded) {
                printk(KERN_INFO "md: rdev superblock:\n");
                print_sb((mdp_super_t*)page_address(rdev->sb_page));
@@ -1384,7 +1426,7 @@ static void md_update_sb(mddev_t * mddev)
        int sync_req;
 
 repeat:
-       spin_lock(&mddev->write_lock);
+       spin_lock_irq(&mddev->write_lock);
        sync_req = mddev->in_sync;
        mddev->utime = get_seconds();
        mddev->events ++;
@@ -1407,11 +1449,11 @@ repeat:
         */
        if (!mddev->persistent) {
                mddev->sb_dirty = 0;
-               spin_unlock(&mddev->write_lock);
+               spin_unlock_irq(&mddev->write_lock);
                wake_up(&mddev->sb_wait);
                return;
        }
-       spin_unlock(&mddev->write_lock);
+       spin_unlock_irq(&mddev->write_lock);
 
        dprintk(KERN_INFO 
                "md: updating %s RAID superblock on device (in sync %d)\n",
@@ -1421,11 +1463,11 @@ repeat:
        ITERATE_RDEV(mddev,rdev,tmp) {
                char b[BDEVNAME_SIZE];
                dprintk(KERN_INFO "md: ");
-               if (rdev->faulty)
+               if (test_bit(Faulty, &rdev->flags))
                        dprintk("(skipping faulty ");
 
                dprintk("%s ", bdevname(rdev->bdev,b));
-               if (!rdev->faulty) {
+               if (!test_bit(Faulty, &rdev->flags)) {
                        md_super_write(mddev,rdev,
                                       rdev->sb_offset<<1, rdev->sb_size,
                                       rdev->sb_page);
@@ -1439,17 +1481,17 @@ repeat:
                        /* only need to write one superblock... */
                        break;
        }
-       wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+       md_super_wait(mddev);
        /* if there was a failure, sb_dirty was set to 1, and we re-write super */
 
-       spin_lock(&mddev->write_lock);
+       spin_lock_irq(&mddev->write_lock);
        if (mddev->in_sync != sync_req|| mddev->sb_dirty == 1) {
                /* have to write it out again */
-               spin_unlock(&mddev->write_lock);
+               spin_unlock_irq(&mddev->write_lock);
                goto repeat;
        }
        mddev->sb_dirty = 0;
-       spin_unlock(&mddev->write_lock);
+       spin_unlock_irq(&mddev->write_lock);
        wake_up(&mddev->sb_wait);
 
 }
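
The spin_lock() -> spin_lock_irq() conversions in this function (and in
md_write_start() and md_check_recovery() further down) follow directly from the
barrier change: super_written_barrier() now takes mddev->write_lock from bio
completion (interrupt) context,

	spin_lock_irqsave(&mddev->write_lock, flags);
	bio2->bi_next = mddev->biolist;
	mddev->biolist = bio2;
	spin_unlock_irqrestore(&mddev->write_lock, flags);

so every process-context holder of write_lock has to disable local interrupts,
otherwise a completion arriving on the same CPU while the lock is held would
deadlock trying to acquire it again.
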
@@ -1461,33 +1503,32 @@ struct rdev_sysfs_entry {
 };
 
 static ssize_t
-rdev_show_state(mdk_rdev_t *rdev, char *page)
+state_show(mdk_rdev_t *rdev, char *page)
 {
        char *sep = "";
        int len=0;
 
-       if (rdev->faulty) {
+       if (test_bit(Faulty, &rdev->flags)) {
                len+= sprintf(page+len, "%sfaulty",sep);
                sep = ",";
        }
-       if (rdev->in_sync) {
+       if (test_bit(In_sync, &rdev->flags)) {
                len += sprintf(page+len, "%sin_sync",sep);
                sep = ",";
        }
-       if (!rdev->faulty && !rdev->in_sync) {
+       if (!test_bit(Faulty, &rdev->flags) &&
+           !test_bit(In_sync, &rdev->flags)) {
                len += sprintf(page+len, "%sspare", sep);
                sep = ",";
        }
        return len+sprintf(page+len, "\n");
 }
 
-static struct rdev_sysfs_entry rdev_state = {
-       .attr = {.name = "state", .mode = S_IRUGO },
-       .show = rdev_show_state,
-};
+static struct rdev_sysfs_entry
+rdev_state = __ATTR_RO(state);
 
 static ssize_t
-rdev_show_super(mdk_rdev_t *rdev, char *page)
+super_show(mdk_rdev_t *rdev, char *page)
 {
        if (rdev->sb_loaded && rdev->sb_size) {
                memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
@@ -1495,10 +1536,8 @@ rdev_show_super(mdk_rdev_t *rdev, char *page)
        } else
                return 0;
 }
-static struct rdev_sysfs_entry rdev_super = {
-       .attr = {.name = "super", .mode = S_IRUGO },
-       .show = rdev_show_super,
-};
+static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
+
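The __ATTR_RO(name) helper from <linux/sysfs.h> is what forces the show
routines to be renamed to state_show()/super_show(): it stringifies the
attribute name and wires up name##_show. Roughly, the rdev_state definition
above expands to:

	static struct rdev_sysfs_entry rdev_state = {
		.attr = { .name = "state", .mode = 0444 /* S_IRUGO */ },
		.show = state_show,
	};
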
 static struct attribute *rdev_default_attrs[] = {
        &rdev_state.attr,
        &rdev_super.attr,
@@ -1578,8 +1617,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
        kobject_init(&rdev->kobj);
 
        rdev->desc_nr = -1;
-       rdev->faulty = 0;
-       rdev->in_sync = 0;
+       rdev->flags = 0;
        rdev->data_offset = 0;
        atomic_set(&rdev->nr_pending, 0);
        atomic_set(&rdev->read_errors, 0);
@@ -1670,7 +1708,7 @@ static void analyze_sbs(mddev_t * mddev)
                if (mddev->level == LEVEL_MULTIPATH) {
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
-                       rdev->in_sync = 1;
+                       set_bit(In_sync, &rdev->flags);
                }
        }
 
@@ -1685,37 +1723,33 @@ static void analyze_sbs(mddev_t * mddev)
 }
 
 static ssize_t
-md_show_level(mddev_t *mddev, char *page)
+level_show(mddev_t *mddev, char *page)
 {
        mdk_personality_t *p = mddev->pers;
-       if (p == NULL)
+       if (p == NULL && mddev->raid_disks == 0)
                return 0;
        if (mddev->level >= 0)
-               return sprintf(page, "RAID-%d\n", mddev->level);
+               return sprintf(page, "raid%d\n", mddev->level);
        else
                return sprintf(page, "%s\n", p->name);
 }
 
-static struct md_sysfs_entry md_level = {
-       .attr = {.name = "level", .mode = S_IRUGO },
-       .show = md_show_level,
-};
+static struct md_sysfs_entry md_level = __ATTR_RO(level);
 
 static ssize_t
-md_show_rdisks(mddev_t *mddev, char *page)
+raid_disks_show(mddev_t *mddev, char *page)
 {
+       if (mddev->raid_disks == 0)
+               return 0;
        return sprintf(page, "%d\n", mddev->raid_disks);
 }
 
-static struct md_sysfs_entry md_raid_disks = {
-       .attr = {.name = "raid_disks", .mode = S_IRUGO },
-       .show = md_show_rdisks,
-};
+static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks);
 
 static ssize_t
-md_show_scan(mddev_t *mddev, char *page)
+action_show(mddev_t *mddev, char *page)
 {
-       char *type = "none";
+       char *type = "idle";
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
                if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -1732,66 +1766,84 @@ md_show_scan(mddev_t *mddev, char *page)
 }
 
 static ssize_t
-md_store_scan(mddev_t *mddev, const char *page, size_t len)
+action_store(mddev_t *mddev, const char *page, size_t len)
 {
-       int canscan=0;
+       if (!mddev->pers || !mddev->pers->sync_request)
+               return -EINVAL;
+
+       if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) {
+               if (mddev->sync_thread) {
+                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+                       md_unregister_thread(mddev->sync_thread);
+                       mddev->sync_thread = NULL;
+                       mddev->recovery = 0;
+               }
+               return len;
+       }
 
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
-       down(&mddev->reconfig_sem);
-       if (mddev->pers && mddev->pers->sync_request)
-               canscan=1;
-       up(&mddev->reconfig_sem);
-       if (!canscan)
-               return -EINVAL;
-
-       if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0)
-               set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
-       else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0)
-               return -EINVAL;
-       set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
-       set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
-       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 ||
+           strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0)
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       else {
+               if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0)
+                       set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+               else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0)
+                       return -EINVAL;
+               set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+               set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       }
        md_wakeup_thread(mddev->thread);
        return len;
 }
 
 static ssize_t
-md_show_mismatch(mddev_t *mddev, char *page)
+mismatch_cnt_show(mddev_t *mddev, char *page)
 {
        return sprintf(page, "%llu\n",
                       (unsigned long long) mddev->resync_mismatches);
 }
 
-static struct md_sysfs_entry md_scan_mode = {
-       .attr = {.name = "scan_mode", .mode = S_IRUGO|S_IWUSR },
-       .show = md_show_scan,
-       .store = md_store_scan,
-};
+static struct md_sysfs_entry
+md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
 
-static struct md_sysfs_entry md_mismatches = {
-       .attr = {.name = "mismatch_cnt", .mode = S_IRUGO },
-       .show = md_show_mismatch,
-};
+
+static struct md_sysfs_entry
+md_mismatches = __ATTR_RO(mismatch_cnt);
 
 static struct attribute *md_default_attrs[] = {
        &md_level.attr,
        &md_raid_disks.attr,
+       NULL,
+};
+
+static struct attribute *md_redundancy_attrs[] = {
        &md_scan_mode.attr,
        &md_mismatches.attr,
        NULL,
 };
+static struct attribute_group md_redundancy_group = {
+       .name = NULL,
+       .attrs = md_redundancy_attrs,
+};
+
 
 static ssize_t
 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
        struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
        mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
+       ssize_t rv;
 
        if (!entry->show)
                return -EIO;
-       return entry->show(mddev, page);
+       mddev_lock(mddev);
+       rv = entry->show(mddev, page);
+       mddev_unlock(mddev);
+       return rv;
 }
 
 static ssize_t
@@ -1800,10 +1852,14 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
 {
        struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
        mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
+       ssize_t rv;
 
        if (!entry->store)
                return -EIO;
-       return entry->store(mddev, page, length);
+       mddev_lock(mddev);
+       rv = entry->store(mddev, page, length);
+       mddev_unlock(mddev);
+       return rv;
 }
 
 static void md_free(struct kobject *ko)
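
One caveat with the new locking in md_attr_show()/md_attr_store(): mddev_lock()
is down_interruptible() on mddev->reconfig_sem, so it can return -EINTR, and
that return value is ignored here. A more defensive form (a sketch, not what
this patch does) would be:

	rv = mddev_lock(mddev);
	if (rv)
		return rv;
	rv = entry->show(mddev, page);
	mddev_unlock(mddev);
	return rv;
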
@@ -1939,7 +1995,7 @@ static int do_md_run(mddev_t * mddev)
 
                /* devices must have minimum size of one chunk */
                ITERATE_RDEV(mddev,rdev,tmp) {
-                       if (rdev->faulty)
+                       if (test_bit(Faulty, &rdev->flags))
                                continue;
                        if (rdev->size < chunk_size / 1024) {
                                printk(KERN_WARNING
@@ -1967,7 +2023,7 @@ static int do_md_run(mddev_t * mddev)
         * Also find largest hardsector size
         */
        ITERATE_RDEV(mddev,rdev,tmp) {
-               if (rdev->faulty)
+               if (test_bit(Faulty, &rdev->flags))
                        continue;
                sync_blockdev(rdev->bdev);
                invalidate_bdev(rdev->bdev, 0);
@@ -1991,6 +2047,10 @@ static int do_md_run(mddev_t * mddev)
 
        mddev->recovery = 0;
        mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
+       mddev->barriers_work = 1;
+
+       if (start_readonly)
+               mddev->ro = 2; /* read-only, but switch on first write */
 
        /* before we start the array running, initialise the bitmap */
        err = bitmap_create(mddev);
@@ -2006,6 +2066,11 @@ static int do_md_run(mddev_t * mddev)
                bitmap_destroy(mddev);
                return err;
        }
+       if (mddev->pers->sync_request)
+               sysfs_create_group(&mddev->kobj, &md_redundancy_group);
+       else if (mddev->ro == 2) /* auto-readonly not meaningful */
+               mddev->ro = 0;
+
        atomic_set(&mddev->writes_pending,0);
        mddev->safemode = 0;
        mddev->safemode_timer.function = md_safemode_timeout;
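
Since the group is only registered when the personality provides
->sync_request, a resync-capable array (raid1, raid5, ...) gains two files
under the array's md directory in sysfs (assuming the usual /sys/block/mdX/md/
location set up by md_probe()):

	/sys/block/md0/md/sync_action	- "idle", "resync", "recover", "check"
					  or "repair"; writable, to start or
					  stop a resync/scrub
	/sys/block/md0/md/mismatch_cnt	- read-only resync mismatch counter

while raid0 and linear, which have no sync_request method, get neither, and
auto-read-only is cleared for them immediately.
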
@@ -2104,16 +2169,19 @@ static int do_md_stop(mddev_t * mddev, int ro)
 
                if (ro) {
                        err  = -ENXIO;
-                       if (mddev->ro)
+                       if (mddev->ro==1)
                                goto out;
                        mddev->ro = 1;
                } else {
                        bitmap_flush(mddev);
-                       wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+                       md_super_wait(mddev);
                        if (mddev->ro)
                                set_disk_ro(disk, 0);
                        blk_queue_make_request(mddev->queue, md_fail_request);
                        mddev->pers->stop(mddev);
+                       if (mddev->pers->sync_request)
+                               sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+
                        module_put(mddev->pers->owner);
                        mddev->pers = NULL;
                        if (mddev->ro)
@@ -2304,7 +2372,7 @@ static int autostart_array(dev_t startdev)
                return err;
        }
 
-       if (start_rdev->faulty) {
+       if (test_bit(Faulty, &start_rdev->flags)) {
                printk(KERN_WARNING 
                        "md: can not autostart based on faulty %s!\n",
                        bdevname(start_rdev->bdev,b));
@@ -2363,11 +2431,11 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
        nr=working=active=failed=spare=0;
        ITERATE_RDEV(mddev,rdev,tmp) {
                nr++;
-               if (rdev->faulty)
+               if (test_bit(Faulty, &rdev->flags))
                        failed++;
                else {
                        working++;
-                       if (rdev->in_sync)
+                       if (test_bit(In_sync, &rdev->flags))
                                active++;       
                        else
                                spare++;
@@ -2458,9 +2526,9 @@ static int get_disk_info(mddev_t * mddev, void __user * arg)
                info.minor = MINOR(rdev->bdev->bd_dev);
                info.raid_disk = rdev->raid_disk;
                info.state = 0;
-               if (rdev->faulty)
+               if (test_bit(Faulty, &rdev->flags))
                        info.state |= (1<<MD_DISK_FAULTY);
-               else if (rdev->in_sync) {
+               else if (test_bit(In_sync, &rdev->flags)) {
                        info.state |= (1<<MD_DISK_ACTIVE);
                        info.state |= (1<<MD_DISK_SYNC);
                }
@@ -2553,7 +2621,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                                validate_super(mddev, rdev);
                rdev->saved_raid_disk = rdev->raid_disk;
 
-               rdev->in_sync = 0; /* just to be sure */
+               clear_bit(In_sync, &rdev->flags); /* just to be sure */
                if (info->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
 
@@ -2591,11 +2659,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                else
                        rdev->raid_disk = -1;
 
-               rdev->faulty = 0;
+               rdev->flags = 0;
+
                if (rdev->raid_disk < mddev->raid_disks)
-                       rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
-               else
-                       rdev->in_sync = 0;
+                       if (info->state & (1<<MD_DISK_SYNC))
+                               set_bit(In_sync, &rdev->flags);
 
                if (info->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
@@ -2694,14 +2762,14 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
                goto abort_export;
        }
 
-       if (rdev->faulty) {
+       if (test_bit(Faulty, &rdev->flags)) {
                printk(KERN_WARNING 
                        "md: can not hot-add faulty %s disk to %s!\n",
                        bdevname(rdev->bdev,b), mdname(mddev));
                err = -EINVAL;
                goto abort_export;
        }
-       rdev->in_sync = 0;
+       clear_bit(In_sync, &rdev->flags);
        rdev->desc_nr = -1;
        bind_rdev_to_array(rdev, mddev);
 
@@ -2863,6 +2931,9 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
 
        mddev->sb_dirty      = 1;
 
+       mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+       mddev->bitmap_offset = 0;
+
        /*
         * Generate a 128 bit UUID
         */
@@ -3087,7 +3158,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
                if (cnt > 0 ) {
                        printk(KERN_WARNING
                               "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
-                              "This will not be supported beyond 2.6\n",
+                              "This will not be supported beyond July 2006\n",
                               current->comm, current->pid);
                        cnt--;
                }
@@ -3221,12 +3292,22 @@ static int md_ioctl(struct inode *inode, struct file *file,
 
        /*
         * The remaining ioctls are changing the state of the
-        * superblock, so we do not allow read-only arrays
-        * here:
+        * superblock, so we do not allow them on read-only arrays.
+        * However non-MD ioctls (e.g. get-size) will still come through
+        * here and hit the 'default' below, so only disallow
+        * 'md' ioctls, and switch to rw mode if started auto-readonly.
         */
-       if (mddev->ro) {
-               err = -EROFS;
-               goto abort_unlock;
+       if (_IOC_TYPE(cmd) == MD_MAJOR &&
+           mddev->ro && mddev->pers) {
+               if (mddev->ro == 2) {
+                       mddev->ro = 0;
+                       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       md_wakeup_thread(mddev->thread);
+
+               } else {
+                       err = -EROFS;
+                       goto abort_unlock;
+               }
        }
 
        switch (cmd)
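
The _IOC_TYPE(cmd) == MD_MAJOR test works because the md ioctls in
include/linux/raid/md_u.h are all declared with MD_MAJOR (9) as their ioctl
type, for example (quoting the existing header, not part of this patch):

	#define GET_ARRAY_INFO		_IOR (MD_MAJOR, 0x11, mdu_array_info_t)
	#define RUN_ARRAY		_IOW (MD_MAJOR, 0x30, mdu_param_t)

whereas generic block ioctls such as BLKGETSIZE use the 0x12 type, so on a
read-only array they still fall through to the switch below rather than being
refused with -EROFS.
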
@@ -3356,21 +3437,26 @@ static int md_thread(void * arg)
         */
 
        allow_signal(SIGKILL);
-       complete(thread->event);
        while (!kthread_should_stop()) {
-               void (*run)(mddev_t *);
 
-               wait_event_interruptible_timeout(thread->wqueue,
-                                                test_bit(THREAD_WAKEUP, &thread->flags)
-                                                || kthread_should_stop(),
-                                                thread->timeout);
+               /* We need to wait INTERRUPTIBLE so that
+                * we don't add to the load-average.
+                * That means we need to be sure no signals are
+                * pending
+                */
+               if (signal_pending(current))
+                       flush_signals(current);
+
+               wait_event_interruptible_timeout
+                       (thread->wqueue,
+                        test_bit(THREAD_WAKEUP, &thread->flags)
+                        || kthread_should_stop(),
+                        thread->timeout);
                try_to_freeze();
 
                clear_bit(THREAD_WAKEUP, &thread->flags);
 
-               run = thread->run;
-               if (run)
-                       run(thread->mddev);
+               thread->run(thread->mddev);
        }
 
        return 0;
@@ -3389,7 +3475,6 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
                                 const char *name)
 {
        mdk_thread_t *thread;
-       struct completion event;
 
        thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
        if (!thread)
@@ -3398,18 +3483,14 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
        memset(thread, 0, sizeof(mdk_thread_t));
        init_waitqueue_head(&thread->wqueue);
 
-       init_completion(&event);
-       thread->event = &event;
        thread->run = run;
        thread->mddev = mddev;
-       thread->name = name;
        thread->timeout = MAX_SCHEDULE_TIMEOUT;
        thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
        }
-       wait_for_completion(&event);
        return thread;
 }
 
@@ -3428,7 +3509,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
                return;
        }
 
-       if (!rdev || rdev->faulty)
+       if (!rdev || test_bit(Faulty, &rdev->flags))
                return;
 /*
        dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
@@ -3614,8 +3695,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
                seq_printf(seq, "%s : %sactive", mdname(mddev),
                                                mddev->pers ? "" : "in");
                if (mddev->pers) {
-                       if (mddev->ro)
+                       if (mddev->ro==1)
                                seq_printf(seq, " (read-only)");
+                       if (mddev->ro==2)
+                               seq_printf(seq, " (auto-read-only)");
                        seq_printf(seq, " %s", mddev->pers->name);
                }
 
@@ -3626,7 +3709,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
                                bdevname(rdev->bdev,b), rdev->desc_nr);
                        if (test_bit(WriteMostly, &rdev->flags))
                                seq_printf(seq, "(W)");
-                       if (rdev->faulty) {
+                       if (test_bit(Faulty, &rdev->flags)) {
                                seq_printf(seq, "(F)");
                                continue;
                        } else if (rdev->raid_disk < 0)
@@ -3655,11 +3738,15 @@ static int md_seq_show(struct seq_file *seq, void *v)
                if (mddev->pers) {
                        mddev->pers->status (seq, mddev);
                        seq_printf(seq, "\n      ");
-                       if (mddev->curr_resync > 2) {
-                               status_resync (seq, mddev);
-                               seq_printf(seq, "\n      ");
-                       } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
-                               seq_printf(seq, "       resync=DELAYED\n      ");
+                       if (mddev->pers->sync_request) {
+                               if (mddev->curr_resync > 2) {
+                                       status_resync (seq, mddev);
+                                       seq_printf(seq, "\n      ");
+                               } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
+                                       seq_printf(seq, "\tresync=DELAYED\n      ");
+                               else if (mddev->recovery_cp < MaxSector)
+                                       seq_printf(seq, "\tresync=PENDING\n      ");
+                       }
                } else
                        seq_printf(seq, "\n       ");
 
@@ -3761,11 +3848,20 @@ static int is_mddev_idle(mddev_t *mddev)
                curr_events = disk_stat_read(disk, sectors[0]) + 
                                disk_stat_read(disk, sectors[1]) - 
                                atomic_read(&disk->sync_io);
-               /* Allow some slack between valud of curr_events and last_events,
-                * as there are some uninteresting races.
+               /* The difference between curr_events and last_events
+                * will be affected by any new non-sync IO (making
+                * curr_events bigger) and any difference in the amount of
+                * in-flight sync IO (making curr_events bigger or smaller).
+                * The amount in-flight is currently limited to
+                * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
+                * which is at most 4096 sectors.
+                * These numbers are fairly fragile and should be made
+                * more robust, probably by enforcing the
+                * 'window size' that md_do_sync sort-of uses.
+                *
                 * Note: the following is an unsigned comparison.
                 */
-               if ((curr_events - rdev->last_events + 32) > 64) {
+               if ((curr_events - rdev->last_events + 4096) > 8192) {
                        rdev->last_events = curr_events;
                        idle = 0;
                }
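
To spell out the unsigned trick with the wider window: the comparison treats a
negative difference as a huge unsigned value, so (with purely illustrative
numbers)

	last_events = 10000, curr_events = 13000:  3000 + 4096 = 7096  -> still idle
	last_events = 10000, curr_events = 15000:  5000 + 4096 = 9096  -> not idle
	last_events = 10000, curr_events =  5000:  (unsigned)(-5000) + 4096 wraps huge -> not idle

i.e. the array only counts as idle while curr_events stays within 4096 sectors
of last_events in either direction.
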
@@ -3796,15 +3892,22 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
        if (bio_data_dir(bi) != WRITE)
                return;
 
+       BUG_ON(mddev->ro == 1);
+       if (mddev->ro == 2) {
+               /* need to switch to read/write */
+               mddev->ro = 0;
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               md_wakeup_thread(mddev->thread);
+       }
        atomic_inc(&mddev->writes_pending);
        if (mddev->in_sync) {
-               spin_lock(&mddev->write_lock);
+               spin_lock_irq(&mddev->write_lock);
                if (mddev->in_sync) {
                        mddev->in_sync = 0;
                        mddev->sb_dirty = 1;
                        md_wakeup_thread(mddev->thread);
                }
-               spin_unlock(&mddev->write_lock);
+               spin_unlock_irq(&mddev->write_lock);
        }
        wait_event(mddev->sb_wait, mddev->sb_dirty==0);
 }
@@ -3860,9 +3963,7 @@ static void md_do_sync(mddev_t *mddev)
                mddev->curr_resync = 2;
 
        try_again:
-               if (signal_pending(current) ||
-                   kthread_should_stop()) {
-                       flush_signals(current);
+               if (kthread_should_stop()) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto skip;
                }
@@ -3882,9 +3983,8 @@ static void md_do_sync(mddev_t *mddev)
                                         * time 'round when curr_resync == 2
                                         */
                                        continue;
-                               prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
-                               if (!signal_pending(current) &&
-                                   !kthread_should_stop() &&
+                               prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+                               if (!kthread_should_stop() &&
                                    mddev2->curr_resync >= mddev->curr_resync) {
                                        printk(KERN_INFO "md: delaying resync of %s"
                                               " until %s has finished resync (they"
@@ -3993,13 +4093,12 @@ static void md_do_sync(mddev_t *mddev)
                }
 
 
-               if (signal_pending(current) || kthread_should_stop()) {
+               if (kthread_should_stop()) {
                        /*
                         * got a signal, exit.
                         */
                        printk(KERN_INFO 
                                "md: md_do_sync() got signal ... exiting\n");
-                       flush_signals(current);
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto out;
                }
@@ -4021,7 +4120,7 @@ static void md_do_sync(mddev_t *mddev)
                if (currspeed > sysctl_speed_limit_min) {
                        if ((currspeed > sysctl_speed_limit_max) ||
                                        !is_mddev_idle(mddev)) {
-                               msleep_interruptible(250);
+                               msleep(500);
                                goto repeat;
                        }
                }
@@ -4114,7 +4213,7 @@ void md_check_recovery(mddev_t *mddev)
        if (mddev_trylock(mddev)==0) {
                int spares =0;
 
-               spin_lock(&mddev->write_lock);
+               spin_lock_irq(&mddev->write_lock);
                if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
                    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
                        mddev->in_sync = 1;
@@ -4122,7 +4221,7 @@ void md_check_recovery(mddev_t *mddev)
                }
                if (mddev->safemode == 1)
                        mddev->safemode = 0;
-               spin_unlock(&mddev->write_lock);
+               spin_unlock_irq(&mddev->write_lock);
 
                if (mddev->sb_dirty)
                        md_update_sb(mddev);
@@ -4174,7 +4273,7 @@ void md_check_recovery(mddev_t *mddev)
                 */
                ITERATE_RDEV(mddev,rdev,rtmp)
                        if (rdev->raid_disk >= 0 &&
-                           (rdev->faulty || ! rdev->in_sync) &&
+                           (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
                            atomic_read(&rdev->nr_pending)==0) {
                                if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
                                        char nm[20];
@@ -4187,7 +4286,7 @@ void md_check_recovery(mddev_t *mddev)
                if (mddev->degraded) {
                        ITERATE_RDEV(mddev,rdev,rtmp)
                                if (rdev->raid_disk < 0
-                                   && !rdev->faulty) {
+                                   && !test_bit(Faulty, &rdev->flags)) {
                                        if (mddev->pers->hot_add_disk(mddev,rdev)) {
                                                char nm[20];
                                                sprintf(nm, "rd%d", rdev->raid_disk);
@@ -4283,7 +4382,7 @@ static int __init md_init(void)
                        " MD_SB_DISKS=%d\n",
                        MD_MAJOR_VERSION, MD_MINOR_VERSION,
                        MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
-       printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR,
+       printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
                        BITMAP_MINOR);
 
        if (register_blkdev(MAJOR_NR, "md"))
@@ -4347,7 +4446,7 @@ static void autostart_arrays(int part)
                if (IS_ERR(rdev))
                        continue;
 
-               if (rdev->faulty) {
+               if (test_bit(Faulty, &rdev->flags)) {
                        MD_BUG();
                        continue;
                }
@@ -4394,6 +4493,23 @@ static __exit void md_exit(void)
 module_init(md_init)
 module_exit(md_exit)
 
+static int get_ro(char *buffer, struct kernel_param *kp)
+{
+       return sprintf(buffer, "%d", start_readonly);
+}
+static int set_ro(const char *val, struct kernel_param *kp)
+{
+       char *e;
+       int num = simple_strtoul(val, &e, 10);
+       if (*val && (*e == '\0' || *e == '\n')) {
+               start_readonly = num;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
+
 EXPORT_SYMBOL(register_md_personality);
 EXPORT_SYMBOL(unregister_md_personality);
 EXPORT_SYMBOL(md_error);
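
For reference, the parameter is exported under the name start_ro (backing the
start_readonly variable), so with the 0600 mode above it should be settable at
load time or at run time, presumably as "modprobe md-mod start_ro=1" or via
/sys/module/md_mod/parameters/start_ro. With it set, do_md_run() starts new
arrays with mddev->ro == 2 (auto-read-only, unless the personality has no
sync_request, in which case it is cleared again), and the first write or
md-specific ioctl flips the array back to read-write and sets
MD_RECOVERY_NEEDED so any pending resync can start.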