[PATCH] Uninline a bunch of other functions
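
The subject covers the uninlining (__release_stripe, insert_hash,
init_stripe, raid5_activate_delayed and activate_bit_delay lose their
"inline"), but the blobdiff range shown here (121fbaa..25976bf) also
includes the other raid5.c changes merged in the same span:

 - convert the stripe hash table from the open-coded
   hash_next/hash_pprev list to hlist_head/hlist_node, and size NR_HASH
   from a single page instead of HASH_PAGES;
 - gate the inactive-stripe wakeups on conf->max_nr_stripes rather than
   the compile-time NR_STRIPES;
 - count read errors per rdev, retry failed reads, and only call
   md_error() once the array is degraded, a rewrite has already failed,
   or too many errors have accumulated;
 - replace rdev->faulty / rdev->in_sync with test_bit()/set_bit() on
   rdev->flags, and use rcu_dereference() when sampling disks[i].rdev;
 - honour MD_RECOVERY_CHECK and MD_RECOVERY_REQUESTED and account
   resync_mismatches during a check pass;
 - move stripe_cache_size / stripe_cache_active from a private raid5
   kobject to an md_sysfs_entry attribute group on mddev->kobj;
 - register "raid4" as its own mdk_personality and add the
   md-raid4/md-raid5 and md-level-4/md-level-5 module aliases;
 - assorted kzalloc()/put_page()/printk-severity cleanups.

With the sysfs move the stripe cache stays runtime-tunable, e.g. for a
typical array named md0 (path assumed):

    echo 4096 > /sys/block/md0/md/stripe_cache_size
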
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 121fbaa..25976bf 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
 #define STRIPE_SHIFT           (PAGE_SHIFT - 9)
 #define STRIPE_SECTORS         (STRIPE_SIZE>>9)
 #define        IO_THRESHOLD            1
-#define HASH_PAGES             1
-#define HASH_PAGES_ORDER       0
-#define NR_HASH                        (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
+#define NR_HASH                        (PAGE_SIZE / sizeof(struct hlist_head))
 #define HASH_MASK              (NR_HASH - 1)
 
-#define stripe_hash(conf, sect)        ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
+#define stripe_hash(conf, sect)        (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
 
 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
  * order without overlap.  There may be several bio's per stripe+device, and
@@ -71,7 +69,7 @@
 
 static void print_raid5_conf (raid5_conf_t *conf);
 
-static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
+static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 {
        if (atomic_dec_and_test(&sh->count)) {
                if (!list_empty(&sh->lru))
@@ -98,7 +96,7 @@ static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
                        list_add_tail(&sh->lru, &conf->inactive_list);
                        atomic_dec(&conf->active_stripes);
                        if (!conf->inactive_blocked ||
-                           atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
+                           atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
                                wake_up(&conf->wait_for_stripe);
                }
        }
@@ -113,29 +111,21 @@ static void release_stripe(struct stripe_head *sh)
        spin_unlock_irqrestore(&conf->device_lock, flags);
 }
 
-static void remove_hash(struct stripe_head *sh)
+static inline void remove_hash(struct stripe_head *sh)
 {
        PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
 
-       if (sh->hash_pprev) {
-               if (sh->hash_next)
-                       sh->hash_next->hash_pprev = sh->hash_pprev;
-               *sh->hash_pprev = sh->hash_next;
-               sh->hash_pprev = NULL;
-       }
+       hlist_del_init(&sh->hash);
 }
 
-static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
+static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 {
-       struct stripe_head **shp = &stripe_hash(conf, sh->sector);
+       struct hlist_head *hp = stripe_hash(conf, sh->sector);
 
        PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
 
        CHECK_DEVLOCK();
-       if ((sh->hash_next = *shp) != NULL)
-               (*shp)->hash_pprev = &sh->hash_next;
-       *shp = sh;
-       sh->hash_pprev = shp;
+       hlist_add_head(&sh->hash, hp);
 }
 
 
@@ -167,7 +157,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
                if (!p)
                        continue;
                sh->dev[i].page = NULL;
-               page_cache_release(p);
+               put_page(p);
        }
 }
 
@@ -188,7 +178,7 @@ static int grow_buffers(struct stripe_head *sh, int num)
 
 static void raid5_build_block (struct stripe_head *sh, int i);
 
-static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
+static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
 {
        raid5_conf_t *conf = sh->raid_conf;
        int disks = conf->raid_disks, i;
@@ -228,10 +218,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i
 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
 {
        struct stripe_head *sh;
+       struct hlist_node *hn;
 
        CHECK_DEVLOCK();
        PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
-       for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
+       hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
                if (sh->sector == sector)
                        return sh;
        PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -264,7 +255,8 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
                                conf->inactive_blocked = 1;
                                wait_event_lock_irq(conf->wait_for_stripe,
                                                    !list_empty(&conf->inactive_list) &&
-                                                   (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
+                                                   (atomic_read(&conf->active_stripes)
+                                                    < (conf->max_nr_stripes *3/4)
                                                     || !conf->inactive_blocked),
                                                    conf->device_lock,
                                                    unplug_slaves(conf->mddev);
@@ -416,25 +408,34 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
                set_bit(R5_UPTODATE, &sh->dev[i].flags);
 #endif
                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
-                       printk("R5: read error corrected!!\n");
+                       printk(KERN_INFO "raid5: read error corrected!!\n");
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
                }
+               if (atomic_read(&conf->disks[i].rdev->read_errors))
+                       atomic_set(&conf->disks[i].rdev->read_errors, 0);
        } else {
+               int retry = 0;
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
-               if (conf->mddev->degraded) {
-                       printk("R5: read error not correctable.\n");
-                       clear_bit(R5_ReadError, &sh->dev[i].flags);
-                       clear_bit(R5_ReWrite, &sh->dev[i].flags);
-                       md_error(conf->mddev, conf->disks[i].rdev);
-               } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
+               atomic_inc(&conf->disks[i].rdev->read_errors);
+               if (conf->mddev->degraded)
+                       printk(KERN_WARNING "raid5: read error not correctable.\n");
+               else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
                        /* Oh, no!!! */
-                       printk("R5: read error NOT corrected!!\n");
+                       printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
+               else if (atomic_read(&conf->disks[i].rdev->read_errors)
+                        > conf->max_nr_stripes)
+                       printk(KERN_WARNING
+                              "raid5: Too many read errors, failing device.\n");
+               else
+                       retry = 1;
+               if (retry)
+                       set_bit(R5_ReadError, &sh->dev[i].flags);
+               else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
                        md_error(conf->mddev, conf->disks[i].rdev);
-               } else
-                       set_bit(R5_ReadError, &sh->dev[i].flags);
+               }
        }
        rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
 #if 0
@@ -517,19 +518,19 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
        raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
        PRINTK("raid5: error called\n");
 
-       if (!rdev->faulty) {
+       if (!test_bit(Faulty, &rdev->flags)) {
                mddev->sb_dirty = 1;
-               if (rdev->in_sync) {
+               if (test_bit(In_sync, &rdev->flags)) {
                        conf->working_disks--;
                        mddev->degraded++;
                        conf->failed_disks++;
-                       rdev->in_sync = 0;
+                       clear_bit(In_sync, &rdev->flags);
                        /*
                         * if recovery was running, make sure it aborts.
                         */
                        set_bit(MD_RECOVERY_ERR, &mddev->recovery);
                }
-               rdev->faulty = 1;
+               set_bit(Faulty, &rdev->flags);
                printk (KERN_ALERT
                        "raid5: Disk failure on %s, disabling device."
                        " Operation continuing on %d devices\n",
@@ -595,7 +596,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
                        *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                default:
-                       printk("raid5: unsupported algorithm %d\n",
+                       printk(KERN_ERR "raid5: unsupported algorithm %d\n",
                                conf->algorithm);
        }
 
@@ -636,7 +637,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
                        i -= (sh->pd_idx + 1);
                        break;
                default:
-                       printk("raid5: unsupported algorithm %d\n",
+                       printk(KERN_ERR "raid5: unsupported algorithm %d\n",
                                conf->algorithm);
        }
 
@@ -645,7 +646,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
 
        check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
        if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
-               printk("compute_blocknr: map not correct\n");
+               printk(KERN_ERR "compute_blocknr: map not correct\n");
                return 0;
        }
        return r_sector;
@@ -728,7 +729,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
                if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
                        ptr[count++] = p;
                else
-                       printk("compute_block() %d, stripe %llu, %d"
+                       printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
                                " not present\n", dd_idx,
                                (unsigned long long)sh->sector, i);
 
@@ -951,11 +952,11 @@ static void handle_stripe(struct stripe_head *sh)
        syncing = test_bit(STRIPE_SYNCING, &sh->state);
        /* Now to look around and see what can be done */
 
+       rcu_read_lock();
        for (i=disks; i--; ) {
                mdk_rdev_t *rdev;
                dev = &sh->dev[i];
                clear_bit(R5_Insync, &dev->flags);
-               clear_bit(R5_Syncio, &dev->flags);
 
                PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
                        i, dev->flags, dev->toread, dev->towrite, dev->written);
@@ -994,19 +995,20 @@ static void handle_stripe(struct stripe_head *sh)
                                non_overwrite++;
                }
                if (dev->written) written++;
-               rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
-               if (!rdev || !rdev->in_sync) {
-                       /* The ReadError flag wil just be confusing now */
+               rdev = rcu_dereference(conf->disks[i].rdev);
+               if (!rdev || !test_bit(In_sync, &rdev->flags)) {
+                       /* The ReadError flag will just be confusing now */
                        clear_bit(R5_ReadError, &dev->flags);
                        clear_bit(R5_ReWrite, &dev->flags);
                }
-               if (!rdev || !rdev->in_sync
+               if (!rdev || !test_bit(In_sync, &rdev->flags)
                    || test_bit(R5_ReadError, &dev->flags)) {
                        failed++;
                        failed_num = i;
                } else
                        set_bit(R5_Insync, &dev->flags);
        }
+       rcu_read_unlock();
        PRINTK("locked=%d uptodate=%d to_read=%d"
                " to_write=%d failed=%d failed_num=%d\n",
                locked, uptodate, to_read, to_write, failed, failed_num);
@@ -1018,10 +1020,13 @@ static void handle_stripe(struct stripe_head *sh)
                        int bitmap_end = 0;
 
                        if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
-                               mdk_rdev_t *rdev = conf->disks[i].rdev;
-                               if (rdev && rdev->in_sync)
+                               mdk_rdev_t *rdev;
+                               rcu_read_lock();
+                               rdev = rcu_dereference(conf->disks[i].rdev);
+                               if (rdev && test_bit(In_sync, &rdev->flags))
                                        /* multiple read failures in one stripe */
                                        md_error(conf->mddev, rdev);
+                               rcu_read_unlock();
                        }
 
                        spin_lock_irq(&conf->device_lock);
@@ -1170,9 +1175,6 @@ static void handle_stripe(struct stripe_head *sh)
                                        locked++;
                                        PRINTK("Reading block %d (sync=%d)\n", 
                                                i, syncing);
-                                       if (syncing)
-                                               md_sync_acct(conf->disks[i].rdev->bdev,
-                                                            STRIPE_SECTORS);
                                }
                        }
                }
@@ -1279,7 +1281,7 @@ static void handle_stripe(struct stripe_head *sh)
         * is available
         */
        if (syncing && locked == 0 &&
-           !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) {
+           !test_bit(STRIPE_INSYNC, &sh->state)) {
                set_bit(STRIPE_HANDLE, &sh->state);
                if (failed == 0) {
                        char *pagea;
@@ -1292,27 +1294,30 @@ static void handle_stripe(struct stripe_head *sh)
                            !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
                                /* parity is correct (on disc, not in buffer any more) */
                                set_bit(STRIPE_INSYNC, &sh->state);
+                       } else {
+                               conf->mddev->resync_mismatches += STRIPE_SECTORS;
+                               if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                                       /* don't try to repair!! */
+                                       set_bit(STRIPE_INSYNC, &sh->state);
+                               else {
+                                       compute_block(sh, sh->pd_idx);
+                                       uptodate++;
+                               }
                        }
                }
                if (!test_bit(STRIPE_INSYNC, &sh->state)) {
+                       /* either failed parity check, or recovery is happening */
                        if (failed==0)
                                failed_num = sh->pd_idx;
-                       /* should be able to compute the missing block and write it to spare */
-                       if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) {
-                               if (uptodate+1 != disks)
-                                       BUG();
-                               compute_block(sh, failed_num);
-                               uptodate++;
-                       }
-                       if (uptodate != disks)
-                               BUG();
                        dev = &sh->dev[failed_num];
+                       BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
+                       BUG_ON(uptodate != disks);
+
                        set_bit(R5_LOCKED, &dev->flags);
                        set_bit(R5_Wantwrite, &dev->flags);
                        clear_bit(STRIPE_DEGRADED, &sh->state);
                        locked++;
                        set_bit(STRIPE_INSYNC, &sh->state);
-                       set_bit(R5_Syncio, &dev->flags);
                }
        }
        if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
@@ -1323,7 +1328,8 @@ static void handle_stripe(struct stripe_head *sh)
        /* If the failed drive is just a ReadError, then we might need to progress
         * the repair/check process
         */
-       if (failed == 1 && test_bit(R5_ReadError, &sh->dev[failed_num].flags)
+       if (failed == 1 && ! conf->mddev->ro &&
+           test_bit(R5_ReadError, &sh->dev[failed_num].flags)
            && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
            && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
                ) {
@@ -1369,15 +1375,15 @@ static void handle_stripe(struct stripe_head *sh)
                        bi->bi_end_io = raid5_end_read_request;
  
                rcu_read_lock();
-               rdev = conf->disks[i].rdev;
-               if (rdev && rdev->faulty)
+               rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
                        rdev = NULL;
                if (rdev)
                        atomic_inc(&rdev->nr_pending);
                rcu_read_unlock();
  
                if (rdev) {
-                       if (test_bit(R5_Syncio, &sh->dev[i].flags))
+                       if (syncing)
                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
                        bi->bi_bdev = rdev->bdev;
@@ -1394,6 +1400,9 @@ static void handle_stripe(struct stripe_head *sh)
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
                        bi->bi_next = NULL;
+                       if (rw == WRITE &&
+                           test_bit(R5_ReWrite, &sh->dev[i].flags))
+                               atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
                        generic_make_request(bi);
                } else {
                        if (rw == 1)
@@ -1406,7 +1415,7 @@ static void handle_stripe(struct stripe_head *sh)
        }
 }
 
-static inline void raid5_activate_delayed(raid5_conf_t *conf)
+static void raid5_activate_delayed(raid5_conf_t *conf)
 {
        if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
                while (!list_empty(&conf->delayed_list)) {
@@ -1422,7 +1431,7 @@ static inline void raid5_activate_delayed(raid5_conf_t *conf)
        }
 }
 
-static inline void activate_bit_delay(raid5_conf_t *conf)
+static void activate_bit_delay(raid5_conf_t *conf)
 {
        /* device_lock is held */
        struct list_head head;
@@ -1443,8 +1452,8 @@ static void unplug_slaves(mddev_t *mddev)
 
        rcu_read_lock();
        for (i=0; i<mddev->raid_disks; i++) {
-               mdk_rdev_t *rdev = conf->disks[i].rdev;
-               if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
+               mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
                        request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
 
                        atomic_inc(&rdev->nr_pending);
@@ -1488,8 +1497,8 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
 
        rcu_read_lock();
        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-               mdk_rdev_t *rdev = conf->disks[i].rdev;
-               if (rdev && !rdev->faulty) {
+               mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct block_device *bdev = rdev->bdev;
                        request_queue_t *r_queue = bdev_get_queue(bdev);
 
@@ -1631,6 +1640,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                return rv;
        }
        if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+           !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
            !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
                /* we can skip this block, and probably more */
                sync_blocks /= STRIPE_SECTORS;
@@ -1689,7 +1699,9 @@ static void raid5d (mddev_t *mddev)
 
                if (conf->seq_flush - conf->seq_write > 0) {
                        int seq = conf->seq_flush;
+                       spin_unlock_irq(&conf->device_lock);
                        bitmap_unplug(mddev->bitmap);
+                       spin_lock_irq(&conf->device_lock);
                        conf->seq_write = seq;
                        activate_bit_delay(conf);
                }
@@ -1727,25 +1739,26 @@ static void raid5d (mddev_t *mddev)
        PRINTK("--- raid5d inactive\n");
 }
 
-struct raid5_sysfs_entry {
-       struct attribute attr;
-       ssize_t (*show)(raid5_conf_t *, char *);
-       ssize_t (*store)(raid5_conf_t *, const char *, ssize_t);
-};
-
 static ssize_t
-raid5_show_stripe_cache_size(raid5_conf_t *conf, char *page)
+raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
 {
-       return sprintf(page, "%d\n", conf->max_nr_stripes);
+       raid5_conf_t *conf = mddev_to_conf(mddev);
+       if (conf)
+               return sprintf(page, "%d\n", conf->max_nr_stripes);
+       else
+               return 0;
 }
 
 static ssize_t
-raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len)
+raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
 {
+       raid5_conf_t *conf = mddev_to_conf(mddev);
        char *end;
        int new;
        if (len >= PAGE_SIZE)
                return -EINVAL;
+       if (!conf)
+               return -ENODEV;
 
        new = simple_strtoul(page, &end, 10);
        if (!*page || (*end && *end != '\n') )
@@ -1765,68 +1778,33 @@ raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len)
        }
        return len;
 }
-static struct raid5_sysfs_entry raid5_stripecache_size = {
-       .attr = {.name = "stripe_cache_size", .mode = S_IRUGO | S_IWUSR },
-       .show = raid5_show_stripe_cache_size,
-       .store = raid5_store_stripe_cache_size,
-};
+
+static struct md_sysfs_entry
+raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
+                               raid5_show_stripe_cache_size,
+                               raid5_store_stripe_cache_size);
 
 static ssize_t
-raid5_show_stripe_cache_active(raid5_conf_t *conf, char *page)
+stripe_cache_active_show(mddev_t *mddev, char *page)
 {
-       return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
+       raid5_conf_t *conf = mddev_to_conf(mddev);
+       if (conf)
+               return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
+       else
+               return 0;
 }
 
-static struct raid5_sysfs_entry raid5_stripecache_active = {
-       .attr = {.name = "stripe_cache_active", .mode = S_IRUGO},
-       .show = raid5_show_stripe_cache_active,
-};
+static struct md_sysfs_entry
+raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
 
-static struct attribute *raid5_default_attrs[] = {
+static struct attribute *raid5_attrs[] =  {
        &raid5_stripecache_size.attr,
        &raid5_stripecache_active.attr,
        NULL,
 };
-
-static ssize_t
-raid5_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-       struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr);
-       raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj);
-
-       if (!entry->show)
-               return -EIO;
-       return entry->show(conf, page);
-}
-
-static ssize_t
-raid5_attr_store(struct kobject *kobj, struct attribute *attr,
-             const char *page, size_t length)
-{
-       struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr);
-       raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj);
-
-       if (!entry->store)
-               return -EIO;
-       return entry->store(conf, page, length);
-}
-
-static void raid5_free(struct kobject *ko)
-{
-       raid5_conf_t *conf = container_of(ko, raid5_conf_t, kobj);
-       kfree(conf);
-}
-
-
-static struct sysfs_ops raid5_sysfs_ops = {
-       .show           = raid5_attr_show,
-       .store          = raid5_attr_store,
-};
-
-static struct kobj_type raid5_ktype = {
-       .release        = raid5_free,
-       .sysfs_ops      = &raid5_sysfs_ops,
-       .default_attrs  = raid5_default_attrs,
+static struct attribute_group raid5_attrs_group = {
+       .name = NULL,
+       .attrs = raid5_attrs,
 };
 
 static int run(mddev_t *mddev)
@@ -1838,21 +1816,21 @@ static int run(mddev_t *mddev)
        struct list_head *tmp;
 
        if (mddev->level != 5 && mddev->level != 4) {
-               printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level);
+               printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
+                      mdname(mddev), mddev->level);
                return -EIO;
        }
 
-       mddev->private = kmalloc (sizeof (raid5_conf_t)
-                                 + mddev->raid_disks * sizeof(struct disk_info),
-                                 GFP_KERNEL);
+       mddev->private = kzalloc(sizeof (raid5_conf_t)
+                                + mddev->raid_disks * sizeof(struct disk_info),
+                                GFP_KERNEL);
        if ((conf = mddev->private) == NULL)
                goto abort;
-       memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
+
        conf->mddev = mddev;
 
-       if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
+       if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
                goto abort;
-       memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
 
        spin_lock_init(&conf->device_lock);
        init_waitqueue_head(&conf->wait_for_stripe);
@@ -1875,7 +1853,7 @@ static int run(mddev_t *mddev)
 
                disk->rdev = rdev;
 
-               if (rdev->in_sync) {
+               if (test_bit(In_sync, &rdev->flags)) {
                        char b[BDEVNAME_SIZE];
                        printk(KERN_INFO "raid5: device %s operational as raid"
                                " disk %d\n", bdevname(rdev->bdev,b),
@@ -1919,10 +1897,17 @@ static int run(mddev_t *mddev)
 
        if (mddev->degraded == 1 &&
            mddev->recovery_cp != MaxSector) {
-               printk(KERN_ERR 
-                       "raid5: cannot start dirty degraded array for %s\n",
-                       mdname(mddev));
-               goto abort;
+               if (mddev->ok_start_degraded)
+                       printk(KERN_WARNING
+                              "raid5: starting dirty degraded array: %s"
+                              "- data corruption possible.\n",
+                              mdname(mddev));
+               else {
+                       printk(KERN_ERR
+                              "raid5: cannot start dirty degraded array for %s\n",
+                              mdname(mddev));
+                       goto abort;
+               }
        }
 
        {
@@ -1934,7 +1919,7 @@ static int run(mddev_t *mddev)
                        goto abort;
                }
        }
-memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
+       memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
                 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
        if (grow_stripes(conf, conf->max_nr_stripes)) {
                printk(KERN_ERR 
@@ -1964,19 +1949,13 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
         */
        {
                int stripe = (mddev->raid_disks-1) * mddev->chunk_size
-                       / PAGE_CACHE_SIZE;
+                       / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }
 
        /* Ok, everything is just fine now */
-       conf->kobj.parent = kobject_get(&mddev->kobj);
-       strcpy(conf->kobj.name, "raid5");
-       conf->kobj.ktype = &raid5_ktype;
-       kobject_register(&conf->kobj);
-
-       if (mddev->bitmap)
-               mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
+       sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
 
        mddev->queue->unplug_fn = raid5_unplug_device;
        mddev->queue->issue_flush_fn = raid5_issue_flush;
@@ -1986,9 +1965,7 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
 abort:
        if (conf) {
                print_raid5_conf(conf);
-               if (conf->stripe_hashtbl)
-                       free_pages((unsigned long) conf->stripe_hashtbl,
-                                                       HASH_PAGES_ORDER);
+               kfree(conf->stripe_hashtbl);
                kfree(conf);
        }
        mddev->private = NULL;
@@ -2005,9 +1982,10 @@ static int stop(mddev_t *mddev)
        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
        shrink_stripes(conf);
-       free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
+       kfree(conf->stripe_hashtbl);
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
-       kobject_unregister(&conf->kobj);
+       sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
+       kfree(conf);
        mddev->private = NULL;
        return 0;
 }
@@ -2032,12 +2010,12 @@ static void print_sh (struct stripe_head *sh)
 static void printall (raid5_conf_t *conf)
 {
        struct stripe_head *sh;
+       struct hlist_node *hn;
        int i;
 
        spin_lock_irq(&conf->device_lock);
        for (i = 0; i < NR_HASH; i++) {
-               sh = conf->stripe_hashtbl[i];
-               for (; sh; sh = sh->hash_next) {
+               hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
                        if (sh->raid_conf != conf)
                                continue;
                        print_sh(sh);
@@ -2057,7 +2035,7 @@ static void status (struct seq_file *seq, mddev_t *mddev)
        for (i = 0; i < conf->raid_disks; i++)
                seq_printf (seq, "%s",
                               conf->disks[i].rdev &&
-                              conf->disks[i].rdev->in_sync ? "U" : "_");
+                              test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
        seq_printf (seq, "]");
 #if RAID5_DEBUG
 #define D(x) \
@@ -2084,7 +2062,7 @@ static void print_raid5_conf (raid5_conf_t *conf)
                tmp = conf->disks + i;
                if (tmp->rdev)
                printk(" disk %d, o:%d, dev:%s\n",
-                       i, !tmp->rdev->faulty,
+                       i, !test_bit(Faulty, &tmp->rdev->flags),
                        bdevname(tmp->rdev->bdev,b));
        }
 }
@@ -2098,12 +2076,12 @@ static int raid5_spare_active(mddev_t *mddev)
        for (i = 0; i < conf->raid_disks; i++) {
                tmp = conf->disks + i;
                if (tmp->rdev
-                   && !tmp->rdev->faulty
-                   && !tmp->rdev->in_sync) {
+                   && !test_bit(Faulty, &tmp->rdev->flags)
+                   && !test_bit(In_sync, &tmp->rdev->flags)) {
                        mddev->degraded--;
                        conf->failed_disks--;
                        conf->working_disks++;
-                       tmp->rdev->in_sync = 1;
+                       set_bit(In_sync, &tmp->rdev->flags);
                }
        }
        print_raid5_conf(conf);
@@ -2120,7 +2098,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
        print_raid5_conf(conf);
        rdev = p->rdev;
        if (rdev) {
-               if (rdev->in_sync ||
+               if (test_bit(In_sync, &rdev->flags) ||
                    atomic_read(&rdev->nr_pending)) {
                        err = -EBUSY;
                        goto abort;
@@ -2155,12 +2133,12 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
         */
        for (disk=0; disk < mddev->raid_disks; disk++)
                if ((p=conf->disks + disk)->rdev == NULL) {
-                       rdev->in_sync = 0;
+                       clear_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = disk;
                        found = 1;
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
-                       p->rdev = rdev;
+                       rcu_assign_pointer(p->rdev, rdev);
                        break;
                }
        print_raid5_conf(conf);
@@ -2210,17 +2188,30 @@ static void raid5_quiesce(mddev_t *mddev, int state)
                spin_unlock_irq(&conf->device_lock);
                break;
        }
-       if (mddev->thread) {
-               if (mddev->bitmap)
-                       mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-               else
-                       mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
-               md_wakeup_thread(mddev->thread);
-       }
 }
-static mdk_personality_t raid5_personality=
+
+static struct mdk_personality raid5_personality =
 {
        .name           = "raid5",
+       .level          = 5,
+       .owner          = THIS_MODULE,
+       .make_request   = make_request,
+       .run            = run,
+       .stop           = stop,
+       .status         = status,
+       .error_handler  = error,
+       .hot_add_disk   = raid5_add_disk,
+       .hot_remove_disk= raid5_remove_disk,
+       .spare_active   = raid5_spare_active,
+       .sync_request   = sync_request,
+       .resize         = raid5_resize,
+       .quiesce        = raid5_quiesce,
+};
+
+static struct mdk_personality raid4_personality =
+{
+       .name           = "raid4",
+       .level          = 4,
        .owner          = THIS_MODULE,
        .make_request   = make_request,
        .run            = run,
@@ -2235,17 +2226,24 @@ static mdk_personality_t raid5_personality=
        .quiesce        = raid5_quiesce,
 };
 
-static int __init raid5_init (void)
+static int __init raid5_init(void)
 {
-       return register_md_personality (RAID5, &raid5_personality);
+       register_md_personality(&raid5_personality);
+       register_md_personality(&raid4_personality);
+       return 0;
 }
 
-static void raid5_exit (void)
+static void raid5_exit(void)
 {
-       unregister_md_personality (RAID5);
+       unregister_md_personality(&raid5_personality);
+       unregister_md_personality(&raid4_personality);
 }
 
 module_init(raid5_init);
 module_exit(raid5_exit);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("md-personality-4"); /* RAID5 */
+MODULE_ALIAS("md-raid5");
+MODULE_ALIAS("md-raid4");
+MODULE_ALIAS("md-level-5");
+MODULE_ALIAS("md-level-4");