[BLOCK] Don't clear sg_dma_len/addr() in blk_rq_map_sg()
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4fde3a3..fb8fb88 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <linux/scatterlist.h>
 
 /*
  * for max sense size
@@ -304,23 +305,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 EXPORT_SYMBOL(blk_queue_ordered);
 
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q:     the request queue
- * @iff:   the function to be called issuing the flush
- *
- * Description:
- *   If a driver supports issuing a flush command, the support is notified
- *   to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
-       q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
 /*
  * Cache flushing for ordered writes handling
  */
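
With blk_queue_issue_flush_fn() removed, a driver that supports cache flushing
advertises it through the ordered/barrier machinery instead. A minimal sketch,
assuming a hypothetical driver (the mydrv_* names are illustrative and not part
of this patch):

    #include <linux/blkdev.h>

    /* Called by the barrier code; builds the device's cache-flush command. */
    static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
    {
            /* fill @rq with a device-specific cache-flush command (assumption) */
    }

    static int mydrv_init_queue(struct request_queue *q)
    {
            /* drain outstanding requests, then issue the flush prepared above */
            return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
                                     mydrv_prepare_flush);
    }
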
@@ -1335,10 +1319,11 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
  * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-                 struct scatterlist *sg)
+                 struct scatterlist *sglist)
 {
        struct bio_vec *bvec, *bvprv;
        struct req_iterator iter;
+       struct scatterlist *sg;
        int nsegs, cluster;
 
        nsegs = 0;
@@ -1348,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         * for each bio in rq
         */
        bvprv = NULL;
+       sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
                int nbytes = bvec->bv_len;
 
                if (bvprv && cluster) {
-                       if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+                       if (sg->length + nbytes > q->max_segment_size)
                                goto new_segment;
 
                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -1360,19 +1346,25 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                                goto new_segment;
 
-                       sg[nsegs - 1].length += nbytes;
+                       sg->length += nbytes;
                } else {
 new_segment:
-                       memset(&sg[nsegs],0,sizeof(struct scatterlist));
-                       sg[nsegs].page = bvec->bv_page;
-                       sg[nsegs].length = nbytes;
-                       sg[nsegs].offset = bvec->bv_offset;
-
+                       if (!sg)
+                               sg = sglist;
+                       else
+                               sg = sg_next(sg);
+
+                       sg_set_page(sg, bvec->bv_page);
+                       sg->length = nbytes;
+                       sg->offset = bvec->bv_offset;
                        nsegs++;
                }
                bvprv = bvec;
        } /* segments in rq */
 
+       if (sg)
+               __sg_mark_end(sg);
+
        return nsegs;
 }
 
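
Since the mapped scatterlist now carries an explicit end mark, a consumer walks
it with sg_next()/for_each_sg() rather than indexing a flat array. A minimal,
hypothetical driver-side sketch (the example_* name is illustrative):

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    static void example_show_segments(struct request_queue *q, struct request *rq,
                                      struct scatterlist *sglist)
    {
            struct scatterlist *sg;
            int i, count;

            count = blk_rq_map_sg(q, rq, sglist);
            /* entries 0..count-1 are valid; the last one holds the end mark */
            for_each_sg(sglist, sg, count, i)
                    printk(KERN_DEBUG "segment %d: offset %u, length %u\n",
                           i, sg->offset, sg->length);
    }
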
@@ -1799,6 +1791,7 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_trace_shutdown(q);
 
+       bdi_destroy(&q->backing_dev_info);
        kmem_cache_free(requestq_cachep, q);
 }
 
@@ -1852,21 +1845,27 @@ static struct kobj_type queue_ktype;
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        struct request_queue *q;
+       int err;
 
        q = kmem_cache_alloc_node(requestq_cachep,
                                gfp_mask | __GFP_ZERO, node_id);
        if (!q)
                return NULL;
 
+       q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+       q->backing_dev_info.unplug_io_data = q;
+       err = bdi_init(&q->backing_dev_info);
+       if (err) {
+               kmem_cache_free(requestq_cachep, q);
+               return NULL;
+       }
+
        init_timer(&q->unplug_timer);
 
        kobject_set_name(&q->kobj, "%s", "queue");
        q->kobj.ktype = &queue_ktype;
        kobject_init(&q->kobj);
 
-       q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-       q->backing_dev_info.unplug_io_data = q;
-
        mutex_init(&q->sysfs_lock);
 
        return q;
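
The same pairing applies to any structure embedding a struct backing_dev_info:
bdi_init() after allocation, with its failure handled as in the hunk above, and
bdi_destroy() on the release path, mirroring blk_release_queue() earlier in this
diff. A hypothetical sketch (struct mydev and the mydev_* helpers are
illustrative):

    #include <linux/backing-dev.h>
    #include <linux/slab.h>

    struct mydev {
            struct backing_dev_info bdi;
    };

    static struct mydev *mydev_alloc(gfp_t gfp)
    {
            struct mydev *d = kzalloc(sizeof(*d), gfp);

            if (!d)
                    return NULL;
            /* bdi_init() allocates per-bdi state and can fail */
            if (bdi_init(&d->bdi)) {
                    kfree(d);
                    return NULL;
            }
            return d;
    }

    static void mydev_free(struct mydev *d)
    {
            bdi_destroy(&d->bdi);   /* matches bdi_init() in mydev_alloc() */
            kfree(d);
    }
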
@@ -2666,6 +2665,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
 EXPORT_SYMBOL(blk_execute_rq);
 
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+       if (err)
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+       complete(bio->bi_private);
+}
+
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:      blockdev to issue flush for
@@ -2678,7 +2685,10 @@ EXPORT_SYMBOL(blk_execute_rq);
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
+       DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
+       struct bio *bio;
+       int ret;
 
        if (bdev->bd_disk == NULL)
                return -ENXIO;
@@ -2686,10 +2696,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;
-       if (!q->issue_flush_fn)
-               return -EOPNOTSUPP;
 
-       return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+       bio = bio_alloc(GFP_KERNEL, 0);
+       if (!bio)
+               return -ENOMEM;
+
+       bio->bi_end_io = bio_end_empty_barrier;
+       bio->bi_private = &wait;
+       bio->bi_bdev = bdev;
+       submit_bio(1 << BIO_RW_BARRIER, bio);
+
+       wait_for_completion(&wait);
+
+       /*
+        * The driver must store the error location in ->bi_sector, if
+        * it supports it. For non-stacked drivers, this should be copied
+        * from rq->sector.
+        */
+       if (error_sector)
+               *error_sector = bio->bi_sector;
+
+       ret = 0;
+       if (!bio_flagged(bio, BIO_UPTODATE))
+               ret = -EIO;
+
+       bio_put(bio);
+       return ret;
 }
 
 EXPORT_SYMBOL(blkdev_issue_flush);
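
Callers are unchanged by the switch to an empty barrier bio: blkdev_issue_flush()
still blocks until the flush completes and returns 0 on success or a negative
errno (-EIO above when the barrier bio does not end up uptodate). A hypothetical
caller sketch:

    #include <linux/blkdev.h>
    #include <linux/fs.h>

    static int example_flush_cache(struct block_device *bdev)
    {
            sector_t error_sector = 0;
            int err;

            err = blkdev_issue_flush(bdev, &error_sector);
            if (err)
                    printk(KERN_ERR "cache flush failed near sector %llu (%d)\n",
                           (unsigned long long)error_sector, err);
            return err;
    }
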
@@ -3337,7 +3369,7 @@ void submit_bio(int rw, struct bio *bio)
                if (unlikely(block_dump)) {
                        char b[BDEVNAME_SIZE];
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-                               current->comm, current->pid,
+                               current->comm, task_pid_nr(current),
                                (rw & WRITE) ? "WRITE" : "READ",
                                (unsigned long long)bio->bi_sector,
                                bdevname(bio->bi_bdev,b));
@@ -3709,7 +3741,7 @@ EXPORT_SYMBOL(end_dequeued_request);
 
 /**
  * end_request - end I/O on the current segment of the request
- * @rq:                the request being processed
+ * @req:       the request being processed
  * @uptodate:  error value or 0/1 uptodate flag
  *
  * Description:
@@ -4022,7 +4054,6 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                        max_hw_sectors_kb = q->max_hw_sectors >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
-       int ra_kb;
 
        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;
@@ -4031,14 +4062,6 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
         * values synchronously:
         */
        spin_lock_irq(q->queue_lock);
-       /*
-        * Trim readahead window as well, if necessary:
-        */
-       ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
-       if (ra_kb > max_sectors_kb)
-               q->backing_dev_info.ra_pages =
-                               max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
-
        q->max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);
 
@@ -4052,7 +4075,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
        return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(q->max_phys_segments, page);
+}
 
+static ssize_t queue_max_segments_store(struct request_queue *q,
+                                       const char *page, size_t count)
+{
+       unsigned long segments;
+       ssize_t ret = queue_var_store(&segments, page, count);
+
+       spin_lock_irq(q->queue_lock);
+       q->max_phys_segments = segments;
+       spin_unlock_irq(q->queue_lock);
+
+       return ret;
+}
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -4076,6 +4115,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .show = queue_max_hw_sectors_show,
 };
 
+static struct queue_sysfs_entry queue_max_segments_entry = {
+       .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_max_segments_show,
+       .store = queue_max_segments_store,
+};
+
 static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
@@ -4087,6 +4132,7 @@ static struct attribute *default_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
+       &queue_max_segments_entry.attr,
        &queue_iosched_entry.attr,
        NULL,
 };
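
With queue_max_segments_entry wired into default_attrs, the new value is exposed
next to the existing queue attributes, presumably as
/sys/block/<dev>/queue/max_segments. A hypothetical user-space sketch reading it
(the device name "sda" is an assumption):

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/block/sda/queue/max_segments", "r");

            if (!f) {
                    perror("max_segments");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("max_segments: %s", buf);
            fclose(f);
            return 0;
    }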