diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index c1293f1..05ba8ac 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -28,6 +28,7 @@
 #include <linux/kdev_t.h>
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -82,7 +83,6 @@ static void mmc_blk_put(struct mmc_blk_data *md)
        md->usage--;
        if (md->usage == 0) {
                put_disk(md->disk);
-               mmc_cleanup_queue(&md->queue);
                kfree(md);
        }
        mutex_unlock(&open_lock);
@@ -154,15 +154,80 @@ static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
        return stat;
 }
 
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+       int err;
+       u32 blocks;
+
+       struct mmc_request mrq;
+       struct mmc_command cmd;
+       struct mmc_data data;
+       unsigned int timeout_us;
+
+       struct scatterlist sg;
+
+       memset(&cmd, 0, sizeof(struct mmc_command));
+
+       cmd.opcode = MMC_APP_CMD;
+       cmd.arg = card->rca << 16;
+       cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+       err = mmc_wait_for_cmd(card->host, &cmd, 0);
+       if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD))
+               return (u32)-1;
+
+       memset(&cmd, 0, sizeof(struct mmc_command));
+
+       cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+       cmd.arg = 0;
+       cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+       memset(&data, 0, sizeof(struct mmc_data));
+
+       data.timeout_ns = card->csd.tacc_ns * 100;
+       data.timeout_clks = card->csd.tacc_clks * 100;
+
+       timeout_us = data.timeout_ns / 1000;
+       timeout_us += data.timeout_clks * 1000 /
+               (card->host->ios.clock / 1000);
+
+       if (timeout_us > 100000) {
+               data.timeout_ns = 100000000;
+               data.timeout_clks = 0;
+       }
+
+       data.blksz = 4;
+       data.blocks = 1;
+       data.flags = MMC_DATA_READ;
+       data.sg = &sg;
+       data.sg_len = 1;
+
+       memset(&mrq, 0, sizeof(struct mmc_request));
+
+       mrq.cmd = &cmd;
+       mrq.data = &data;
+
+       sg_init_one(&sg, &blocks, 4);
+
+       mmc_wait_for_req(card->host, &mrq);
+
+       if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE)
+               return (u32)-1;
+
+       blocks = ntohl(blocks);
+
+       return blocks;
+}
+
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request brq;
-       int ret;
+       int ret = 1;
 
        if (mmc_card_claim_host(card))
-               goto cmd_err;
+               goto flush_queue;
 
        do {
                struct mmc_command cmd;
@@ -172,22 +237,29 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;
 
-               brq.cmd.arg = req->sector << 9;
+               brq.cmd.arg = req->sector;
+               if (!mmc_card_blockaddr(card))
+                       brq.cmd.arg <<= 9;
                brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
                brq.data.blksz = 1 << md->block_bits;
-               brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+               brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
+               if (brq.data.blocks > card->host->max_blk_count)
+                       brq.data.blocks = card->host->max_blk_count;
 
                mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
 
                /*
                 * If the host doesn't support multiple block writes, force
-                * block writes to single block.
+                * block writes to single block. SD cards are excepted from
+                * this rule as they support querying the number of
+                * successfully written sectors.
                 */
                if (rq_data_dir(req) != READ &&
-                   !(card->host->caps & MMC_CAP_MULTIWRITE))
+                   !(card->host->caps & MMC_CAP_MULTIWRITE) &&
+                   !mmc_card_sd(card))
                        brq.data.blocks = 1;
 
                if (brq.data.blocks > 1) {
@@ -276,24 +348,41 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        return 1;
 
  cmd_err:
-       mmc_card_release_host(card);
-
-       ret = 1;
-
-       /*
-        * For writes and where the host claims to support proper
-        * error reporting, we first ok the successful blocks.
+       /*
+        * If this is an SD card and we're writing, we can first
+        * mark the known good sectors as ok.
+        *
+        * If the card is not SD, we can still ok written sectors
+        * if the controller can do proper error reporting.
         *
         * For reads we just fail the entire chunk as that should
         * be safe in all cases.
         */
-       if (rq_data_dir(req) != READ &&
-           (card->host->caps & MMC_CAP_MULTIWRITE)) {
+       if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
+               u32 blocks;
+               unsigned int bytes;
+
+               blocks = mmc_sd_num_wr_blocks(card);
+               if (blocks != (u32)-1) {
+                       if (card->csd.write_partial)
+                               bytes = blocks << md->block_bits;
+                       else
+                               bytes = blocks << 9;
+                       spin_lock_irq(&md->lock);
+                       ret = end_that_request_chunk(req, 1, bytes);
+                       spin_unlock_irq(&md->lock);
+               }
+       } else if (rq_data_dir(req) != READ &&
+                  (card->host->caps & MMC_CAP_MULTIWRITE)) {
                spin_lock_irq(&md->lock);
                ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }
 
+flush_queue:
+
+       mmc_card_release_host(card);
+
        spin_lock_irq(&md->lock);
        while (ret) {
                ret = end_that_request_chunk(req, 0,
@@ -410,6 +499,10 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
        struct mmc_command cmd;
        int err;
 
+       /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
+       if (mmc_card_blockaddr(card))
+               return 0;
+
        mmc_card_claim_host(card);
        cmd.opcode = MMC_SET_BLOCKLEN;
        cmd.arg = 1 << md->block_bits;
@@ -467,12 +560,11 @@ static void mmc_blk_remove(struct mmc_card *card)
        if (md) {
                int devidx;
 
+               /* Stop new requests from getting into the queue */
                del_gendisk(md->disk);
 
-               /*
-                * I think this is needed.
-                */
-               md->disk->queue = NULL;
+               /* Then flush out any already in there */
+               mmc_cleanup_queue(&md->queue);
 
                devidx = md->disk->first_minor >> MMC_SHIFT;
                __clear_bit(devidx, dev_use);
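
The cmd_err path above acknowledges only the sectors the SD card reports as successfully written (via ACMD22, SD_APP_SEND_NUM_WR_BLKS) before failing the rest of the request. Below is a minimal standalone sketch, not part of the patch, of just that byte-accounting step, assuming the ACMD22 count has already been read into a big-endian 32-bit word; the names "write_partial" and "block_bits" are illustrative stand-ins for the driver's csd.write_partial flag and md->block_bits.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(), the same byte swap the patch applies */

/*
 * Sketch: how many bytes could safely be acknowledged to the block
 * layer after a failed multi-block write, given the raw block count
 * returned by ACMD22 (the card reports it big-endian).
 */
static uint32_t acked_bytes(uint32_t wire_blocks, int write_partial,
			    unsigned int block_bits)
{
	uint32_t blocks = ntohl(wire_blocks);

	/*
	 * Cards that support partial writes are trusted at the negotiated
	 * block size; otherwise fall back to 512-byte units, mirroring
	 * the "blocks << 9" branch in the patch.
	 */
	return write_partial ? blocks << block_bits : blocks << 9;
}

int main(void)
{
	/* e.g. the card reports 8 blocks were written before the error */
	printf("ack %u bytes\n", acked_bytes(htonl(8), 0, 9));
	return 0;
}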