Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
[powerpc.git] / drivers / mmc / imxmmc.c
1 /*
2  *  linux/drivers/mmc/imxmmc.c - Motorola i.MX MMCI driver
3  *
4  *  Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
5  *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
6  *
7  *  derived from pxamci.c by Russell King
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  *  2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz>
14  *             Changed to conform redesigned i.MX scatter gather DMA interface
15  *
16  *  2005-11-04 Pavel Pisa <pisa@cmp.felk.cvut.cz>
17  *             Updated for 2.6.14 kernel
18  *
19  *  2005-12-13 Jay Monkman <jtm@smoothsmoothie.com>
20  *             Found and corrected problems in the write path
21  *
22  *  2005-12-30 Pavel Pisa <pisa@cmp.felk.cvut.cz>
23  *             The event handling rewritten right way in softirq.
24  *             Added many ugly hacks and delays to overcome SDHC
25  *             deficiencies
26  *
27  */
28
29 #ifdef CONFIG_MMC_DEBUG
30 #define DEBUG
31 #else
32 #undef  DEBUG
33 #endif
34
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/ioport.h>
38 #include <linux/platform_device.h>
39 #include <linux/interrupt.h>
40 #include <linux/blkdev.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/mmc/host.h>
43 #include <linux/mmc/card.h>
44 #include <linux/mmc/protocol.h>
45 #include <linux/delay.h>
46
47 #include <asm/dma.h>
48 #include <asm/io.h>
49 #include <asm/irq.h>
50 #include <asm/sizes.h>
51 #include <asm/arch/mmc.h>
52 #include <asm/arch/imx-dma.h>
53
54 #include "imxmmc.h"
55
#define DRIVER_NAME "imx-mmc"

/*
 * Default interrupt mask: all listed sources masked (disabled).
 * imxmci_start_cmd() clears individual bits from this mask to enable
 * the interrupts needed for the command in flight.
 */
#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
                      INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
                      INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
61
/* Per-controller state for one i.MX SDHC instance */
struct imxmci_host {
        struct mmc_host         *mmc;
        spinlock_t              lock;           /* protects imask updates */
        struct resource         *res;           /* MMIO region; base used for DMA FIFO address */
        int                     irq;
        imx_dmach_t             dma;            /* i.MX DMA channel used for block transfers */
        unsigned int            clkrt;
        unsigned int            cmdat;
        volatile unsigned int   imask;          /* shadow of MMC_INT_MASK, written under lock */
        unsigned int            power_mode;
        unsigned int            present;        /* card present flag, checked by tasklet */
        struct imxmmc_platform_data *pdata;

        /* request currently being processed (NULL when idle) */
        struct mmc_request      *req;
        struct mmc_command      *cmd;
        struct mmc_data         *data;

        struct timer_list       timer;
        struct tasklet_struct   tasklet;        /* runs imxmci_tasklet_fnc */
        unsigned int            status_reg;     /* STATUS latched at IRQ/DMA-IRQ time */
        unsigned long           pending_events; /* IMXMCI_PEND_* bits, atomic bitops */
        /* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
        u16                     *data_ptr;      /* current position in PIO buffer */
        unsigned int            data_cnt;       /* bytes transferred so far by PIO */
        atomic_t                stuck_timeout;  /* watchdog counter; >4 means HW stuck */

        unsigned int            dma_nents;      /* entries returned by dma_map_sg() */
        unsigned int            dma_size;       /* total bytes of current data transfer */
        unsigned int            dma_dir;        /* DMA_FROM_DEVICE or DMA_TO_DEVICE */
        int                     dma_allocated;

        unsigned char           actual_bus_width;       /* MMC_BUS_WIDTH_1 or _4 */

        int                     prev_cmd_code;  /* opcode of last finished command (debug) */
};
97
/*
 * Bit numbers for host->pending_events, manipulated with atomic bitops
 * (set_bit/test_bit/...).  They carry state from IRQ/DMA handlers to the
 * tasklet.
 */
#define IMXMCI_PEND_IRQ_b       0
#define IMXMCI_PEND_DMA_END_b   1
#define IMXMCI_PEND_DMA_ERR_b   2
#define IMXMCI_PEND_WAIT_RESP_b 3
#define IMXMCI_PEND_DMA_DATA_b  4
#define IMXMCI_PEND_CPU_DATA_b  5
#define IMXMCI_PEND_CARD_XCHG_b 6
#define IMXMCI_PEND_SET_INIT_b  7
#define IMXMCI_PEND_STARTED_b   8

/* Mask forms of the event bits above, for multi-bit clears */
#define IMXMCI_PEND_IRQ_m       (1 << IMXMCI_PEND_IRQ_b)
#define IMXMCI_PEND_DMA_END_m   (1 << IMXMCI_PEND_DMA_END_b)
#define IMXMCI_PEND_DMA_ERR_m   (1 << IMXMCI_PEND_DMA_ERR_b)
#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
#define IMXMCI_PEND_DMA_DATA_m  (1 << IMXMCI_PEND_DMA_DATA_b)
#define IMXMCI_PEND_CPU_DATA_m  (1 << IMXMCI_PEND_CPU_DATA_b)
#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
#define IMXMCI_PEND_SET_INIT_m  (1 << IMXMCI_PEND_SET_INIT_b)
#define IMXMCI_PEND_STARTED_m   (1 << IMXMCI_PEND_STARTED_b)
117
118 static void imxmci_stop_clock(struct imxmci_host *host)
119 {
120         int i = 0;
121         MMC_STR_STP_CLK &= ~STR_STP_CLK_START_CLK;
122         while(i < 0x1000) {
123                 if(!(i & 0x7f))
124                         MMC_STR_STP_CLK |= STR_STP_CLK_STOP_CLK;
125
126                 if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) {
127                         /* Check twice before cut */
128                         if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN))
129                                 return;
130                 }
131
132                 i++;
133         }
134         dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
135 }
136
/*
 * Start the SDHC bus clock and wait until the controller reports that it
 * is actually running (or until the IRQ handler signals STARTED).
 * Returns 0 on success, -1 when the clock refuses to start after
 * 256 * 128 polling loops.
 */
static int imxmci_start_clock(struct imxmci_host *host)
{
        unsigned int trials = 0;
        unsigned int delay_limit = 128;
        unsigned long flags;

        MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK;

        clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);

        /*
         * Command start of the clock, this usually succeeds in less
         * than 6 delay loops, but during card detection (low clockrate)
         * it takes up to 5000 delay loops and sometimes fails for the first time
         */
        MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;

        do {
                unsigned int delay = delay_limit;

                while(delay--){
                        if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
                                /* Check twice before cut */
                                if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
                                        return 0;

                        /* imxmci_irq() sets STARTED on any SDHC interrupt,
                         * which also proves the clock got going */
                        if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
                                return 0;
                }

                local_irq_save(flags);
                /*
                 * Ensure, that request is not doubled under all possible circumstances.
                 * It is possible, that clock running state is missed, because some other
                 * IRQ or schedule delays this function execution and the clock has
                 * been already stopped by other means (response processing, SDHC HW)
                 */
                if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
                        MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
                local_irq_restore(flags);

        } while(++trials<256);

        dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");

        return -1;
}
184
185 static void imxmci_softreset(void)
186 {
187         /* reset sequence */
188         MMC_STR_STP_CLK = 0x8;
189         MMC_STR_STP_CLK = 0xD;
190         MMC_STR_STP_CLK = 0x5;
191         MMC_STR_STP_CLK = 0x5;
192         MMC_STR_STP_CLK = 0x5;
193         MMC_STR_STP_CLK = 0x5;
194         MMC_STR_STP_CLK = 0x5;
195         MMC_STR_STP_CLK = 0x5;
196         MMC_STR_STP_CLK = 0x5;
197         MMC_STR_STP_CLK = 0x5;
198
199         MMC_RES_TO = 0xff;
200         MMC_BLK_LEN = 512;
201         MMC_NOB = 1;
202 }
203
204 static int imxmci_busy_wait_for_status(struct imxmci_host *host,
205                         unsigned int *pstat, unsigned int stat_mask,
206                         int timeout, const char *where)
207 {
208         int loops=0;
209         while(!(*pstat & stat_mask)) {
210                 loops+=2;
211                 if(loops >= timeout) {
212                         dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
213                                 where, *pstat, stat_mask);
214                         return -1;
215                 }
216                 udelay(2);
217                 *pstat |= MMC_STATUS;
218         }
219         if(!loops)
220                 return 0;
221
222         /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
223         if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
224                 dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
225                         loops, where, *pstat, stat_mask);
226         return loops;
227 }
228
/*
 * Program the controller and host state for the data phase of a request.
 *
 * Transfers smaller than 512 bytes are done by CPU-driven PIO
 * (IMXMCI_PEND_CPU_DATA_b set); anything else is mapped for DMA and the
 * DMA channel is configured.  For reads the DMA engine is started here,
 * for writes it is started later from imxmci_cmd_done() after the
 * command response arrives.
 */
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
        unsigned int nob = data->blocks;
        unsigned int blksz = data->blksz;
        unsigned int datasz = nob * blksz;
        int i;

        if (data->flags & MMC_DATA_STREAM)
                nob = 0xffff;

        host->data = data;
        data->bytes_xfered = 0;

        MMC_NOB = nob;
        MMC_BLK_LEN = blksz;

        /*
         * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
         * We are in big troubles for non-512 byte transfers according to note in the paragraph
         * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
         * The situation is even more complex in reality. The SDHC is not able to handle well
         * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
         * This is required for SCR read at least.
         */
        if (datasz < 512) {
                host->dma_size = datasz;
                if (data->flags & MMC_DATA_READ) {
                        host->dma_dir = DMA_FROM_DEVICE;

                        /* Hack to enable read SCR */
                        MMC_NOB = 1;
                        MMC_BLK_LEN = 512;
                } else {
                        host->dma_dir = DMA_TO_DEVICE;
                }

                /* Convert back to virtual address */
                host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
                host->data_cnt = 0;

                /* PIO path: flag CPU_DATA instead of DMA_DATA */
                clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
                set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

                return;
        }

        if (data->flags & MMC_DATA_READ) {
                host->dma_dir = DMA_FROM_DEVICE;
                host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
                                                data->sg_len,  host->dma_dir);

                imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
                        host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);

                /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
                CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
        } else {
                host->dma_dir = DMA_TO_DEVICE;

                host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
                                                data->sg_len,  host->dma_dir);

                imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
                        host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);

                /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
                CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
        }

#if 1   /* This code is there only for consistency checking and can be disabled in future */
        host->dma_size = 0;
        for(i=0; i<host->dma_nents; i++)
                host->dma_size+=data->sg[i].length;

        if (datasz > host->dma_size) {
                dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
                       datasz, host->dma_size);
        }
#endif

        host->dma_size = datasz;

        wmb();

        if(host->actual_bus_width == MMC_BUS_WIDTH_4)
                BLR(host->dma) = 0;     /* burst 64 byte read / 64 bytes write */
        else
                BLR(host->dma) = 16;    /* burst 16 byte read / 16 bytes write */

        RSSR(host->dma) = DMA_REQ_SDHC;

        set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
        clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

        /* start DMA engine for read, write is delayed after initial response */
        if (host->dma_dir == DMA_FROM_DEVICE) {
                imx_dma_enable(host->dma);
        }
}
328
/*
 * Program and launch one command: stop the clock (required before
 * programming), translate the MMC response type and bus width into
 * CMD_DAT_CONT bits, write opcode/argument, unmask the interrupts this
 * command needs, and restart the clock.
 */
static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
        unsigned long flags;
        u32 imask;

        WARN_ON(host->cmd != NULL);
        host->cmd = cmd;

        /* Ensure, that clock are stopped else command programming and start fails */
        imxmci_stop_clock(host);

        if (cmd->flags & MMC_RSP_BUSY)
                cmdat |= CMD_DAT_CONT_BUSY;

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_R1: /* short CRC, OPCODE */
        case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
                cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
                break;
        case MMC_RSP_R2: /* long 136 bit + CRC */
                cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
                break;
        case MMC_RSP_R3: /* short */
                cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
                break;
        default:
                break;
        }

        if ( test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events) )
                cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */

        if ( host->actual_bus_width == MMC_BUS_WIDTH_4 )
                cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;

        /* Argument is split into two 16-bit registers */
        MMC_CMD = cmd->opcode;
        MMC_ARGH = cmd->arg >> 16;
        MMC_ARGL = cmd->arg & 0xffff;
        MMC_CMD_DAT_CONT = cmdat;

        atomic_set(&host->stuck_timeout, 0);
        set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);


        /* Start from the fully-masked default and clear (enable) only the
         * interrupt sources this particular command can generate */
        imask = IMXMCI_INT_MASK_DEFAULT;
        imask &= ~INT_MASK_END_CMD_RES;
        if ( cmdat & CMD_DAT_CONT_DATA_ENABLE ) {
                /*imask &= ~INT_MASK_BUF_READY;*/
                imask &= ~INT_MASK_DATA_TRAN;
                if ( cmdat & CMD_DAT_CONT_WRITE )
                        imask &= ~INT_MASK_WRITE_OP_DONE;
                if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
                        imask &= ~INT_MASK_BUF_READY;
        }

        spin_lock_irqsave(&host->lock, flags);
        host->imask = imask;
        MMC_INT_MASK = host->imask;
        spin_unlock_irqrestore(&host->lock, flags);

        dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
                cmd->opcode, cmd->opcode, imask);

        imxmci_start_clock(host);
}
394
395 static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
396 {
397         unsigned long flags;
398
399         spin_lock_irqsave(&host->lock, flags);
400
401         host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
402                         IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
403
404         host->imask = IMXMCI_INT_MASK_DEFAULT;
405         MMC_INT_MASK = host->imask;
406
407         spin_unlock_irqrestore(&host->lock, flags);
408
409         if(req && req->cmd)
410                 host->prev_cmd_code = req->cmd->opcode;
411
412         host->req = NULL;
413         host->cmd = NULL;
414         host->data = NULL;
415         mmc_request_done(host->mmc, req);
416 }
417
418 static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
419 {
420         struct mmc_data *data = host->data;
421         int data_error;
422
423         if(test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)){
424                 imx_dma_disable(host->dma);
425                 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
426                              host->dma_dir);
427         }
428
429         if ( stat & STATUS_ERR_MASK ) {
430                 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",stat);
431                 if(stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
432                         data->error = MMC_ERR_BADCRC;
433                 else if(stat & STATUS_TIME_OUT_READ)
434                         data->error = MMC_ERR_TIMEOUT;
435                 else
436                         data->error = MMC_ERR_FAILED;
437         } else {
438                 data->bytes_xfered = host->dma_size;
439         }
440
441         data_error = data->error;
442
443         host->data = NULL;
444
445         return data_error;
446 }
447
448 static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
449 {
450         struct mmc_command *cmd = host->cmd;
451         int i;
452         u32 a,b,c;
453         struct mmc_data *data = host->data;
454
455         if (!cmd)
456                 return 0;
457
458         host->cmd = NULL;
459
460         if (stat & STATUS_TIME_OUT_RESP) {
461                 dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
462                 cmd->error = MMC_ERR_TIMEOUT;
463         } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
464                 dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
465                 cmd->error = MMC_ERR_BADCRC;
466         }
467
468         if(cmd->flags & MMC_RSP_PRESENT) {
469                 if(cmd->flags & MMC_RSP_136) {
470                         for (i = 0; i < 4; i++) {
471                                 u32 a = MMC_RES_FIFO & 0xffff;
472                                 u32 b = MMC_RES_FIFO & 0xffff;
473                                 cmd->resp[i] = a<<16 | b;
474                         }
475                 } else {
476                         a = MMC_RES_FIFO & 0xffff;
477                         b = MMC_RES_FIFO & 0xffff;
478                         c = MMC_RES_FIFO & 0xffff;
479                         cmd->resp[0] = a<<24 | b<<8 | c>>8;
480                 }
481         }
482
483         dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
484                 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
485
486         if (data && (cmd->error == MMC_ERR_NONE) && !(stat & STATUS_ERR_MASK)) {
487                 if (host->req->data->flags & MMC_DATA_WRITE) {
488
489                         /* Wait for FIFO to be empty before starting DMA write */
490
491                         stat = MMC_STATUS;
492                         if(imxmci_busy_wait_for_status(host, &stat,
493                                 STATUS_APPL_BUFF_FE,
494                                 40, "imxmci_cmd_done DMA WR") < 0) {
495                                 cmd->error = MMC_ERR_FIFO;
496                                 imxmci_finish_data(host, stat);
497                                 if(host->req)
498                                         imxmci_finish_request(host, host->req);
499                                 dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
500                                        stat);
501                                 return 0;
502                         }
503
504                         if(test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
505                                 imx_dma_enable(host->dma);
506                         }
507                 }
508         } else {
509                 struct mmc_request *req;
510                 imxmci_stop_clock(host);
511                 req = host->req;
512
513                 if(data)
514                         imxmci_finish_data(host, stat);
515
516                 if( req ) {
517                         imxmci_finish_request(host, req);
518                 } else {
519                         dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
520                 }
521         }
522
523         return 1;
524 }
525
526 static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
527 {
528         struct mmc_data *data = host->data;
529         int data_error;
530
531         if (!data)
532                 return 0;
533
534         data_error = imxmci_finish_data(host, stat);
535
536         if (host->req->stop) {
537                 imxmci_stop_clock(host);
538                 imxmci_start_cmd(host, host->req->stop, 0);
539         } else {
540                 struct mmc_request *req;
541                 req = host->req;
542                 if( req ) {
543                         imxmci_finish_request(host, req);
544                 } else {
545                         dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
546                 }
547         }
548
549         return 1;
550 }
551
/*
 * CPU-driven (PIO) transfer path for small blocks the DMA engine cannot
 * handle.  Moves data 16 bits at a time through MMC_BUFFER_ACCESS in
 * bursts sized to the current bus width, updating *pstat with the latest
 * STATUS reads along the way.
 *
 * Returns 1 when the transfer finished, -1 on read timeout, 0 when more
 * work remains.  The udelay() calls are deliberate workarounds for the
 * SDHC at clocks below 8 MHz (see file header note on SDHC deficiencies).
 */
static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
        int i;
        int burst_len;
        int trans_done = 0;
        unsigned int stat = *pstat;

        /* FIFO burst size depends on the configured bus width */
        if(host->actual_bus_width != MMC_BUS_WIDTH_4)
                burst_len = 16;
        else
                burst_len = 64;

        /* This is unfortunately required */
        dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
                stat);

        udelay(20);     /* required for clocks < 8MHz*/

        if(host->dma_dir == DMA_FROM_DEVICE) {
                imxmci_busy_wait_for_status(host, &stat,
                                STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
                                STATUS_TIME_OUT_READ,
                                50, "imxmci_cpu_driven_data read");

                /* Drain the FIFO; reads always run to 512 bytes because
                 * imxmci_setup_data() programmed BLK_LEN = 512 for short reads */
                while((stat & (STATUS_APPL_BUFF_FF |  STATUS_DATA_TRANS_DONE)) &&
                      !(stat & STATUS_TIME_OUT_READ) &&
                      (host->data_cnt < 512)) {

                        udelay(20);     /* required for clocks < 8MHz*/

                        for(i = burst_len; i>=2 ; i-=2) {
                                u16 data;
                                data = MMC_BUFFER_ACCESS;
                                udelay(10);     /* required for clocks < 8MHz*/
                                /* Store only bytes that fit into the caller's
                                 * buffer; the rest of the 512-byte block is
                                 * read from the FIFO and discarded */
                                if(host->data_cnt+2 <= host->dma_size) {
                                        *(host->data_ptr++) = data;
                                } else {
                                        if(host->data_cnt < host->dma_size)
                                                *(u8*)(host->data_ptr) = data;
                                }
                                host->data_cnt += 2;
                        }

                        stat = MMC_STATUS;

                        dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
                                host->data_cnt, burst_len, stat);
                }

                if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
                        trans_done = 1;

                /* Partial-block reads produce bogus CRC errors; suppress them */
                if(host->dma_size & 0x1ff)
                        stat &= ~STATUS_CRC_READ_ERR;

                if(stat & STATUS_TIME_OUT_READ) {
                        dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
                                stat);
                        trans_done = -1;
                }

        } else {
                imxmci_busy_wait_for_status(host, &stat,
                                STATUS_APPL_BUFF_FE,
                                20, "imxmci_cpu_driven_data write");

                while((stat & STATUS_APPL_BUFF_FE) &&
                      (host->data_cnt < host->dma_size)) {
                        /* Clamp the final burst to the remaining byte count */
                        if(burst_len >= host->dma_size - host->data_cnt) {
                                burst_len = host->dma_size - host->data_cnt;
                                host->data_cnt = host->dma_size;
                                trans_done = 1;
                        } else {
                                host->data_cnt += burst_len;
                        }

                        for(i = burst_len; i>0 ; i-=2)
                                MMC_BUFFER_ACCESS = *(host->data_ptr++);

                        stat = MMC_STATUS;

                        dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
                                burst_len, stat);
                }
        }

        *pstat = stat;

        return trans_done;
}
642
643 static void imxmci_dma_irq(int dma, void *devid)
644 {
645         struct imxmci_host *host = devid;
646         uint32_t stat = MMC_STATUS;
647
648         atomic_set(&host->stuck_timeout, 0);
649         host->status_reg = stat;
650         set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
651         tasklet_schedule(&host->tasklet);
652 }
653
654 static irqreturn_t imxmci_irq(int irq, void *devid)
655 {
656         struct imxmci_host *host = devid;
657         uint32_t stat = MMC_STATUS;
658         int handled = 1;
659
660         MMC_INT_MASK = host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT;
661
662         atomic_set(&host->stuck_timeout, 0);
663         host->status_reg = stat;
664         set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
665         set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
666         tasklet_schedule(&host->tasklet);
667
668         return IRQ_RETVAL(handled);;
669 }
670
/*
 * Deferred event processor.  Driven by pending_events bits set from the
 * IRQ/DMA handlers and the watchdog:
 *  - detects a stuck controller (stuck_timeout > 4) and forces error status,
 *  - on IRQ: completes commands, runs the CPU/PIO data path,
 *  - on DMA_END (once the response has been handled): completes data,
 *  - on CARD_XCHG: aborts everything in flight and reports card change.
 */
static void imxmci_tasklet_fnc(unsigned long data)
{
        struct imxmci_host *host = (struct imxmci_host *)data;
        u32 stat;
        unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
        int timeout = 0;

        if(atomic_read(&host->stuck_timeout) > 4) {
                char *what;
                timeout = 1;
                stat = MMC_STATUS;
                host->status_reg = stat;
                /* Classify which phase got stuck, for the diagnostics below */
                if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
                        if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
                                what = "RESP+DMA";
                        else
                                what = "RESP";
                else
                        if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
                                if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
                                        what = "DATA";
                                else
                                        what = "DMA";
                        else
                                what = "???";

                dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
                       what, stat, MMC_INT_MASK);
                dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
                       MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
                dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
                       host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
        }

        /* Card gone or hardware stuck: fake every error flag so the
         * completion paths below tear the request down */
        if(!host->present || timeout)
                host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
                                    STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;

        if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
                clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);

                stat = MMC_STATUS;
                /*
                 * This is not required in theory, but there is chance to miss some flag
                 * which clears automatically by mask write, FreeScale original code keeps
                 * stat from IRQ time so do I
                 */
                stat |= host->status_reg;

                /* CRC read errors are expected/bogus on the PIO path */
                if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
                        stat &= ~STATUS_CRC_READ_ERR;

                if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
                        imxmci_busy_wait_for_status(host, &stat,
                                        STATUS_END_CMD_RESP | STATUS_ERR_MASK,
                                        20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
                }

                if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
                        if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
                                imxmci_cmd_done(host, stat);
                        if(host->data && (stat & STATUS_ERR_MASK))
                                imxmci_data_done(host, stat);
                }

                if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
                        stat |= MMC_STATUS;
                        /* Non-zero means the PIO transfer finished (or timed out) */
                        if(imxmci_cpu_driven_data(host, &stat)){
                                if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
                                        imxmci_cmd_done(host, stat);
                                atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
                                                        &host->pending_events);
                                imxmci_data_done(host, stat);
                        }
                }
        }

        /* DMA end is only acted upon after the command response has been
         * processed (WAIT_RESP cleared) */
        if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
           !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {

                stat = MMC_STATUS;
                /* Same as above */
                stat |= host->status_reg;

                if(host->dma_dir == DMA_TO_DEVICE) {
                        data_dir_mask = STATUS_WRITE_OP_DONE;
                } else {
                        data_dir_mask = STATUS_DATA_TRANS_DONE;
                }

                if(stat & data_dir_mask) {
                        clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
                        imxmci_data_done(host, stat);
                }
        }

        /* Card insertion/removal: abort anything in flight with timeout
         * status and tell the MMC core to rescan */
        if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {

                if(host->cmd)
                        imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);

                if(host->data)
                        imxmci_data_done(host, STATUS_TIME_OUT_READ |
                                         STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);

                if(host->req)
                        imxmci_finish_request(host, host->req);

                mmc_detect_change(host->mmc, msecs_to_jiffies(100));

        }
}
783
784 static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
785 {
786         struct imxmci_host *host = mmc_priv(mmc);
787         unsigned int cmdat;
788
789         WARN_ON(host->req != NULL);
790
791         host->req = req;
792
793         cmdat = 0;
794
795         if (req->data) {
796                 imxmci_setup_data(host, req->data);
797
798                 cmdat |= CMD_DAT_CONT_DATA_ENABLE;
799
800                 if (req->data->flags & MMC_DATA_WRITE)
801                         cmdat |= CMD_DAT_CONT_WRITE;
802
803                 if (req->data->flags & MMC_DATA_STREAM) {
804                         cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
805                 }
806         }
807
808         imxmci_start_cmd(host, req->cmd, cmdat);
809 }
810
/* Target base clock after the PERCLK2 prescaler: 19.2 MHz. */
#define CLK_RATE 19200000

/*
 * MMC core .set_ios callback: apply bus width, power mode and clock
 * settings requested by the core.
 *
 * Register-access order matters here: the SDHC clock is stopped before
 * MMC_CLK_RATE is rewritten, and deliberately NOT restarted (see the
 * original author's comment below).
 */
static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct imxmci_host *host = mmc_priv(mmc);
	int prescaler;

	/*
	 * DAT3 pad: pinmuxed to the SD function for 4-bit mode, otherwise
	 * left as a pulled-up GPIO input (keeps the card in MMC mode,
	 * matching the probe-time setup).
	 */
	if( ios->bus_width==MMC_BUS_WIDTH_4 ) {
		host->actual_bus_width = MMC_BUS_WIDTH_4;
		imx_gpio_mode(PB11_PF_SD_DAT3);
	}else{
		host->actual_bus_width = MMC_BUS_WIDTH_1;
		imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
	}

	if ( host->power_mode != ios->power_mode ) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			break;
		case MMC_POWER_UP:
			/* Defer the 80-clock init sequence to the tasklet. */
			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
			break;
		case MMC_POWER_ON:
			break;
		}
		host->power_mode = ios->power_mode;
	}

	if ( ios->clock ) {
		unsigned int clk;

		/* The prescaler is 5 for PERCLK2 equal to 96MHz
		 * then 96MHz / 5 = 19.2 MHz
		 */
		clk=imx_get_perclk2();
		/* Round the PERCLK2/CLK_RATE ratio up from 7/8 onwards. */
		prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE;
		/*
		 * Map the ideal ratio onto the values the hardware field
		 * accepts.  NOTE(review): 3 -> 2 but 4 -> 4 looks
		 * asymmetric; presumably the register encodes prescaler
		 * values non-linearly — confirm against the i.MX SDHC
		 * datasheet.
		 */
		switch(prescaler) {
		case 0:
		case 1:	prescaler = 0;
			break;
		case 2:	prescaler = 1;
			break;
		case 3:	prescaler = 2;
			break;
		case 4:	prescaler = 4;
			break;
		default:
		case 5:	prescaler = 5;
			break;
		}

		/* NOTE(review): clk holds Hz here, not MHz as the label says. */
		dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
			clk, prescaler);

		/*
		 * Find the smallest power-of-two divider (clk reused as the
		 * divider exponent, 0..7) that does not exceed the requested
		 * card clock.
		 */
		for(clk=0; clk<8; clk++) {
			int x;
			x = CLK_RATE / (1<<clk);
			if( x <= ios->clock)
				break;
		}

		MMC_STR_STP_CLK |= STR_STP_CLK_ENABLE; /* enable controller */

		/* Clock must be stopped before reprogramming the divider. */
		imxmci_stop_clock(host);
		MMC_CLK_RATE = (prescaler<<3) | clk;
		/*
		 * Under my understanding, clock should not be started there, because it would
		 * initiate SDHC sequencer and send last or random command into card
		 */
		/*imxmci_start_clock(host);*/

		dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
	} else {
		imxmci_stop_clock(host);
	}
}
887
/* Host-controller operations exported to the MMC core. */
static const struct mmc_host_ops imxmci_ops = {
	.request	= imxmci_request,
	.set_ios	= imxmci_set_ios,
};
892
893 static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr)
894 {
895         int i;
896
897         for (i = 0; i < dev->num_resources; i++)
898                 if (dev->resource[i].flags == mask && nr-- == 0)
899                         return &dev->resource[i];
900         return NULL;
901 }
902
903 static int platform_device_irq(struct platform_device *dev, int nr)
904 {
905         int i;
906
907         for (i = 0; i < dev->num_resources; i++)
908                 if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0)
909                         return dev->resource[i].start;
910         return NO_IRQ;
911 }
912
913 static void imxmci_check_status(unsigned long data)
914 {
915         struct imxmci_host *host = (struct imxmci_host *)data;
916
917         if( host->pdata->card_present() != host->present ) {
918                 host->present ^= 1;
919                 dev_info(mmc_dev(host->mmc), "card %s\n",
920                       host->present ? "inserted" : "removed");
921
922                 set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
923                 tasklet_schedule(&host->tasklet);
924         }
925
926         if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
927            test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
928                 atomic_inc(&host->stuck_timeout);
929                 if(atomic_read(&host->stuck_timeout) > 4)
930                         tasklet_schedule(&host->tasklet);
931         } else {
932                 atomic_set(&host->stuck_timeout, 0);
933
934         }
935
936         mod_timer(&host->timer, jiffies + (HZ>>1));
937 }
938
939 static int imxmci_probe(struct platform_device *pdev)
940 {
941         struct mmc_host *mmc;
942         struct imxmci_host *host = NULL;
943         struct resource *r;
944         int ret = 0, irq;
945
946         printk(KERN_INFO "i.MX mmc driver\n");
947
948         r = platform_device_resource(pdev, IORESOURCE_MEM, 0);
949         irq = platform_device_irq(pdev, 0);
950         if (!r || irq == NO_IRQ)
951                 return -ENXIO;
952
953         r = request_mem_region(r->start, 0x100, "IMXMCI");
954         if (!r)
955                 return -EBUSY;
956
957         mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
958         if (!mmc) {
959                 ret = -ENOMEM;
960                 goto out;
961         }
962
963         mmc->ops = &imxmci_ops;
964         mmc->f_min = 150000;
965         mmc->f_max = CLK_RATE/2;
966         mmc->ocr_avail = MMC_VDD_32_33;
967         mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_BYTEBLOCK;
968
969         /* MMC core transfer sizes tunable parameters */
970         mmc->max_hw_segs = 64;
971         mmc->max_phys_segs = 64;
972         mmc->max_seg_size = 64*512;     /* default PAGE_CACHE_SIZE */
973         mmc->max_req_size = 64*512;     /* default PAGE_CACHE_SIZE */
974         mmc->max_blk_size = 2048;
975         mmc->max_blk_count = 65535;
976
977         host = mmc_priv(mmc);
978         host->mmc = mmc;
979         host->dma_allocated = 0;
980         host->pdata = pdev->dev.platform_data;
981
982         spin_lock_init(&host->lock);
983         host->res = r;
984         host->irq = irq;
985
986         imx_gpio_mode(PB8_PF_SD_DAT0);
987         imx_gpio_mode(PB9_PF_SD_DAT1);
988         imx_gpio_mode(PB10_PF_SD_DAT2);
989         /* Configured as GPIO with pull-up to ensure right MCC card mode */
990         /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
991         imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
992         /* imx_gpio_mode(PB11_PF_SD_DAT3); */
993         imx_gpio_mode(PB12_PF_SD_CLK);
994         imx_gpio_mode(PB13_PF_SD_CMD);
995
996         imxmci_softreset();
997
998         if ( MMC_REV_NO != 0x390 ) {
999                 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1000                         MMC_REV_NO);
1001                 goto out;
1002         }
1003
1004         MMC_READ_TO = 0x2db4; /* recommended in data sheet */
1005
1006         host->imask = IMXMCI_INT_MASK_DEFAULT;
1007         MMC_INT_MASK = host->imask;
1008
1009
1010         if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){
1011                 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
1012                 ret = -EBUSY;
1013                 goto out;
1014         }
1015         host->dma_allocated=1;
1016         imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
1017
1018         tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
1019         host->status_reg=0;
1020         host->pending_events=0;
1021
1022         ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
1023         if (ret)
1024                 goto out;
1025
1026         host->present = host->pdata->card_present();
1027         init_timer(&host->timer);
1028         host->timer.data = (unsigned long)host;
1029         host->timer.function = imxmci_check_status;
1030         add_timer(&host->timer);
1031         mod_timer(&host->timer, jiffies + (HZ>>1));
1032
1033         platform_set_drvdata(pdev, mmc);
1034
1035         mmc_add_host(mmc);
1036
1037         return 0;
1038
1039 out:
1040         if (host) {
1041                 if(host->dma_allocated){
1042                         imx_dma_free(host->dma);
1043                         host->dma_allocated=0;
1044                 }
1045         }
1046         if (mmc)
1047                 mmc_free_host(mmc);
1048         release_resource(r);
1049         return ret;
1050 }
1051
/*
 * Platform-driver remove: tear probe down in reverse.  The order below
 * is deliberate — the tasklet is disabled and the polling timer stopped
 * before the host is unregistered, and the IRQ/DMA are released before
 * the tasklet is finally killed and memory freed.
 */
static int imxmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct imxmci_host *host = mmc_priv(mmc);

		/* Stop new tasklet runs; pending ones finish first. */
		tasklet_disable(&host->tasklet);

		del_timer_sync(&host->timer);
		mmc_remove_host(mmc);

		free_irq(host->irq, host);
		if(host->dma_allocated){
			imx_dma_free(host->dma);
			host->dma_allocated=0;
		}

		tasklet_kill(&host->tasklet);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
1080
1081 #ifdef CONFIG_PM
1082 static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1083 {
1084         struct mmc_host *mmc = platform_get_drvdata(dev);
1085         int ret = 0;
1086
1087         if (mmc)
1088                 ret = mmc_suspend_host(mmc, state);
1089
1090         return ret;
1091 }
1092
1093 static int imxmci_resume(struct platform_device *dev)
1094 {
1095         struct mmc_host *mmc = platform_get_drvdata(dev);
1096         struct imxmci_host *host;
1097         int ret = 0;
1098
1099         if (mmc) {
1100                 host = mmc_priv(mmc);
1101                 if(host)
1102                         set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
1103                 ret = mmc_resume_host(mmc);
1104         }
1105
1106         return ret;
1107 }
1108 #else
1109 #define imxmci_suspend  NULL
1110 #define imxmci_resume   NULL
1111 #endif /* CONFIG_PM */
1112
/* Platform-driver glue binding the "imx-mmc" device to the hooks above. */
static struct platform_driver imxmci_driver = {
	.probe		= imxmci_probe,
	.remove		= imxmci_remove,
	.suspend	= imxmci_suspend,
	.resume		= imxmci_resume,
	.driver		= {
		.name		= DRIVER_NAME,
	}
};
1122
/* Module entry point: register the platform driver. */
static int __init imxmci_init(void)
{
	return platform_driver_register(&imxmci_driver);
}
1127
/* Module exit point: unregister the platform driver. */
static void __exit imxmci_exit(void)
{
	platform_driver_unregister(&imxmci_driver);
}
1132
/* Wire up module init/exit and describe the module to modinfo. */
module_init(imxmci_init);
module_exit(imxmci_exit);

MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");