spi: spi_bfin cleanups, error handling
[powerpc.git] drivers/spi/spi_bfin5xx.c
1 /*
2  * File:        drivers/spi/bfin5xx_spi.c
3  * Maintainer:
4  *              Bryan Wu <bryan.wu@analog.com>
5  * Original Author:
6  *              Luke Yang (Analog Devices Inc.)
7  *
8  * Created:     March. 10th 2006
9  * Description: SPI controller driver for Blackfin BF5xx
10  * Bugs:        Enter bugs at http://blackfin.uclinux.org/
11  *
12  * Modified:
13  *      March 10, 2006  bfin5xx_spi.c Created. (Luke Yang)
14  *      August 7, 2006  added full duplex mode (Axel Weiss & Luke Yang)
15  *      July  17, 2007  add support for BF54x SPI0 controller (Bryan Wu)
16  *
17  * Copyright 2004-2007 Analog Devices Inc.
18  *
19  * This program is free software ;  you can redistribute it and/or modify
20  * it under the terms of the GNU General Public License as published by
21  * the Free Software Foundation ;  either version 2, or (at your option)
22  * any later version.
23  *
24  * This program is distributed in the hope that it will be useful,
25  * but WITHOUT ANY WARRANTY ;  without even the implied warranty of
26  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
27  * GNU General Public License for more details.
28  *
29  * You should have received a copy of the GNU General Public License
30  * along with this program ;  see the file COPYING.
31  * If not, write to the Free Software Foundation,
32  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
33  */
34
35 #include <linux/init.h>
36 #include <linux/module.h>
37 #include <linux/delay.h>
38 #include <linux/device.h>
39 #include <linux/io.h>
40 #include <linux/ioport.h>
41 #include <linux/irq.h>
42 #include <linux/errno.h>
43 #include <linux/interrupt.h>
44 #include <linux/platform_device.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/spi/spi.h>
47 #include <linux/workqueue.h>
48
49 #include <asm/dma.h>
50 #include <asm/portmux.h>
51 #include <asm/bfin5xx_spi.h>
52
53 MODULE_AUTHOR("Bryan Wu, Luke Yang");
54 MODULE_DESCRIPTION("Blackfin BF5xx SPI Controller Driver");
55 MODULE_LICENSE("GPL");
56
57 #define DRV_NAME        "bfin-spi-master"
58 #define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
59
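/*
 * Generate static inline read_<reg>()/write_<reg>() accessors for the
 * memory-mapped SPI0 registers located at SPI0_REGBASE + offset.
 */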
60 #define DEFINE_SPI_REG(reg, off) \
61 static inline u16 read_##reg(void) \
62         { return bfin_read16(SPI0_REGBASE + off); } \
63 static inline void write_##reg(u16 v) \
64         {bfin_write16(SPI0_REGBASE + off, v); }
65
66 DEFINE_SPI_REG(CTRL, 0x00)
67 DEFINE_SPI_REG(FLAG, 0x04)
68 DEFINE_SPI_REG(STAT, 0x08)
69 DEFINE_SPI_REG(TDBR, 0x0C)
70 DEFINE_SPI_REG(RDBR, 0x10)
71 DEFINE_SPI_REG(BAUD, 0x14)
72 DEFINE_SPI_REG(SHAW, 0x18)
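/*
 * Message progress states stored in spi_message->state, followed by the
 * run states used for drv_data->run.
 */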
73 #define START_STATE ((void*)0)
74 #define RUNNING_STATE ((void*)1)
75 #define DONE_STATE ((void*)2)
76 #define ERROR_STATE ((void*)-1)
77 #define QUEUE_RUNNING 0
78 #define QUEUE_STOPPED 1
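/* set once the SPI DMA channel has been requested on behalf of any chip */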
79 int dma_requested;
80
81 struct driver_data {
82         /* Driver model hookup */
83         struct platform_device *pdev;
84
85         /* SPI framework hookup */
86         struct spi_master *master;
87
88         /* BFIN hookup */
89         struct bfin5xx_spi_master *master_info;
90
91         /* Driver message queue */
92         struct workqueue_struct *workqueue;
93         struct work_struct pump_messages;
94         spinlock_t lock;
95         struct list_head queue;
96         int busy;
97         int run;
98
99         /* Message Transfer pump */
100         struct tasklet_struct pump_transfers;
101
102         /* Current message transfer state info */
103         struct spi_message *cur_msg;
104         struct spi_transfer *cur_transfer;
105         struct chip_data *cur_chip;
106         size_t len_in_bytes;
107         size_t len;
108         void *tx;
109         void *tx_end;
110         void *rx;
111         void *rx_end;
112         int dma_mapped;
113         dma_addr_t rx_dma;
114         dma_addr_t tx_dma;
115         size_t rx_map_len;
116         size_t tx_map_len;
117         u8 n_bytes;
118         void (*write) (struct driver_data *);
119         void (*read) (struct driver_data *);
120         void (*duplex) (struct driver_data *);
121 };
122
123 struct chip_data {
124         u16 ctl_reg;
125         u16 baud;
126         u16 flag;
127
128         u8 chip_select_num;
129         u8 chip_select_requested;
130         u8 n_bytes;
131         u8 width;               /* 0 or 1 */
132         u8 enable_dma;
133         u8 bits_per_word;       /* 8 or 16 */
134         u8 cs_change_per_word;
135         u8 cs_chg_udelay;
136         void (*write) (struct driver_data *);
137         void (*read) (struct driver_data *);
138         void (*duplex) (struct driver_data *);
139 };
140
141 static void bfin_spi_enable(struct driver_data *drv_data)
142 {
143         u16 cr;
144
145         cr = read_CTRL();
146         write_CTRL(cr | BIT_CTL_ENABLE);
147 }
148
149 static void bfin_spi_disable(struct driver_data *drv_data)
150 {
151         u16 cr;
152
153         cr = read_CTRL();
154         write_CTRL(cr & (~BIT_CTL_ENABLE));
155 }
156
157 /* Calculate the SPI_BAUD register value based on input HZ */
158 static u16 hz_to_spi_baud(u32 speed_hz)
159 {
160         u_long sclk = get_sclk();
161         u16 spi_baud = (sclk / (2 * speed_hz));
162
163         if ((sclk % (2 * speed_hz)) > 0)
164                 spi_baud++;
165
166         return spi_baud;
167 }
168
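/*
 * Busy-wait for the current transfer to finish (SPIF set) and clear the
 * status register.  Returns zero if the wait timed out.
 */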
169 static int flush(struct driver_data *drv_data)
170 {
171         unsigned long limit = loops_per_jiffy << 1;
172
173         /* wait for stop and clear stat */
174         while (!(read_STAT() & BIT_STAT_SPIF) && --limit)
175                 continue;
176
177         write_STAT(BIT_STAT_CLR);
178
179         return limit;
180 }
181
182 #define MAX_SPI0_SSEL   7
183
184 /* stop controller and re-configure the current chip */
185 static int restore_state(struct driver_data *drv_data)
186 {
187         struct chip_data *chip = drv_data->cur_chip;
188         int ret = 0;
189         u16 ssel[MAX_SPI0_SSEL] = {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
190                                         P_SPI0_SSEL4, P_SPI0_SSEL5,
191                                         P_SPI0_SSEL6, P_SPI0_SSEL7,};
192
193         /* Clear status and disable clock */
194         write_STAT(BIT_STAT_CLR);
195         bfin_spi_disable(drv_data);
196         dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
197
198         /* Load the registers */
199         write_CTRL(chip->ctl_reg);
200         write_BAUD(chip->baud);
201         write_FLAG(chip->flag);
202
203         if (!chip->chip_select_requested) {
204                 int i = chip->chip_select_num;
205
206                 dev_dbg(&drv_data->pdev->dev, "chip select number is %d\n", i);
207
208                 if ((i > 0) && (i <= MAX_SPI0_SSEL))
209                         ret = peripheral_request(ssel[i-1], DRV_NAME);
210
211                 chip->chip_select_requested = 1;
212         }
213
214         if (ret)
215                 dev_dbg(&drv_data->pdev->dev,
216                         "request chip select number %d failed\n",
217                         chip->chip_select_num);
218
219         return ret;
220 }
221
222 /* used to kick off transfer in rx mode */
223 static unsigned short dummy_read(void)
224 {
225         unsigned short tmp;
226         tmp = read_RDBR();
227         return tmp;
228 }
229
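/*
 * PIO helpers for one-sided transfers: clock out zeros when there is no
 * tx buffer, and discard incoming words when there is no rx buffer.
 */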
230 static void null_writer(struct driver_data *drv_data)
231 {
232         u8 n_bytes = drv_data->n_bytes;
233
234         while (drv_data->tx < drv_data->tx_end) {
235                 write_TDBR(0);
236                 while ((read_STAT() & BIT_STAT_TXS))
237                         continue;
238                 drv_data->tx += n_bytes;
239         }
240 }
241
242 static void null_reader(struct driver_data *drv_data)
243 {
244         u8 n_bytes = drv_data->n_bytes;
245         dummy_read();
246
247         while (drv_data->rx < drv_data->rx_end) {
248                 while (!(read_STAT() & BIT_STAT_RXS))
249                         continue;
250                 dummy_read();
251                 drv_data->rx += n_bytes;
252         }
253 }
254
255 static void u8_writer(struct driver_data *drv_data)
256 {
257         dev_dbg(&drv_data->pdev->dev,
258                 "cr8-s is 0x%x\n", read_STAT());
259         while (drv_data->tx < drv_data->tx_end) {
260                 write_TDBR(*(u8 *) (drv_data->tx));
261                 while (read_STAT() & BIT_STAT_TXS)
262                         continue;
263                 ++drv_data->tx;
264         }
265
266         /* poll for SPI completion before returning */
267         while (!(read_STAT() & BIT_STAT_SPIF))
268                 continue;
269 }
270
271 static void u8_cs_chg_writer(struct driver_data *drv_data)
272 {
273         struct chip_data *chip = drv_data->cur_chip;
274
275         while (drv_data->tx < drv_data->tx_end) {
276                 write_FLAG(chip->flag);
277
278                 write_TDBR(*(u8 *) (drv_data->tx));
279                 while (read_STAT() & BIT_STAT_TXS)
280                         continue;
281                 while (!(read_STAT() & BIT_STAT_SPIF))
282                         continue;
283                 write_FLAG(0xFF00 | chip->flag);
284
285                 if (chip->cs_chg_udelay)
286                         udelay(chip->cs_chg_udelay);
287                 ++drv_data->tx;
288         }
289         write_FLAG(0xFF00);
290
291 }
292
293 static void u8_reader(struct driver_data *drv_data)
294 {
295         dev_dbg(&drv_data->pdev->dev,
296                 "cr-8 is 0x%x\n", read_STAT());
297
298         /* clear TDBR buffer before read (else it will be shifted out) */
299         write_TDBR(0xFFFF);
300
301         dummy_read();
302
303         while (drv_data->rx < drv_data->rx_end - 1) {
304                 while (!(read_STAT() & BIT_STAT_RXS))
305                         continue;
306                 *(u8 *) (drv_data->rx) = read_RDBR();
307                 ++drv_data->rx;
308         }
309
310         while (!(read_STAT() & BIT_STAT_RXS))
311                 continue;
312         *(u8 *) (drv_data->rx) = read_SHAW();
313         ++drv_data->rx;
314 }
315
316 static void u8_cs_chg_reader(struct driver_data *drv_data)
317 {
318         struct chip_data *chip = drv_data->cur_chip;
319
320         while (drv_data->rx < drv_data->rx_end) {
321                 write_FLAG(chip->flag);
322
323                 read_RDBR();    /* kick off */
324                 while (!(read_STAT() & BIT_STAT_RXS))
325                         continue;
326                 while (!(read_STAT() & BIT_STAT_SPIF))
327                         continue;
328                 *(u8 *) (drv_data->rx) = read_SHAW();
329                 write_FLAG(0xFF00 | chip->flag);
330
331                 if (chip->cs_chg_udelay)
332                         udelay(chip->cs_chg_udelay);
333                 ++drv_data->rx;
334         }
335         write_FLAG(0xFF00);
336
337 }
338
339 static void u8_duplex(struct driver_data *drv_data)
340 {
341         /* in duplex mode, clk is triggered by writing of TDBR */
342         while (drv_data->rx < drv_data->rx_end) {
343                 write_TDBR(*(u8 *) (drv_data->tx));
344                 while (!(read_STAT() & BIT_STAT_SPIF))
345                         continue;
346                 while (!(read_STAT() & BIT_STAT_RXS))
347                         continue;
348                 *(u8 *) (drv_data->rx) = read_RDBR();
349                 ++drv_data->rx;
350                 ++drv_data->tx;
351         }
352 }
353
354 static void u8_cs_chg_duplex(struct driver_data *drv_data)
355 {
356         struct chip_data *chip = drv_data->cur_chip;
357
358         while (drv_data->rx < drv_data->rx_end) {
359                 write_FLAG(chip->flag);
360
361
362                 write_TDBR(*(u8 *) (drv_data->tx));
363                 while (!(read_STAT() & BIT_STAT_SPIF))
364                         continue;
365                 while (!(read_STAT() & BIT_STAT_RXS))
366                         continue;
367                 *(u8 *) (drv_data->rx) = read_RDBR();
368                 write_FLAG(0xFF00 | chip->flag);
369
370                 if (chip->cs_chg_udelay)
371                         udelay(chip->cs_chg_udelay);
372                 ++drv_data->rx;
373                 ++drv_data->tx;
374         }
375         write_FLAG(0xFF00);
376
377 }
378
379 static void u16_writer(struct driver_data *drv_data)
380 {
381         dev_dbg(&drv_data->pdev->dev,
382                 "cr16 is 0x%x\n", read_STAT());
383
384         while (drv_data->tx < drv_data->tx_end) {
385                 write_TDBR(*(u16 *) (drv_data->tx));
386                 while ((read_STAT() & BIT_STAT_TXS))
387                         continue;
388                 drv_data->tx += 2;
389         }
390
391         /* poll for SPI completion before returning */
392         while (!(read_STAT() & BIT_STAT_SPIF))
393                 continue;
394 }
395
396 static void u16_cs_chg_writer(struct driver_data *drv_data)
397 {
398         struct chip_data *chip = drv_data->cur_chip;
399
400         while (drv_data->tx < drv_data->tx_end) {
401                 write_FLAG(chip->flag);
402
403                 write_TDBR(*(u16 *) (drv_data->tx));
404                 while ((read_STAT() & BIT_STAT_TXS))
405                         continue;
406                 while (!(read_STAT() & BIT_STAT_SPIF))
407                         continue;
408                 write_FLAG(0xFF00 | chip->flag);
409
410                 if (chip->cs_chg_udelay)
411                         udelay(chip->cs_chg_udelay);
412                 drv_data->tx += 2;
413         }
414         write_FLAG(0xFF00);
415 }
416
417 static void u16_reader(struct driver_data *drv_data)
418 {
419         dev_dbg(&drv_data->pdev->dev,
420                 "cr-16 is 0x%x\n", read_STAT());
421         dummy_read();
422
423         while (drv_data->rx < (drv_data->rx_end - 2)) {
424                 while (!(read_STAT() & BIT_STAT_RXS))
425                         continue;
426                 *(u16 *) (drv_data->rx) = read_RDBR();
427                 drv_data->rx += 2;
428         }
429
430         while (!(read_STAT() & BIT_STAT_RXS))
431                 continue;
432         *(u16 *) (drv_data->rx) = read_SHAW();
433         drv_data->rx += 2;
434 }
435
436 static void u16_cs_chg_reader(struct driver_data *drv_data)
437 {
438         struct chip_data *chip = drv_data->cur_chip;
439
440         while (drv_data->rx < drv_data->rx_end) {
441                 write_FLAG(chip->flag);
442
443                 read_RDBR();    /* kick off */
444                 while (!(read_STAT() & BIT_STAT_RXS))
445                         continue;
446                 while (!(read_STAT() & BIT_STAT_SPIF))
447                         continue;
448                 *(u16 *) (drv_data->rx) = read_SHAW();
449                 write_FLAG(0xFF00 | chip->flag);
450
451                 if (chip->cs_chg_udelay)
452                         udelay(chip->cs_chg_udelay);
453                 drv_data->rx += 2;
454         }
455         write_FLAG(0xFF00);
456 }
457
458 static void u16_duplex(struct driver_data *drv_data)
459 {
460         /* in duplex mode, clk is triggered by writing of TDBR */
461         while (drv_data->tx < drv_data->tx_end) {
462                 write_TDBR(*(u16 *) (drv_data->tx));
463                 while (!(read_STAT() & BIT_STAT_SPIF))
464                         continue;
465                 while (!(read_STAT() & BIT_STAT_RXS))
466                         continue;
467                 *(u16 *) (drv_data->rx) = read_RDBR();
468                 drv_data->rx += 2;
469                 drv_data->tx += 2;
470         }
471 }
472
473 static void u16_cs_chg_duplex(struct driver_data *drv_data)
474 {
475         struct chip_data *chip = drv_data->cur_chip;
476
477         while (drv_data->tx < drv_data->tx_end) {
478                 write_FLAG(chip->flag);
479
480                 write_TDBR(*(u16 *) (drv_data->tx));
481                 while (!(read_STAT() & BIT_STAT_SPIF))
482                         continue;
483                 while (!(read_STAT() & BIT_STAT_RXS))
484                         continue;
485                 *(u16 *) (drv_data->rx) = read_RDBR();
486                 write_FLAG(0xFF00 | chip->flag);
487
488                 if (chip->cs_chg_udelay)
489                         udelay(chip->cs_chg_udelay);
490                 drv_data->rx += 2;
491                 drv_data->tx += 2;
492         }
493         write_FLAG(0xFF00);
494 }
495
496 /* test if there is another transfer to be done */
497 static void *next_transfer(struct driver_data *drv_data)
498 {
499         struct spi_message *msg = drv_data->cur_msg;
500         struct spi_transfer *trans = drv_data->cur_transfer;
501
502         /* Move to next transfer */
503         if (trans->transfer_list.next != &msg->transfers) {
504                 drv_data->cur_transfer =
505                     list_entry(trans->transfer_list.next,
506                                struct spi_transfer, transfer_list);
507                 return RUNNING_STATE;
508         } else
509                 return DONE_STATE;
510 }
511
512 /*
513  * caller already set message->status;
514  * dma and pio irqs are blocked; give the finished message back
515  */
516 static void giveback(struct driver_data *drv_data)
517 {
518         struct spi_transfer *last_transfer;
519         unsigned long flags;
520         struct spi_message *msg;
521
522         spin_lock_irqsave(&drv_data->lock, flags);
523         msg = drv_data->cur_msg;
524         drv_data->cur_msg = NULL;
525         drv_data->cur_transfer = NULL;
526         drv_data->cur_chip = NULL;
527         queue_work(drv_data->workqueue, &drv_data->pump_messages);
528         spin_unlock_irqrestore(&drv_data->lock, flags);
529
530         last_transfer = list_entry(msg->transfers.prev,
531                                    struct spi_transfer, transfer_list);
532
533         msg->state = NULL;
534
535         /* disable the chip select signal, but do not stop the SPI in autobuffer mode */
536         if (drv_data->tx_dma != 0xFFFF) {
537                 write_FLAG(0xFF00);
538                 bfin_spi_disable(drv_data);
539         }
540
541         if (msg->complete)
542                 msg->complete(msg->context);
543 }
544
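/*
 * DMA completion interrupt: wait for the controller to drain the last
 * word, advance the message state and schedule the transfer tasklet.
 */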
545 static irqreturn_t dma_irq_handler(int irq, void *dev_id)
546 {
547         struct driver_data *drv_data = (struct driver_data *)dev_id;
548         struct spi_message *msg = drv_data->cur_msg;
549
550         dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler\n");
551         clear_dma_irqstat(CH_SPI);
552
553         /* Wait for DMA to complete */
554         while (get_dma_curr_irqstat(CH_SPI) & DMA_RUN)
555                 continue;
556
557         /*
558          * wait for the last transaction shifted out.  HRM states:
559          * at this point there may still be data in the SPI DMA FIFO waiting
560          * to be transmitted ... software needs to poll TXS in the SPI_STAT
561          * register until it goes low for 2 successive reads
562          */
563         if (drv_data->tx != NULL) {
564                 while ((bfin_read_SPI_STAT() & TXS) ||
565                        (bfin_read_SPI_STAT() & TXS))
566                         continue;
567         }
568
569         while (!(bfin_read_SPI_STAT() & SPIF))
570                 continue;
571
572         bfin_spi_disable(drv_data);
573
574         msg->actual_length += drv_data->len_in_bytes;
575
576         /* Move to next transfer */
577         msg->state = next_transfer(drv_data);
578
579         /* Schedule transfer tasklet */
580         tasklet_schedule(&drv_data->pump_transfers);
581
582         /* free the irq handler before next transfer */
583         dev_dbg(&drv_data->pdev->dev,
584                 "disable dma channel irq%d\n",
585                 CH_SPI);
586         dma_disable_irq(CH_SPI);
587
588         return IRQ_HANDLED;
589 }
590
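/*
 * Tasklet body: complete the current message if it is done or in error,
 * otherwise program the controller for the next spi_transfer (DMA when
 * the chip has it enabled and the transfer is long enough, PIO otherwise).
 */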
591 static void pump_transfers(unsigned long data)
592 {
593         struct driver_data *drv_data = (struct driver_data *)data;
594         struct spi_message *message = NULL;
595         struct spi_transfer *transfer = NULL;
596         struct spi_transfer *previous = NULL;
597         struct chip_data *chip = NULL;
598         u8 width;
599         u16 cr, dma_width, dma_config;
600         u32 tranf_success = 1;
601
602         /* Get current state information */
603         message = drv_data->cur_msg;
604         transfer = drv_data->cur_transfer;
605         chip = drv_data->cur_chip;
606
607         /*
608          * if msg is error or done, report it back using complete() callback
609          */
610
611         /* Handle an aborted message */
612         if (message->state == ERROR_STATE) {
613                 message->status = -EIO;
614                 giveback(drv_data);
615                 return;
616         }
617
618         /* Handle end of message */
619         if (message->state == DONE_STATE) {
620                 message->status = 0;
621                 giveback(drv_data);
622                 return;
623         }
624
625         /* Delay if requested at end of transfer */
626         if (message->state == RUNNING_STATE) {
627                 previous = list_entry(transfer->transfer_list.prev,
628                                       struct spi_transfer, transfer_list);
629                 if (previous->delay_usecs)
630                         udelay(previous->delay_usecs);
631         }
632
633         /* Setup the transfer state based on the type of transfer */
634         if (flush(drv_data) == 0) {
635                 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
636                 message->status = -EIO;
637                 giveback(drv_data);
638                 return;
639         }
640
641         if (transfer->tx_buf != NULL) {
642                 drv_data->tx = (void *)transfer->tx_buf;
643                 drv_data->tx_end = drv_data->tx + transfer->len;
644                 dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n",
645                         transfer->tx_buf, drv_data->tx_end);
646         } else {
647                 drv_data->tx = NULL;
648         }
649
650         if (transfer->rx_buf != NULL) {
651                 drv_data->rx = transfer->rx_buf;
652                 drv_data->rx_end = drv_data->rx + transfer->len;
653                 dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
654                         transfer->rx_buf, drv_data->rx_end);
655         } else {
656                 drv_data->rx = NULL;
657         }
658
659         drv_data->rx_dma = transfer->rx_dma;
660         drv_data->tx_dma = transfer->tx_dma;
661         drv_data->len_in_bytes = transfer->len;
662
663         width = chip->width;
664         if (width == CFG_SPI_WORDSIZE16) {
665                 drv_data->len = (transfer->len) >> 1;
666         } else {
667                 drv_data->len = transfer->len;
668         }
669         drv_data->write = drv_data->tx ? chip->write : null_writer;
670         drv_data->read = drv_data->rx ? chip->read : null_reader;
671         drv_data->duplex = chip->duplex ? chip->duplex : null_writer;
672         dev_dbg(&drv_data->pdev->dev,
673                 "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n",
674                 drv_data->write, chip->write, null_writer);
675
676         /* speed and width have been set per message */
677         message->state = RUNNING_STATE;
678         dma_config = 0;
679
680         /* restore spi status for each spi transfer */
681         if (transfer->speed_hz) {
682                 write_BAUD(hz_to_spi_baud(transfer->speed_hz));
683         } else {
684                 write_BAUD(chip->baud);
685         }
686         write_FLAG(chip->flag);
687
688         dev_dbg(&drv_data->pdev->dev,
689                 "now pumping a transfer: width is %d, len is %d\n",
690                 width, transfer->len);
691
692         /*
693          * Use DMA for the transfer when the chip has DMA enabled and the
694          * transfer is long enough; otherwise fall back to PIO, as selected
695          * by drv_data->cur_chip->enable_dma
696          */
697         if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {
698
699                 write_STAT(BIT_STAT_CLR);
700                 disable_dma(CH_SPI);
701                 clear_dma_irqstat(CH_SPI);
702                 bfin_spi_disable(drv_data);
703
704                 /* config dma channel */
705                 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
706                 if (width == CFG_SPI_WORDSIZE16) {
707                         set_dma_x_count(CH_SPI, drv_data->len);
708                         set_dma_x_modify(CH_SPI, 2);
709                         dma_width = WDSIZE_16;
710                 } else {
711                         set_dma_x_count(CH_SPI, drv_data->len);
712                         set_dma_x_modify(CH_SPI, 1);
713                         dma_width = WDSIZE_8;
714                 }
715
716                 /* set transfer width and direction, and enable the SPI */
717                 cr = (read_CTRL() & (~BIT_CTL_TIMOD));
718
719                 /* dirty hack for autobuffer DMA mode */
720                 if (drv_data->tx_dma == 0xFFFF) {
721                         dev_dbg(&drv_data->pdev->dev,
722                                 "doing autobuffer DMA out.\n");
723
724                         /* no irq in autobuffer mode */
725                         dma_config =
726                             (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
727                         set_dma_config(CH_SPI, dma_config);
728                         set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);
729                         enable_dma(CH_SPI);
730                         write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
731                                    (CFG_SPI_ENABLE << 14));
732
733                         /* just return here, there can only be one transfer in this mode */
734                         message->status = 0;
735                         giveback(drv_data);
736                         return;
737                 }
738
739                 /* In dma mode, rx or tx must be NULL in one transfer */
740                 if (drv_data->rx != NULL) {
741                         /* set transfer mode, and enable SPI */
742                         dev_dbg(&drv_data->pdev->dev, "doing DMA in.\n");
743
744                         /* disable SPI before write to TDBR */
745                         write_CTRL(cr & ~BIT_CTL_ENABLE);
746
747                         /* clear tx reg so former data is not shifted out */
748                         write_TDBR(0xFF);
749
750                         set_dma_x_count(CH_SPI, drv_data->len);
751
752                         /* start dma */
753                         dma_enable_irq(CH_SPI);
754                         dma_config = (WNR | RESTART | dma_width | DI_EN);
755                         set_dma_config(CH_SPI, dma_config);
756                         set_dma_start_addr(CH_SPI, (unsigned long)drv_data->rx);
757                         enable_dma(CH_SPI);
758
759                         cr |=
760                             CFG_SPI_DMAREAD | (width << 8) | (CFG_SPI_ENABLE <<
761                                                               14);
762                         /* set transfer mode, and enable SPI */
763                         write_CTRL(cr);
764                 } else if (drv_data->tx != NULL) {
765                         dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
766
767                         /* start dma */
768                         dma_enable_irq(CH_SPI);
769                         dma_config = (RESTART | dma_width | DI_EN);
770                         set_dma_config(CH_SPI, dma_config);
771                         set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);
772                         enable_dma(CH_SPI);
773
774                         write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
775                                    (CFG_SPI_ENABLE << 14));
776
777                 }
778         } else {
779                 /* IO mode write then read */
780                 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
781
782                 write_STAT(BIT_STAT_CLR);
783
784                 if (drv_data->tx != NULL && drv_data->rx != NULL) {
785                         /* full duplex mode */
786                         BUG_ON((drv_data->tx_end - drv_data->tx) !=
787                                (drv_data->rx_end - drv_data->rx));
788                         cr = (read_CTRL() & (~BIT_CTL_TIMOD));
789                         cr |= CFG_SPI_WRITE | (width << 8) |
790                                 (CFG_SPI_ENABLE << 14);
791                         dev_dbg(&drv_data->pdev->dev,
792                                 "IO duplex: cr is 0x%x\n", cr);
793
794                         write_CTRL(cr);
795
796                         drv_data->duplex(drv_data);
797
798                         if (drv_data->tx != drv_data->tx_end)
799                                 tranf_success = 0;
800                 } else if (drv_data->tx != NULL) {
801                         /* write only half duplex */
802                         cr = (read_CTRL() & (~BIT_CTL_TIMOD));
803                         cr |= CFG_SPI_WRITE | (width << 8) |
804                                 (CFG_SPI_ENABLE << 14);
805                         dev_dbg(&drv_data->pdev->dev,
806                                 "IO write: cr is 0x%x\n", cr);
807
808                         write_CTRL(cr);
809
810                         drv_data->write(drv_data);
811
812                         if (drv_data->tx != drv_data->tx_end)
813                                 tranf_success = 0;
814                 } else if (drv_data->rx != NULL) {
815                         /* read only half duplex */
816                         cr = (read_CTRL() & (~BIT_CTL_TIMOD));
817                         cr |= CFG_SPI_READ | (width << 8) |
818                                 (CFG_SPI_ENABLE << 14);
819                         dev_dbg(&drv_data->pdev->dev,
820                                 "IO read: cr is 0x%x\n", cr);
821
822                         write_CTRL(cr);
823
824                         drv_data->read(drv_data);
825                         if (drv_data->rx != drv_data->rx_end)
826                                 tranf_success = 0;
827                 }
828
829                 if (!tranf_success) {
830                         dev_dbg(&drv_data->pdev->dev,
831                                 "IO write error!\n");
832                         message->state = ERROR_STATE;
833                 } else {
834                         /* Update total bytes transferred */
835                         message->actual_length += drv_data->len;
836
837                         /* Move to next transfer of this msg */
838                         message->state = next_transfer(drv_data);
839                 }
840
841                 /* Schedule next transfer tasklet */
842                 tasklet_schedule(&drv_data->pump_transfers);
843
844         }
845 }
846
847 /* pop a msg from queue and kick off real transfer */
848 static void pump_messages(struct work_struct *work)
849 {
850         struct driver_data *drv_data;
851         unsigned long flags;
852
853         drv_data = container_of(work, struct driver_data, pump_messages);
854
855         /* Lock queue and check for queue work */
856         spin_lock_irqsave(&drv_data->lock, flags);
857         if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
858                 /* pumper kicked off but no work to do */
859                 drv_data->busy = 0;
860                 spin_unlock_irqrestore(&drv_data->lock, flags);
861                 return;
862         }
863
864         /* Make sure we are not already running a message */
865         if (drv_data->cur_msg) {
866                 spin_unlock_irqrestore(&drv_data->lock, flags);
867                 return;
868         }
869
870         /* Extract head of queue */
871         drv_data->cur_msg = list_entry(drv_data->queue.next,
872                                        struct spi_message, queue);
873
874         /* Set up the SPI using the per-chip configuration */
875         drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
876         if (restore_state(drv_data)) {
877                 spin_unlock_irqrestore(&drv_data->lock, flags);
878                 return;
879         }
880
881         list_del_init(&drv_data->cur_msg->queue);
882
883         /* Initial message state */
884         drv_data->cur_msg->state = START_STATE;
885         drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
886                                             struct spi_transfer, transfer_list);
887
888         dev_dbg(&drv_data->pdev->dev, "got a message to pump, "
889                 "state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
890                 drv_data->cur_chip->baud, drv_data->cur_chip->flag,
891                 drv_data->cur_chip->ctl_reg);
892
893         dev_dbg(&drv_data->pdev->dev,
894                 "the first transfer len is %d\n",
895                 drv_data->cur_transfer->len);
896
897         /* Mark as busy and launch transfers */
898         tasklet_schedule(&drv_data->pump_transfers);
899
900         drv_data->busy = 1;
901         spin_unlock_irqrestore(&drv_data->lock, flags);
902 }
903
904 /*
905  * got a msg to transfer, queue it in drv_data->queue.
906  * And kick off message pumper
907  */
908 static int transfer(struct spi_device *spi, struct spi_message *msg)
909 {
910         struct driver_data *drv_data = spi_master_get_devdata(spi->master);
911         unsigned long flags;
912
913         spin_lock_irqsave(&drv_data->lock, flags);
914
915         if (drv_data->run == QUEUE_STOPPED) {
916                 spin_unlock_irqrestore(&drv_data->lock, flags);
917                 return -ESHUTDOWN;
918         }
919
920         msg->actual_length = 0;
921         msg->status = -EINPROGRESS;
922         msg->state = START_STATE;
923
924         dev_dbg(&spi->dev, "adding a msg in transfer()\n");
925         list_add_tail(&msg->queue, &drv_data->queue);
926
927         if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
928                 queue_work(drv_data->workqueue, &drv_data->pump_messages);
929
930         spin_unlock_irqrestore(&drv_data->lock, flags);
931
932         return 0;
933 }
934
935 /* first setup for new devices */
936 static int setup(struct spi_device *spi)
937 {
938         struct bfin5xx_spi_chip *chip_info = NULL;
939         struct chip_data *chip;
940         struct driver_data *drv_data = spi_master_get_devdata(spi->master);
941         u8 spi_flg;
942
943         /* Abort device setup if requested features are not supported */
944         if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
945                 dev_err(&spi->dev, "requested mode not fully supported\n");
946                 return -EINVAL;
947         }
948
949         /* Zero (the default) here means 8 bits */
950         if (!spi->bits_per_word)
951                 spi->bits_per_word = 8;
952
953         if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
954                 return -EINVAL;
955
956         /* Only alloc (or use chip_info) on first setup */
957         chip = spi_get_ctldata(spi);
958         if (chip == NULL) {
959                 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
960                 if (!chip)
961                         return -ENOMEM;
962
963                 chip->enable_dma = 0;
964                 chip_info = spi->controller_data;
965         }
966
967         /* chip_info isn't always needed */
968         if (chip_info) {
969                 chip->enable_dma = chip_info->enable_dma != 0
970                     && drv_data->master_info->enable_dma;
971                 chip->ctl_reg = chip_info->ctl_reg;
972                 chip->bits_per_word = chip_info->bits_per_word;
973                 chip->cs_change_per_word = chip_info->cs_change_per_word;
974                 chip->cs_chg_udelay = chip_info->cs_chg_udelay;
975         }
976
977         /* translate common spi framework mode bits into our ctl register */
978         if (spi->mode & SPI_CPOL)
979                 chip->ctl_reg |= CPOL;
980         if (spi->mode & SPI_CPHA)
981                 chip->ctl_reg |= CPHA;
982         if (spi->mode & SPI_LSB_FIRST)
983                 chip->ctl_reg |= LSBF;
984         /* we don't support running in slave mode (yet?) */
985         chip->ctl_reg |= MSTR;
986
987         /*
988          * if any one SPI chip is registered and wants DMA, request the
989          * DMA channel for it
990          */
991         if (chip->enable_dma && !dma_requested) {
992                 /* register dma irq handler */
993                 if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) {
994                         dev_dbg(&spi->dev,
995                                 "Unable to request BlackFin SPI DMA channel\n");
996                         return -ENODEV;
997                 }
998                 if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data)
999                     < 0) {
1000                         dev_dbg(&spi->dev, "Unable to set dma callback\n");
1001                         return -EPERM;
1002                 }
1003                 dma_disable_irq(CH_SPI);
1004                 dma_requested = 1;
1005         }
1006
1007         /*
1008          * Note: chip->baud holds the SPI_BAUD register value derived from
1009          * max_speed_hz, not the raw baudrate itself
1010          */
1011         chip->baud = hz_to_spi_baud(spi->max_speed_hz);
1012         spi_flg = ~(1 << (spi->chip_select));
1013         chip->flag = ((u16) spi_flg << 8) | (1 << (spi->chip_select));
1014         chip->chip_select_num = spi->chip_select;
1015
1016         switch (chip->bits_per_word) {
1017         case 8:
1018                 chip->n_bytes = 1;
1019                 chip->width = CFG_SPI_WORDSIZE8;
1020                 chip->read = chip->cs_change_per_word ?
1021                         u8_cs_chg_reader : u8_reader;
1022                 chip->write = chip->cs_change_per_word ?
1023                         u8_cs_chg_writer : u8_writer;
1024                 chip->duplex = chip->cs_change_per_word ?
1025                         u8_cs_chg_duplex : u8_duplex;
1026                 break;
1027
1028         case 16:
1029                 chip->n_bytes = 2;
1030                 chip->width = CFG_SPI_WORDSIZE16;
1031                 chip->read = chip->cs_change_per_word ?
1032                         u16_cs_chg_reader : u16_reader;
1033                 chip->write = chip->cs_change_per_word ?
1034                         u16_cs_chg_writer : u16_writer;
1035                 chip->duplex = chip->cs_change_per_word ?
1036                         u16_cs_chg_duplex : u16_duplex;
1037                 break;
1038
1039         default:
1040                 dev_err(&spi->dev, "%d bits_per_word is not supported\n",
1041                                 chip->bits_per_word);
1042                 kfree(chip);
1043                 return -ENODEV;
1044         }
1045
1046         dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
1047                         spi->modalias, chip->width, chip->enable_dma);
1048         dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
1049                         chip->ctl_reg, chip->flag);
1050
1051         spi_set_ctldata(spi, chip);
1052
1053         return 0;
1054 }
1055
1056 /*
1057  * callback for spi framework.
1058  * clean driver specific data
1059  */
1060 static void cleanup(struct spi_device *spi)
1061 {
1062         struct chip_data *chip = spi_get_ctldata(spi);
1063
1064         kfree(chip);
1065 }
1066
1067 static inline int init_queue(struct driver_data *drv_data)
1068 {
1069         INIT_LIST_HEAD(&drv_data->queue);
1070         spin_lock_init(&drv_data->lock);
1071
1072         drv_data->run = QUEUE_STOPPED;
1073         drv_data->busy = 0;
1074
1075         /* init transfer tasklet */
1076         tasklet_init(&drv_data->pump_transfers,
1077                      pump_transfers, (unsigned long)drv_data);
1078
1079         /* init messages workqueue */
1080         INIT_WORK(&drv_data->pump_messages, pump_messages);
1081         drv_data->workqueue =
1082             create_singlethread_workqueue(drv_data->master->dev.parent->bus_id);
1083         if (drv_data->workqueue == NULL)
1084                 return -EBUSY;
1085
1086         return 0;
1087 }
1088
1089 static inline int start_queue(struct driver_data *drv_data)
1090 {
1091         unsigned long flags;
1092
1093         spin_lock_irqsave(&drv_data->lock, flags);
1094
1095         if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1096                 spin_unlock_irqrestore(&drv_data->lock, flags);
1097                 return -EBUSY;
1098         }
1099
1100         drv_data->run = QUEUE_RUNNING;
1101         drv_data->cur_msg = NULL;
1102         drv_data->cur_transfer = NULL;
1103         drv_data->cur_chip = NULL;
1104         spin_unlock_irqrestore(&drv_data->lock, flags);
1105
1106         queue_work(drv_data->workqueue, &drv_data->pump_messages);
1107
1108         return 0;
1109 }
1110
1111 static inline int stop_queue(struct driver_data *drv_data)
1112 {
1113         unsigned long flags;
1114         unsigned limit = 500;
1115         int status = 0;
1116
1117         spin_lock_irqsave(&drv_data->lock, flags);
1118
1119         /*
1120          * This is a bit lame, but is optimized for the common execution path.
1121          * A wait_queue on the drv_data->busy could be used, but then the common
1122          * execution path (pump_messages) would be required to call wake_up or
1123          * friends on every SPI message. Do this instead
1124          */
1125         drv_data->run = QUEUE_STOPPED;
1126         while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
1127                 spin_unlock_irqrestore(&drv_data->lock, flags);
1128                 msleep(10);
1129                 spin_lock_irqsave(&drv_data->lock, flags);
1130         }
1131
1132         if (!list_empty(&drv_data->queue) || drv_data->busy)
1133                 status = -EBUSY;
1134
1135         spin_unlock_irqrestore(&drv_data->lock, flags);
1136
1137         return status;
1138 }
1139
1140 static inline int destroy_queue(struct driver_data *drv_data)
1141 {
1142         int status;
1143
1144         status = stop_queue(drv_data);
1145         if (status != 0)
1146                 return status;
1147
1148         destroy_workqueue(drv_data->workqueue);
1149
1150         return 0;
1151 }
1152
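/* Claim (action != 0) or release the SPI0 SCK/MISO/MOSI pins via the portmux layer */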
1153 static int setup_pin_mux(int action)
1154 {
1155
1156         u16 pin_req[] = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0};
1157
1158         if (action) {
1159                 if (peripheral_request_list(pin_req, DRV_NAME))
1160                         return -EFAULT;
1161         } else {
1162                 peripheral_free_list(pin_req);
1163         }
1164
1165         return 0;
1166 }
1167
1168 static int __init bfin5xx_spi_probe(struct platform_device *pdev)
1169 {
1170         struct device *dev = &pdev->dev;
1171         struct bfin5xx_spi_master *platform_info;
1172         struct spi_master *master;
1173         struct driver_data *drv_data = NULL;
1174         int status = 0;
1175
1176         platform_info = dev->platform_data;
1177
1178         /* Allocate master with space for drv_data */
1179         master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1180         if (!master) {
1181                 dev_err(&pdev->dev, "can not alloc spi_master\n");
1182                 return -ENOMEM;
1183         }
1184
1185         if (setup_pin_mux(1)) {
1186                 dev_err(&pdev->dev, "requesting peripherals failed\n");
1187                 goto out_error;
1188         }
1189
1190         drv_data = spi_master_get_devdata(master);
1191         drv_data->master = master;
1192         drv_data->master_info = platform_info;
1193         drv_data->pdev = pdev;
1194
1195         master->bus_num = pdev->id;
1196         master->num_chipselect = platform_info->num_chipselect;
1197         master->cleanup = cleanup;
1198         master->setup = setup;
1199         master->transfer = transfer;
1200
1201         /* Initialize and start the queue */
1202         status = init_queue(drv_data);
1203         if (status != 0) {
1204                 dev_err(&pdev->dev, "problem initializing queue\n");
1205                 goto out_error_queue_alloc;
1206         }
1207         status = start_queue(drv_data);
1208         if (status != 0) {
1209                 dev_err(&pdev->dev, "problem starting queue\n");
1210                 goto out_error_queue_alloc;
1211         }
1212
1213         /* Register with the SPI framework */
1214         platform_set_drvdata(pdev, drv_data);
1215         status = spi_register_master(master);
1216         if (status != 0) {
1217                 dev_err(&pdev->dev, "problem registering spi master\n");
1218                 goto out_error_queue_alloc;
1219         }
1220         dev_dbg(&pdev->dev, "controller probed successfully\n");
1221         return status;
1222
1223 out_error_queue_alloc:
1224         destroy_queue(drv_data);
1225 out_error:
1226         spi_master_put(master);
1227
1228         return status;
1229 }
1230
1231 /* stop hardware and remove the driver */
1232 static int __devexit bfin5xx_spi_remove(struct platform_device *pdev)
1233 {
1234         struct driver_data *drv_data = platform_get_drvdata(pdev);
1235         int status = 0;
1236
1237         if (!drv_data)
1238                 return 0;
1239
1240         /* Remove the queue */
1241         status = destroy_queue(drv_data);
1242         if (status != 0)
1243                 return status;
1244
1245         /* Disable the SPI at the peripheral and SOC level */
1246         bfin_spi_disable(drv_data);
1247
1248         /* Release DMA */
1249         if (drv_data->master_info->enable_dma) {
1250                 if (dma_channel_active(CH_SPI))
1251                         free_dma(CH_SPI);
1252         }
1253
1254         /* Disconnect from the SPI framework */
1255         spi_unregister_master(drv_data->master);
1256
1257         setup_pin_mux(0);
1258
1259         /* Prevent double remove */
1260         platform_set_drvdata(pdev, NULL);
1261
1262         return 0;
1263 }
1264
1265 #ifdef CONFIG_PM
1266 static int bfin5xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1267 {
1268         struct driver_data *drv_data = platform_get_drvdata(pdev);
1269         int status = 0;
1270
1271         status = stop_queue(drv_data);
1272         if (status != 0)
1273                 return status;
1274
1275         /* stop hardware */
1276         bfin_spi_disable(drv_data);
1277
1278         return 0;
1279 }
1280
1281 static int bfin5xx_spi_resume(struct platform_device *pdev)
1282 {
1283         struct driver_data *drv_data = platform_get_drvdata(pdev);
1284         int status = 0;
1285
1286         /* Enable the SPI interface */
1287         bfin_spi_enable(drv_data);
1288
1289         /* Start the queue running */
1290         status = start_queue(drv_data);
1291         if (status != 0) {
1292                 dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
1293                 return status;
1294         }
1295
1296         return 0;
1297 }
1298 #else
1299 #define bfin5xx_spi_suspend NULL
1300 #define bfin5xx_spi_resume NULL
1301 #endif                          /* CONFIG_PM */
1302
1303 MODULE_ALIAS("bfin-spi-master");        /* for platform bus hotplug */
1304 static struct platform_driver bfin5xx_spi_driver = {
1305         .driver = {
1306                 .name   = "bfin-spi-master",
1307                 .owner  = THIS_MODULE,
1308         },
1309         .suspend        = bfin5xx_spi_suspend,
1310         .resume         = bfin5xx_spi_resume,
1311         .remove         = __devexit_p(bfin5xx_spi_remove),
1312 };
1313
1314 static int __init bfin5xx_spi_init(void)
1315 {
1316         return platform_driver_probe(&bfin5xx_spi_driver, bfin5xx_spi_probe);
1317 }
1318 module_init(bfin5xx_spi_init);
1319
1320 static void __exit bfin5xx_spi_exit(void)
1321 {
1322         platform_driver_unregister(&bfin5xx_spi_driver);
1323 }
1324 module_exit(bfin5xx_spi_exit);