2 * arch/arm/kernel/dma-sa1100.c
4 * Support functions for the SA11x0 internal DMA channels.
5 * (see also Documentation/arm/SA1100/DMA)
7 * Copyright (C) 2000 Nicolas Pitre
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/sched.h>
18 #include <linux/spinlock.h>
19 #include <linux/slab.h>
20 #include <linux/errno.h>
22 #include <asm/system.h>
24 #include <asm/hardware.h>
27 #include <asm/mach/dma.h>
/*
 * Debug print helper: prefixes messages with the channel's device_id
 * (relies on a local `dma` variable being in scope at the call site).
 * NOTE(review): the two definitions below conflict as shown; the
 * original file almost certainly selected between them with
 * #ifdef DEBUG / #else / #endif, lost in this extract -- confirm
 * against the upstream source.
 */
32 #define DPRINTK( s, arg... ) printk( "dma<%s>: " s, dma->device_id , ##arg )
34 #define DPRINTK( x... )
39  * DMA control register structure
/*
 * Memory-mapped SA11x0 DMA channel register layout.  SetDCSR/ClrDCSR
 * are write-to-set / write-to-clear views of the control/status
 * register; RdDCSR reads the current status; DBSA/DBSB are the
 * buffer-A/buffer-B start-address registers.
 * NOTE(review): the struct's opening/closing lines and the DDAR,
 * DBTA and DBTB members referenced elsewhere in this file are
 * missing from this extract.
 */
43 	volatile u_long SetDCSR;
44 	volatile u_long ClrDCSR;
45 	volatile u_long RdDCSR;
46 	volatile dma_addr_t DBSA;
48 	volatile dma_addr_t DBSB;
/* Per-channel software state, one slot per hardware DMA channel. */
54 sa1100_dma_t dma_chan[MAX_SA1100_DMA_CHANNELS];
57  * Maximum physical DMA buffer size
/*
 * Hardware limit on a single transfer; oversized chunks are clamped
 * to 1 << MAX_DMA_ORDER (4 KiB) by the queueing code below --
 * presumably to keep chunk sizes power-of-two; confirm rationale.
 */
59 #define MAX_DMA_SIZE	0x1fff
60 #define MAX_DMA_ORDER	12
/*
 * start_sa1100_dma - hand one chunk (dma_ptr, size) to the SA11x0
 * DMA engine on channel @dma.
 *
 * The controller double-buffers: transfer descriptors A and B toggle,
 * with DCSR_BIU indicating which one the hardware is currently using.
 * If both halves are already started there is nothing to do; otherwise
 * the idle half is programmed and started with interrupts enabled.
 *
 * NOTE(review): several lines of the original are missing from this
 * extract (the `status` declaration, the DBSA/DBTA and DBSB/DBTB
 * address/size writes, the return statements and some braces) --
 * compare with the full upstream file before changing anything here.
 */
67 static inline int start_sa1100_dma(sa1100_dma_t * dma, dma_addr_t dma_ptr, int size)
69 	dma_regs_t *regs = dma->regs;
72 	status = regs->RdDCSR;
74 	/* If both DMA buffers are started, there's nothing else we can do. */
75 	if ((status & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB)) {
76 		DPRINTK("start: st %#x busy\n", status);
/* Use buffer A when B is the busy one, or when neither has started. */
80 	if (((status & DCSR_BIU) && (status & DCSR_STRTB)) ||
81 	    (!(status & DCSR_BIU) && !(status & DCSR_STRTA))) {
82 		if (status & DCSR_DONEA) {
83 			/* give a chance for the interrupt to be processed */
88 		regs->SetDCSR = DCSR_STRTA | DCSR_IE | DCSR_RUN;
89 		DPRINTK("start a=%#x s=%d on A\n", dma_ptr, size);
/* Otherwise program and kick buffer B. */
91 		if (status & DCSR_DONEB) {
92 			/* give a chance for the interrupt to be processed */
97 		regs->SetDCSR = DCSR_STRTB | DCSR_IE | DCSR_RUN;
98 		DPRINTK("start a=%#x s=%d on B\n", dma_ptr, size);
/*
 * start_dma - dispatch a chunk to the right engine: SA-1111 SAC
 * channels get their own start routine, everything else goes to the
 * SA11x0 on-chip controller.  Returns 0 on success, nonzero if busy.
 */
108 static int start_dma(sa1100_dma_t *dma, dma_addr_t dma_ptr, int size)
110 	if (channel_is_sa1111_sac(dma - dma_chan))
111 		return start_sa1111_sac_dma(dma, dma_ptr, size);
112 	return start_sa1100_dma(dma, dma_ptr, size);
116 /* This must be called with IRQ disabled */
/*
 * process_dma - feed the DMA engine from the queued-buffer list.
 *
 * Pulls the buffer at the tail of the queue (fragment: the `buf`
 * assignment and loop structure are missing from this extract) and
 * starts it in chunks no larger than the hardware allows.  When no
 * buffer is queued, optionally keeps the channel "spinning" on a
 * dummy buffer for devices that need a continuous clock reference.
 */
117 static void process_dma(sa1100_dma_t * dma)
125 		if (!buf || dma->stopped) {
126 			/* no more data available */
127 			DPRINTK("process: no more buf (dma %s)\n",
128 				dma->curr ? "active" : "inactive");
130 			 * Some devices may require DMA still sending data
131 			 * at any time for clock reference, etc.
132 			 * Note: if there is still a data buffer being
133 			 * processed then the ref count is negative.  This
134 			 * allows for the DMA termination to be accounted in
137 			if (dma->spin_size && dma->spin_ref >= 0) {
138 				chunksize = dma->spin_size;
139 				if (chunksize > MAX_DMA_SIZE)
140 					chunksize = (1 << MAX_DMA_ORDER);
/* Fill every idle hardware buffer with the spin buffer. */
141 				while (start_dma(dma, dma->spin_addr, chunksize) == 0)
143 				if (dma->curr != NULL)
144 					dma->spin_ref = -dma->spin_ref;
150 		 * This improves latency if there are some active spinning
151 		 * buffers.  We kill them altogether.
153 		if (dma->spin_ref > 0) {
154 			if (channel_is_sa1111_sac(dma - dma_chan))
155 				sa1111_reset_sac_dma(dma - dma_chan);
/* SA11x0 path: stop both halves and clear completion flags
 * (the register write targeted by this mask is truncated here). */
158 				    DCSR_STRTA|DCSR_STRTB|DCSR_DONEA|DCSR_DONEB;
163 		 * Let's try to start DMA on the current buffer.
164 		 * If DMA is busy then we break here.
166 		chunksize = buf->size;
167 		if (chunksize > MAX_DMA_SIZE)
168 			chunksize = (1 << MAX_DMA_ORDER);
169 		DPRINTK("process: b=%#x s=%d\n", (int) buf->id, buf->size);
170 		if (start_dma(dma, buf->dma_ptr, chunksize) != 0)
/* Chunk accepted by hardware: advance within the current buffer. */
175 		buf->dma_ptr += chunksize;
176 		buf->size -= chunksize;
177 		if (buf->size == 0) {
178 			/* current buffer is done: move tail to the next one */
179 			dma->tail = buf->next;
180 			DPRINTK("process: next b=%#x\n", (int) dma->tail);
186 /* This must be called with IRQ disabled */
/*
 * sa1100_dma_done - account one completed hardware transfer on the
 * channel's current buffer.  When the whole buffer has been sent
 * (ref count drained and size exhausted), advance dma->curr and hand
 * the finished buffer to the client callback with the byte count
 * actually transferred.  A positive spin_ref means the completion
 * belongs to the dummy spin buffer, not to client data (fragment:
 * the early-return handling for that case is truncated here).
 */
187 void sa1100_dma_done (sa1100_dma_t *dma)
189 	dma_buf_t *buf = dma->curr;
191 	if (dma->spin_ref > 0) {
195 	if (buf->ref == 0 && buf->size == 0) {
197 		 * Current buffer is done.
198 		 * Move current reference to the next one and send
199 		 * the processed buffer to the callback function,
202 		DPRINTK("IRQ: buf done\n");
203 		dma->curr = buf->next;
/* Flip spin_ref back to positive: no client buffer in flight now
 * (see the sign convention described in process_dma). */
204 		dma->spin_ref = -dma->spin_ref;
205 		if (dma->head == buf)
/* Report how many bytes of this buffer actually went out. */
208 			int size = buf->dma_ptr - buf->dma_start;
209 			dma->callback(buf->id, size);
/*
 * dma_irq_handler - SA11x0 DMA channel interrupt.
 * On error, log and clear the error bit.  Otherwise acknowledge the
 * DONEA/DONEB completion flags and call sa1100_dma_done() once per
 * completed half, so both toggles are accounted when they finish in
 * the same interrupt.
 */
219 static void dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
221 	sa1100_dma_t *dma = (sa1100_dma_t *) dev_id;
222 	int status = dma->regs->RdDCSR;
224 	DPRINTK("IRQ: b=%#x st=%#x\n", (int) dma->curr->id, status);
226 	if (status & (DCSR_ERROR)) {
227 		printk(KERN_ERR "DMA on \"%s\" caused an error\n", dma->device_id);
228 		dma->regs->ClrDCSR = DCSR_ERROR;
/* Ack only the completion bits that were actually set. */
231 	dma->regs->ClrDCSR = status & (DCSR_DONEA | DCSR_DONEB);
232 	if (status & DCSR_DONEA)
233 		sa1100_dma_done (dma);
234 	if (status & DCSR_DONEB)
235 		sa1100_dma_done (dma);
240  * DMA interface functions
/* Protects the dma_chan[] allocation scan in sa1100_request_dma(). */
243 static spinlock_t dma_list_lock;
/*
 * sa1100_request_dma - claim a free DMA channel for @device_id.
 * On success writes the channel number through @channel, hooks the
 * channel's interrupt, and resets the control register.  *channel is
 * pre-set to -1 so a failed request is caught if the caller later
 * tries to free it.  Fragment: the free-channel selection, error
 * paths and return values are truncated in this extract.
 */
245 int sa1100_request_dma (dmach_t * channel, const char *device_id,
248 	sa1100_dma_t *dma = NULL;
252 	*channel = -1;		/* to be sure we catch the freeing of a misregistered channel */
/* Scan under the list lock for a channel not in_use (and not already
 * bound to this device). */
255 	spin_lock(&dma_list_lock);
256 	for (i = 0; i < SA1100_DMA_CHANNELS; i++) {
257 		if (dma_chan[i].in_use) {
258 			if (dma_chan[i].device == device) {
272 	spin_unlock(&dma_list_lock);
/* SA_INTERRUPT: run the handler with interrupts disabled. */
276 	err = request_irq(dma->irq, dma_irq_handler, SA_INTERRUPT,
277 			  device_id, (void *) dma);
280 		       "%s: unable to request IRQ %d for DMA channel\n",
281 		       device_id, dma->irq);
285 	*channel = dma - dma_chan;
286 	dma->device_id = device_id;
287 	dma->device = device;
288 	dma->callback = NULL;
/* Clear every control/status bit to put the channel in a known
 * stopped state (target register write truncated in this extract). */
293 	    (DCSR_DONEA | DCSR_DONEB | DCSR_STRTA | DCSR_STRTB |
294 	     DCSR_IE | DCSR_ERROR | DCSR_RUN);
296 	DPRINTK("requested\n");
/*
 * sa1100_dma_set_callback - register the buffer-completion callback
 * for @channel.  Rejects out-of-range or unallocated channels.
 * NOTE(review): `dma` is dereferenced via &dma_chan[channel] before
 * the range check; safe only because it is an address computation,
 * not a load -- the ->in_use read is short-circuited for bad indices
 * only if `channel` is within the array.  Confirm against upstream.
 */
301 int sa1100_dma_set_callback(dmach_t channel, dma_callback_t cb)
303 	sa1100_dma_t *dma = &dma_chan[channel];
305 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
309 	DPRINTK("cb = %p\n", cb);
/*
 * sa1100_dma_set_spin - configure (or disable, with size 0) the dummy
 * "spin" buffer at @addr/@size that keeps the channel transferring
 * when no real data is queued.  State is updated with local IRQs off
 * so it cannot race the interrupt-context queue processing.
 */
314 int sa1100_dma_set_spin(dmach_t channel, dma_addr_t addr, int size)
316 	sa1100_dma_t *dma = &dma_chan[channel];
319 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
322 	DPRINTK("set spin %d at %#x\n", size, addr);
323 	local_irq_save(flags);
324 	dma->spin_addr = addr;
325 	dma->spin_size = size;
328 	local_irq_restore(flags);
/*
 * sa1100_dma_queue_buffer - append a physical buffer (@data, @size)
 * to the channel's transfer queue; @buf_id is an opaque cookie
 * returned to the client in the completion callback.  The bookkeeping
 * node is GFP_ATOMIC-allocated so this is callable from atomic
 * context.  Fragment: allocation-failure handling, list-linking for
 * an empty queue and the kick of process_dma() are truncated here.
 */
333 int sa1100_dma_queue_buffer(dmach_t channel, void *buf_id,
334 			    dma_addr_t data, int size)
340 	dma = &dma_chan[channel];
341 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
344 	buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
/* dma_ptr advances as chunks complete; dma_start stays at the
 * beginning so the transferred length can be computed. */
350 	buf->dma_ptr = buf->dma_start = data;
353 	DPRINTK("queueing b=%#x a=%#x s=%d\n", (int) buf_id, data, size);
/* Link onto the head of the singly linked queue with IRQs off. */
355 	local_irq_save(flags);
357 		dma->head->next = buf;
362 	local_irq_restore(flags);
/*
 * sa1100_dma_get_current - report the buffer id and the device's
 * current position within it.  For SA-1111 SAC channels this is
 * delegated to the SAC-specific implementation.  When nothing is
 * active, falls back to the stopped tail buffer's position (fragment:
 * the final "no buffer" branch and return value are truncated here).
 */
368 int sa1100_dma_get_current(dmach_t channel, void **buf_id, dma_addr_t *addr)
370 	sa1100_dma_t *dma = &dma_chan[channel];
374 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
377 	if (channel_is_sa1111_sac(channel))
378 		return sa1111_dma_get_current(channel, buf_id, addr);
381 	local_irq_save(flags);
/* spin_ref <= 0 means the in-flight transfer is real client data,
 * not the dummy spin buffer (see sign convention in process_dma). */
382 	if (dma->curr && dma->spin_ref <= 0) {
383 		dma_buf_t *buf = dma->curr;
384 		int status, using_bufa;
386 		status = regs->RdDCSR;
388 		 * If we got here, that's because there is, or recently was, a
389 		 * buffer being processed.  We must determine whether buffer
390 		 * A or B is active.  Two possibilities: either we are
391 		 * in the middle of a buffer, or the DMA controller just
392 		 * switched to the next toggle but the interrupt hasn't been
393 		 * serviced yet.  The former case is straight forward.  In
394 		 * the later case, we'll do like if DMA is just at the end
395 		 * of the previous toggle since all registers haven't been
396 		 * reset yet.  This goes around the edge case and since we're
397 		 * always a little behind anyways it shouldn't make a big
398 		 * difference.  If DMA has been stopped prior calling this
399 		 * then the position is always exact.
401 		using_bufa = ((!(status & DCSR_BIU) &&  (status & DCSR_STRTA)) ||
402 			      ( (status & DCSR_BIU) && !(status & DCSR_STRTB)));
/* Read the hardware's current address from the active toggle. */
405 		*addr = (using_bufa) ? regs->DBSA : regs->DBSB;
407 		 * Clamp funky pointers sometimes returned by the hardware
408 		 * on completed DMA transfers
410 		if (*addr < buf->dma_start ||
411 		    *addr > buf->dma_ptr)
412 			*addr = buf->dma_ptr;
413 		DPRINTK("curr_pos: b=%#x a=%#x\n", (int)dma->curr->id, *addr);
415 	} else if (dma->tail && dma->stopped) {
416 		dma_buf_t *buf = dma->tail;
/* Channel stopped: the software chunk pointer is exact. */
419 		*addr = buf->dma_ptr;
427 	local_irq_restore(flags);
/*
 * sa1100_dma_stop - pause the channel, rewinding the current buffer's
 * software pointers to the hardware's exact position so a later
 * resume restarts precisely where the device left off.  SA-1111 SAC
 * channels are delegated to their own stop routine.  Fragment: the
 * in_use/range check, the dma->stopped update and the return value
 * are truncated in this extract.
 */
432 int sa1100_dma_stop(dmach_t channel)
434 	sa1100_dma_t *dma = &dma_chan[channel];
437 	if (channel_is_sa1111_sac(channel))
438 		return sa1111_dma_stop(channel);
442 	local_irq_save(flags);
445 	 * Stop DMA and tweak state variables so everything could restart
446 	 * from there when resume/wakeup occurs.
448 	dma->regs->ClrDCSR = DCSR_RUN | DCSR_IE;
450 		dma_buf_t *buf = dma->curr;
451 		if (dma->spin_ref <= 0) {
/* Pull the true position from hardware and push the unsent bytes
 * back into the buffer accounting. */
453 			sa1100_dma_get_current(channel, NULL, &curpos);
454 			buf->size += buf->dma_ptr - curpos;
455 			buf->dma_ptr = curpos;
/* Drop both start bits and ack any pending completions. */
462 	dma->regs->ClrDCSR = DCSR_STRTA|DCSR_STRTB|DCSR_DONEA|DCSR_DONEB;
464 	local_irq_restore(flags);
/*
 * sa1100_dma_resume - restart a channel previously paused with
 * sa1100_dma_stop().  Fragment: the statements between the IRQ
 * save/restore (presumably clearing dma->stopped and re-kicking
 * process_dma) are truncated in this extract -- confirm upstream.
 */
469 int sa1100_dma_resume(dmach_t channel)
471 	sa1100_dma_t *dma = &dma_chan[channel];
473 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
476 	if (channel_is_sa1111_sac(channel))
477 		return sa1111_dma_resume(channel);
481 	local_irq_save(flags);
484 	local_irq_restore(flags);
/*
 * sa1100_dma_flush_all - abort the channel and discard every queued
 * buffer.  Hardware is fully stopped (or the SAC engine reset), the
 * queue pointers are cleared under IRQ protection, then the buffer
 * nodes are freed outside the critical section.  Fragment: the loop
 * freeing buf/next_buf is partially truncated in this extract.
 */
490 int sa1100_dma_flush_all(dmach_t channel)
492 	sa1100_dma_t *dma = &dma_chan[channel];
493 	dma_buf_t *buf, *next_buf;
496 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
499 	local_irq_save(flags);
500 	if (channel_is_sa1111_sac(channel))
501 		sa1111_reset_sac_dma(channel);
/* SA11x0: stop both toggles, ack completions, disable run + IRQ. */
503 		dma->regs->ClrDCSR = DCSR_STRTA|DCSR_STRTB|DCSR_DONEA|DCSR_DONEB|DCSR_RUN|DCSR_IE;
507 	dma->head = dma->tail = dma->curr = NULL;
511 	local_irq_restore(flags);
/* Free the detached list after interrupts are back on. */
513 		next_buf = buf->next;
517 	DPRINTK("flushed\n");
/*
 * sa1100_free_dma - release a channel obtained with
 * sa1100_request_dma(): cancel spinning, flush queued buffers, then
 * tear down the SAC state or release the IRQ as appropriate.
 * Freeing an unallocated channel is logged and ignored (fragment:
 * the early-return and the final in_use clearing are truncated).
 */
522 void sa1100_free_dma(dmach_t channel)
526 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS)
529 	dma = &dma_chan[channel];
531 		printk(KERN_ERR "Trying to free free DMA%d\n", channel);
535 	sa1100_dma_set_spin(channel, 0, 0);
536 	sa1100_dma_flush_all(channel);
538 	if (channel_is_sa1111_sac(channel)) {
539 		sa1111_cleanup_sac_dma(channel);
541 		free_irq(IRQ_DMA0 + channel, (void *) dma);
/* Public DMA API exported to kernel modules. */
549 EXPORT_SYMBOL(sa1100_request_dma);
550 EXPORT_SYMBOL(sa1100_dma_set_callback);
551 EXPORT_SYMBOL(sa1100_dma_set_spin);
552 EXPORT_SYMBOL(sa1100_dma_queue_buffer);
553 EXPORT_SYMBOL(sa1100_dma_get_current);
554 EXPORT_SYMBOL(sa1100_dma_stop);
555 EXPORT_SYMBOL(sa1100_dma_resume);
556 EXPORT_SYMBOL(sa1100_dma_flush_all);
557 EXPORT_SYMBOL(sa1100_free_dma);
561 /* Drivers should call this from their PM callback function */
/*
 * sa1100_dma_sleep - quiesce a channel for suspend.  Uses
 * sa1100_dma_stop() to snapshot the exact position, then clears the
 * run/IRQ/start bits, restoring dma->stopped so the pre-suspend
 * stopped/running state is preserved for sa1100_dma_wakeup().
 * SA-1111 SAC channels are simply reset (admittedly a placeholder,
 * per the original comment).
 */
563 int sa1100_dma_sleep(dmach_t channel)
565 	sa1100_dma_t *dma = &dma_chan[channel];
568 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
571 	if (channel_is_sa1111_sac(channel)) {
572 		/* We'll cheat a little until someone actually
573 		 * write the real thing.
575 		sa1111_reset_sac_dma(channel);
/* Remember whether the client had the channel stopped; dma_stop sets
 * dma->stopped itself, so put the original value back afterwards. */
579 	orig_state = dma->stopped;
580 	sa1100_dma_stop(channel);
581 	dma->regs->ClrDCSR = DCSR_RUN | DCSR_IE | DCSR_STRTA | DCSR_STRTB;
582 	dma->stopped = orig_state;
/*
 * sa1100_dma_wakeup - reinitialize a channel after resume: reset the
 * control/status register, reprogram the device address register
 * (DDAR), and restart queue processing with IRQs off.  SA-1111 SAC
 * handling is a placeholder, as in sa1100_dma_sleep().  Fragment:
 * the statements inside the final IRQ-off section are truncated.
 */
587 int sa1100_dma_wakeup(dmach_t channel)
589 	sa1100_dma_t *dma = &dma_chan[channel];
593 	if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
596 	if (channel_is_sa1111_sac(channel)) {
597 		/* We'll cheat a little until someone actually
598 		 * write the real thing.
/* Clear every control/status bit, then restore the device binding. */
605 	    (DCSR_DONEA | DCSR_DONEB | DCSR_STRTA | DCSR_STRTB |
606 	     DCSR_IE | DCSR_ERROR | DCSR_RUN);
607 	regs->DDAR = dma->device;
608 	local_irq_save(flags);
610 	local_irq_restore(flags);
/* Power-management entry points exported to modules. */
614 EXPORT_SYMBOL(sa1100_dma_sleep);
615 EXPORT_SYMBOL(sa1100_dma_wakeup);
/*
 * sa1100_init_dma - boot-time setup: point each channel's register
 * block at its memory-mapped DDAR base and record its IRQ number.
 * Registered as an initcall below.
 */
620 static int __init sa1100_init_dma(void)
623 	for (channel = 0; channel < SA1100_DMA_CHANNELS; channel++) {
624 		dma_chan[channel].regs =
625 		    (dma_regs_t *) &DDAR(channel);
626 		dma_chan[channel].irq = IRQ_DMA0 + channel;
631 __initcall(sa1100_init_dma);