/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8UL;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

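/* The mapping arena is a bitmap, one bit per IOMMU page.  A linear
 * scan runs from a rotating hint; if it reaches the end, the IOMMU
 * is flushed and the scan retried once from the bottom before
 * failing.
 */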
/* Based largely upon the ppc64 iommu allocator. */
static long arena_alloc(struct iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			__iommu_flushall(iommu);
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

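/* Initialize one IOMMU: the software state and lock, the
 * allocation bitmap, the dummy page that inactive IOPTEs point at,
 * and the IOMMU page table itself with every entry made dummy.
 */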
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
	arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}

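/* DMA contexts tag streaming mappings so the streaming buffer can
 * flush every entry for a context in one operation.  Context 0
 * means "no context"; the allocator scans upward from
 * ctx_lowest_free and wraps around once before settling for 0.
 */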
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

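/* Coherent allocations bypass the streaming buffer entirely: the
 * pages are mapped IOPTE_CONSISTENT under context 0 and are always
 * writable by the device.
 */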
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	free_npages(iommu, dvma - iommu->page_table_map_base, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

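/* Map a single physically contiguous buffer.  The returned DMA
 * address preserves the sub-page offset of the original pointer;
 * a context is allocated when the hardware can do context flushes.
 */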
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

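/* Push dirty data out of the streaming buffer for a DMA range.
 * When both the streaming buffer and IOMMU support it, a
 * context-match flush invalidates every entry tagged with ctx;
 * otherwise each IO page is flushed individually.  Unless the
 * transfer was DMA_TO_DEVICE, we then spin on the flush-flag to
 * know the writeback has completed.
 */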
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

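/* Write the IOPTEs for a scatterlist whose DMA segmentation was
 * already decided by prepare_sg().  One DMA segment (nused total)
 * can cover several physically contiguous SG entries, so the DMA
 * segments and the nelems SG entries are walked in parallel.
 */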
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems,
			   unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg_last(sg, nelems);
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg = sg_next(sg);
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg = sg_next(sg);

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg != sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg = sg_next(sg);
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg = sg_next(dma_sg);
	}
}

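/* Map a scatterlist.  Single-entry lists take the map_single()
 * fast path.  Otherwise prepare_sg() computes the segmentation, an
 * IOMMU page cluster (and possibly a context) is allocated, the
 * segment addresses are rebased onto it, and fill_sg() writes the
 * IOPTEs.  Returns the number of DMA segments used, 0 on error.
 */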
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4u_map_single(dev,
					  (page_address(sglist->page) +
					   sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (base == NULL)
		goto bad;

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp = sg_next(sgtmp);
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	return used;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}

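/* Undo a scatterlist mapping: flush the streaming buffer if it is
 * enabled, point every IOPTE back at the dummy page, and release
 * the arena pages and the context.
 */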
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out the TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

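/* The sun4u DMA operations vector; sun4v systems install their own
 * dma_ops.  Note only the for_cpu sync hooks are provided, the
 * streaming buffer only needs flushing before the CPU reads
 * device-written data.
 */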
const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

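/* The sun4u IOMMU provides a 32-bit DMA address space, so device
 * masks of 2^32 and above can never be satisfied here.  PCI
 * devices get a second opinion from the PCI layer.
 */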
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);