/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Dynamic DMA mapping support.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/*
 * This allocates one page of cache-coherent memory space and returns
 * both the virtual and a "dma" address to that space.  It is not clear
 * whether this could be called from an interrupt context or not.  For
 * now, we expressly forbid it, especially as some of the stuff we do
 * here is not interrupt context safe.
 *
 * We should allow this function to be called from interrupt context.
 * However, we call ioremap, which needs to fiddle around with various
 * things (like the vmlist_lock, and allocating page tables).  These
 * things aren't interrupt safe (yet).
 *
 * Note that this does *not* zero the allocated area!
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	struct page *page, *end, *free;
	unsigned long order;
	void *ret;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	*dma_handle = page_to_bus(page);
	ret = __ioremap(page_to_pfn(page) << PAGE_SHIFT, size, 0);
	if (!ret)
		goto no_remap;
#if 0 /* ioremap_does_flush_cache_all */
	{
		void *virt = page_address(page);

		/*
		 * we need to ensure that there are no cachelines in use, or
		 * worse dirty in this area.  Really, we don't need to do
		 * this since __ioremap does a flush_cache_all() anyway. --rmk
		 */
		invalidate_dcache_range(virt, virt + size);
	}
#endif
	/*
	 * free wasted pages.  We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	free = page + (size >> PAGE_SHIFT);
	end  = page + (1 << order);

	for (; page < end; page++) {
		set_page_count(page, 1);
		if (page >= free)
			__free_page(page);
		else
			SetPageReserved(page);
	}
	return ret;

no_remap:
	__free_pages(page, order);
no_page:
	return NULL;
}
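/*
 * Illustrative sketch only (not part of the original file): a driver
 * would typically pair consistent_alloc() with consistent_free(),
 * keeping both the CPU pointer and the bus address around.  The names
 * "my_dev" and "ring" below are made up for the example.
 */
#if 0
	struct my_dev {
		void		*ring;		/* CPU (ioremapped) address */
		dma_addr_t	ring_dma;	/* bus address for the device */
	};

	static int my_dev_setup(struct my_dev *dev)
	{
		dev->ring = consistent_alloc(GFP_KERNEL | GFP_DMA, PAGE_SIZE,
					     &dev->ring_dma);
		if (!dev->ring)
			return -ENOMEM;
		/* consistent_alloc() does not zero the area for us */
		memset(dev->ring, 0, PAGE_SIZE);
		return 0;
	}

	static void my_dev_teardown(struct my_dev *dev)
	{
		consistent_free(dev->ring, PAGE_SIZE, dev->ring_dma);
	}
#endif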
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
{
	int gfp = GFP_KERNEL;

#if defined(CONFIG_PCI) || defined(CONFIG_SA1111)
	if ((hwdev) == NULL || dev_is_sa1111(hwdev) ||
	    (hwdev)->dma_mask != 0xffffffff)
#endif
		gfp |= GFP_DMA;

	return consistent_alloc(gfp, size, handle);
}
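/*
 * Illustrative sketch only: PCI drivers normally go through this wrapper
 * rather than consistent_alloc() directly, so the GFP_DMA decision above
 * is made for them.  "pdev", "desc" and the register offset are made-up
 * names for the example.
 */
#if 0
	dma_addr_t desc_dma;
	void *desc = pci_alloc_consistent(pdev, 4096, &desc_dma);

	if (desc)
		writel(desc_dma, regs + DESC_BASE);	/* hand the bus address to the device */
#endif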
/*
 * free a page as defined by the above mapping.  We expressly forbid
 * calling this from interrupt context.
 */
void consistent_free(void *vaddr, size_t size, dma_addr_t handle)
{
	struct page *page, *end;

	if (in_interrupt())
		BUG();
	/*
	 * More messing around with the MM internals.  This is
	 * sick, but then so is remap_page_range().
	 */
	size = PAGE_ALIGN(size);
	page = virt_to_page(bus_to_virt(handle));
	end = page + (size >> PAGE_SHIFT);

	for (; page < end; page++)
		ClearPageReserved(page);

	/* undo the __ioremap() done in consistent_alloc() */
	__iounmap(vaddr);
}
/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;
	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
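/*
 * Illustrative sketch only: for streaming DMA on an ordinary kernel
 * buffer, the mapping layer calls consistent_sync() with the transfer
 * direction before handing the buffer to the device, and invalidates
 * before the CPU reads data the device has written.  "buf" and "len"
 * are made-up names for the example.
 */
#if 0
	/* CPU has written the data; push dirty cachelines out to memory */
	consistent_sync(buf, len, PCI_DMA_TODEVICE);

	/* device will write into the buffer; ensure no stale cachelines remain */
	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
#endif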