/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

/* Most Alphas support 32-bit ISA DMA.  Exceptions are XL, Ruffian,
   Sable, and Alcor (see asm-alpha/dma.h for details).  */
#define ISA_DMA_MASK		(MAX_DMA_ADDRESS - IDENT_ADDR - 1)
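/* MAX_DMA_ADDRESS is an identity-mapped (kseg) kernel virtual address,
   so subtracting IDENT_ADDR leaves the bus-visible span of ISA DMA;
   on machines with full 32-bit ISA DMA this presumably works out to a
   mask of 0xffffffff.  */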
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
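/* For a page-aligned PADDR the pte above is simply the page frame
   number shifted left by one, with bit 0 serving as the valid bit --
   the layout the chipset scatter-gather TLBs expect.  */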
static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = 1UL << ceil_log2(mem);
	return max;
}
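/* Example: with 1.5 GB of main memory, a 4 GB request comes back as
   2 GB (the first power of two above memory), while a 1 GB request is
   returned unchanged.  */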
struct pci_iommu_arena *
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;
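	/* Example: with 8 KB pages and 8-byte ptes, a 2 GB window needs a
	   2 MB pte table (2 GB / 1024), and that table must itself be 2 MB
	   aligned so the hardware can concatenate the table base with the
	   page index.  */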
#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */
	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
struct pci_iommu_arena *
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = (arena->next_entry + mask) & ~mask;
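	/* (x + mask) & ~mask rounds x up to the next multiple of mask+1,
	   so the search always starts at a suitably aligned entry.  */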
	i = 0;
	while (i < n && p+i < nent) {
		if (ptes[p+i])
			p = (p + i + 1 + mask) & ~mask, i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		/* Reached the end.  Flush the TLB and restart the
		   search from the beginning.  */
		alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

		p = 0, i = 0;
		while (i < n && p+i < nent) {
			if (ptes[p+i])
				p = (p + i + 1 + mask) & ~mask, i = 0;
			else
				i = i + 1;
		}

		if (i < n)
			return -1;
	}

	/* Success.  It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
static long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, i.e. not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
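/* Typical driver usage of this interface (illustrative only):

	dma_addr_t dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (!dma)
		goto fail;
	... hand DMA to the device and wait for completion ...
	pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
*/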
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
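	/* Both tests below are needed: the first checks that the bus address
	   of the last byte (paddr + __direct_map_base + size - 1) is still
	   addressable by the device, the second that the buffer lies
	   entirely inside the direct-map window.  */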
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error.  */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);
	/* Force allocation to 64KB boundary for all ISA devices. */
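	/* (Eight entries at the 8 KB Alpha page size gives the 64 KB
	   alignment mentioned above.)  */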
	dma_ofs = iommu_arena_alloc(arena, npages, pdev ? 0 : 8);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}

dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */
void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
		BUG();
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */
void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	int gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu.  Try again with GFP_DMA.  */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */
void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */
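/* Illustrative example: for a three-entry list in which entry 1 is
   physically adjacent to entry 0 but entry 2 is unrelated, entry 0
   becomes a leader (dma_address 0, dma_length = length0 + length1),
   entry 1 is marked -1, and entry 2 becomes a leader of its own.  */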
#define SG_ENT_VIRT_ADDRESS(SG) \
	((SG)->address				\
	 ? (SG)->address			\
	 : page_address((SG)->page) + (SG)->offset)

#define SG_ENT_PHYS_ADDRESS(SG) \
	__pa(SG_ENT_VIRT_ADDRESS(SG))
static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */
static int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
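		/* (The retry cannot loop forever: after reclassifying with
		   virt_ok == 0, leaders carry dma_address 0, so a second
		   allocation failure takes the "die" branch above instead
		   of reclassifying again.)  */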
		sg_classify(leader, end, 0);
		return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
	}
	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);
	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
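	/* (The second alternative matters when the window is larger than
	   installed memory: every physical page still lands at a bus
	   address below __direct_map_base plus the memory size, so the
	   mask only needs to cover that.)  */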
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask
		|| __direct_map_base + (max_low_pfn<<PAGE_SHIFT)-1 <= mask))
		return 1;
	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (i.e. not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
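/* (DAC here is the PCI dual-address cycle, i.e. 64-bit bus addressing;
   alpha_mv.pci_dac_offset is the high address bit pattern that the
   chipset, on machines that support it, decodes as an untranslated
   64-bit access rather than a scatter-gather window hit.)  */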
int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
		    unsigned long offset, int direction)
{
	return (alpha_mv.pci_dac_offset
		+ __pa(page_address(page))
		+ (dma64_addr_t) offset);
}

struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset;
	return virt_to_page(__va(paddr));
}

unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return (dma_addr & ~PAGE_MASK);
}