/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOMMU/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/pbm.h>

#include "iommu_common.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
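
/* Each context owns one 8-byte match register in the streaming
 * cache, so context CTX's register sits CTX * 8 bytes (the "<< 3"
 * above) past strbuf_ctxmatch_base.
 */
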
/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
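
/* Illustrative sketch only: both accessors take a *physical*
 * register address and bypass the MMU via ASI_PHYS_BYPASS_EC_E,
 * so something like
 *
 *	pci_iommu_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
 *	(void) pci_iommu_read(iommu->write_complete_reg);
 *
 * would program the TSB base and then force the PIO write to
 * complete.  (The tsbbase write is a made-up example here; actual
 * controller setup lives in the per-chip probe code.)
 */
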
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
	unsigned long tag;
	int entry;

	/* Invalidate all 16 IOMMU TLB entries through the
	 * diagnostic tag registers.
	 */
	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	for (entry = 0; entry < 16; entry++) {
		pci_iommu_write(tag, 0);
		tag += 8UL;
	}

	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);

	/* Now update everyone's flush point. */
	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}
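
/* The streaming map allocators never flush the IOMMU TLB on every
 * unmap.  Each cluster instead records a "flush point", and the TLB
 * is flushed lazily when an allocation scan wraps around to it;
 * __iommu_flushall() resets every cluster's flush point to its
 * current allocation index.
 */
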
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
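
/* For example, a writable streaming pte for context 5 is built below
 * as IOPTE_STREAMING(5) | IOPTE_WRITE | (paddr & IOPTE_PAGE):
 * valid + cacheable + streaming-buffer enable + context field +
 * page frame.
 */
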
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
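
/* A late or errant DMA therefore lands on harmless memory instead of
 * faulting on an invalid translation, and "is this slot free?"
 * reduces to the simple page-address compare above.
 */
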
static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
{
	int i;

	tsbsize /= sizeof(iopte_t);

	for (i = 0; i < tsbsize; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);
}

static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	/* Round the request up to the nearest power-of-two slot size. */
	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte  = (iommu->page_table +
		  (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	/* Cluster zero is shared with the consistent-map area, so it
	 * is bounded by the lowest consistent mapping.
	 */
	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table +
				 (cnum <<
				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
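
/* Cluster sizes are powers of two: a request for npages is rounded
 * up, so npages = 5 is served from the cnum = 3 region in units of
 * 1 << 3 = 8 entries.  Each region is scanned round-robin starting
 * at alloc_info[cnum].next, and the IOMMU is flushed only when the
 * scan reaches the recorded flush point.
 */
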
static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
				   unsigned long npages, unsigned long ctx)
{
	unsigned long cnum, ent;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;

	/* Recover the slot index within cluster cnum from the bus
	 * address: the shift pair strips the bits below the slot size
	 * and above the table.
	 */
	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to re-use it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
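
/* between(X, Y, Z) exploits unsigned wraparound: (Z - Y) >= (X - Y)
 * holds exactly when X lies on the ring segment from Y to Z.  For
 * example with next = 14 and flush = 2 (mod 16), entry 15 satisfies
 * (2 - 14) >= (15 - 14) in unsigned arithmetic, so it counts as
 * "between" even though 15 > 2 numerically.
 */
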
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
	while (iopte > iommu->page_table) {
		iopte--;
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			unsigned long tmp = npages;

			/* Walk backwards looking for npages of
			 * contiguous free entries.
			 */
			while (--tmp) {
				iopte--;
				if (!IOPTE_IS_DUMMY(iommu, iopte))
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
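
/* lowest_consistent_map is the watermark that keeps the two
 * allocators apart: consistent mappings grow downward from the end
 * of cluster zero, and alloc_streaming_cluster() uses the watermark
 * as its upper limit when scanning cluster zero.
 */
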
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page, ctx;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	{
		int i;
		u32 daddr = *dma_addrp;

		npages = size >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			pci_iommu_write(iommu->iommu_flush, daddr);
			daddr += IO_PAGE_SIZE;
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
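
/* Typical (hypothetical) driver usage:
 *
 *	dma_addr_t dma_handle;
 *	void *ring = pci_alloc_consistent(pdev, RING_BYTES, &dma_handle);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... hand dma_handle to the device, use ring from the CPU ...
 *	pci_free_consistent(pdev, RING_BYTES, ring, dma_handle);
 *
 * RING_BYTES is a made-up name for whatever size the device needs.
 */
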
/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages, i, ctx;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	if ((iopte - iommu->page_table) ==
	    iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = (iommu->page_table +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
		while (walk < limit) {
			if (!IOPTE_IS_DUMMY(iommu, walk))
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	/* Data for consistent mappings cannot enter the streaming
	 * buffers, so we only need to update the TSB.  We flush
	 * the IOMMU here as well to prevent conflicts with the
	 * streaming mapping deferred tlb flush scheme.
	 */

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

	for (i = 0; i < npages; i++, iopte++)
		iopte_make_dummy(iommu, iopte);

	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		for (i = 0; i < npages; i++) {
			u32 daddr = dvma + (i << IO_PAGE_SHIFT);

			pci_iommu_write(iommu->iommu_flush, daddr);
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
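
/* Hypothetical driver usage, e.g. for a transmit buffer:
 *
 *	dma_addr_t mapping = pci_map_single(pdev, skb->data, skb->len,
 *					    PCI_DMA_TODEVICE);
 *	... point the device at "mapping", let it DMA ...
 *	pci_unmap_single(pdev, mapping, skb->len, PCI_DMA_TODEVICE);
 *
 * Note that PCI_DMA_TODEVICE mappings are created read-only for the
 * device (no IOPTE_WRITE above).
 */
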
static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
{
	int limit;

	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

		limit = 100000;
		do {
			pci_iommu_write(flushreg, ctx);
			udelay(10);
			limit--;
			if (!limit)
				break;
		} while (((long)pci_iommu_read(matchreg)) < 0L);
		if (!limit)
			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
			       "timeout vaddr[%08x] ctx[%lx]\n",
			       vaddr, ctx);
	} else {
		unsigned long i;

		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
	}

	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		membar("#LoadLoad");
	}
	if (!limit)
		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
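
/* Flush protocol: the flush flag word is cleared, the streaming
 * cache is told to write dirty lines back (by context match or one
 * page at a time), and a flush-sync write of the flag's physical
 * address asks the hardware to set the flag once every prior flush
 * has reached memory.  Both polls are bounded so a wedged streaming
 * cache produces a warning rather than a silent hang.
 */
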
/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
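
/* Only the first pte of the slot is returned to dummy state.  That
 * is sufficient because alloc_streaming_cluster() probes free slots
 * at cluster granularity, testing exactly that leading entry.
 */
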
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
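
/* fill_sg() is handed "nused" coalesced DMA segments (as produced by
 * prepare_sg()) covering "nelems" physical scatterlist entries.  For
 * each DMA segment it walks the physical pages behind the member
 * entries and emits one iopte per IOMMU page, merging physically
 * contiguous neighbors as it goes.
 */
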
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I was
 * having a hard time keeping this routine from using stack slots
 * for holding variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_map_single(pdev,
				       (page_address(sglist->page) + sglist->offset),
				       sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;
	fill_sg(base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
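
/* Hypothetical driver usage:
 *
 *	int count = pci_map_sg(pdev, sglist, nelems, PCI_DMA_FROMDEVICE);
 *	for (i = 0; i < count; i++)
 *		... program sglist[i].dma_address / dma_length ...
 *	pci_unmap_sg(pdev, sglist, nelems, PCI_DMA_FROMDEVICE);
 *
 * The device sees "count" coalesced segments, possibly fewer than
 * the nelems entries handed in.
 */
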
/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	u32 bus_addr;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	for (i = 1; i < nelems; i++)
		if (!sglist[i].dma_length)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);
	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}

int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct pcidev_cookie *pcp = pdev->sysdata;
		struct pci_iommu *iommu = pcp->pbm->iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}
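
/* Example: against a 32-bit dma_addr_mask (0xffffffff), a device
 * advertising device_mask 0xffffffff passes, while a 24-bit-only
 * device (0x00ffffff) fails: it cannot reach every address the
 * IOMMU might hand out.
 */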