*/
#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-static inline struct iommu_table *devnode_table(struct device *dev)
+static inline struct iommu_table *device_to_table(struct device *hwdev)
{
struct pci_dev *pdev;
- if (!dev) {
+ if (!hwdev) {
pdev = ppc64_isabridge_dev;
if (!pdev)
return NULL;
} else
- pdev = to_pci_dev(dev);
+ pdev = to_pci_dev(hwdev);
return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
}
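+/* Pick the DMA mask the IOMMU code should honour for @hwdev: the
+ * device's own dma_mask when one is set, the ISA bridge's mask when
+ * no device is given, and a 32-bit mask as the fallback.
+ */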
+static inline unsigned long device_to_mask(struct device *hwdev)
+{
+ struct pci_dev *pdev;
+
+ if (!hwdev) {
+ pdev = ppc64_isabridge_dev;
+ if (!pdev) /* This is the best guess we can make */
+ return 0xfffffffful;
+ } else
+ pdev = to_pci_dev(hwdev);
+
+ if (pdev->dma_mask)
+ return pdev->dma_mask;
+
+ /* Assume devices without a mask can take 32-bit addresses */
+ return 0xfffffffful;
+}
+
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
- flag);
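+ /* Pass the device's DMA mask and the NUMA node of its bus down so
+ * the allocation is addressable by the device and node-local when
+ * possible.
+ */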
+ return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
+ device_to_mask(hwdev), flag,
+ pcibus_to_node(to_pci_dev(hwdev)->bus));
}
static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
+ iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).
 */
static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
size_t size, enum dma_data_direction direction)
{
- return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
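+ /* The mask bounds the TCE allocation so the DMA address handed back
+ * fits within the device's addressing capability.
+ */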
+ return iommu_map_single(device_to_table(hwdev), vaddr, size,
+ device_to_mask(hwdev), direction);
}
static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
- iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
+ iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
}
static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- return iommu_map_sg(pdev, devnode_table(pdev), sglist,
- nelems, direction);
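+ /* As in pci_iommu_map_single(), the mask keeps each mapped DMA
+ * address within the device's reach.
+ */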
+ return iommu_map_sg(pdev, device_to_table(pdev), sglist,
+ nelems, device_to_mask(pdev), direction);
}
static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
+ iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int pci_iommu_dma_supported(struct device *dev, u64 mask)
{
- return 1;
+ struct iommu_table *tbl = device_to_table(dev);
+
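+ /* A device whose mask cannot reach the start of its IOMMU window
+ * cannot do DMA through this table at all. Note that it_offset
+ * counts IOMMU pages while mask is in bytes, so a byte-accurate
+ * check would scale it_offset by the IOMMU page size first.
+ */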
+ if (!tbl || tbl->it_offset > mask) {
+ printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
+ if (tbl)
+ printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
+ mask, tbl->it_offset);
+ else
+ printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+ mask);
+ return 0;
+ } else
+ return 1;
}
void pci_iommu_init(void)