/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef _ASM_PCI_H
#define _ASM_PCI_H

#include <linux/config.h>

#ifdef __KERNEL__

/* Can be used to override the logic in pci_scan_bus for skipping
   already-configured bus numbers - to be used for buggy BIOSes
   or architectures with incomplete PCI setup by the loader */

#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
#else
#define pcibios_assign_all_busses()	0
#endif
#define pcibios_scan_all_fns()		0

#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

struct pci_dev;

extern void pcibios_set_master(struct pci_dev *dev);

static inline void pcibios_penalize_isa_irq(int irq)
{
	/* We don't do dynamic PCI IRQ allocation */
}

/*
 * Dynamic DMA mapping stuff.
 * MIPS has everything mapped statically.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>

#if (defined(CONFIG_DDB5074) || defined(CONFIG_DDB5476))
#undef PCIBIOS_MIN_IO
#undef PCIBIOS_MIN_MEM
#define PCIBIOS_MIN_IO		0x0100000
#define PCIBIOS_MIN_MEM		0x1000000
#endif

/*
 * The PCI address space equals the physical memory address space.  The
 * networking and block device layers use this boolean for bounce buffer
 * decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(1)

/*
 * Allocate and map a kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices,
 * NULL for PCI-like buses (ISA, EISA).
 * Returns a non-NULL cpu-view pointer to the buffer if successful and
 * sets *dma_handle to the pci side dma address as well, else *dma_handle
 * is undefined.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
	dma_addr_t *dma_handle);

/*
 * Free and unmap a consistent DMA buffer.
 * vaddr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_handle must be the same as what *dma_handle was set to.
 *
 * References to the memory and mappings associated with vaddr/dma_handle
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
	void *vaddr, dma_addr_t dma_handle);

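/*
 * Usage sketch (not from this file; names like RING_BYTES, ioaddr and
 * RING_BASE_REG are hypothetical): a driver typically allocates a
 * descriptor ring once at probe time and hands the bus address to the chip:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dma);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	writel(ring_dma, ioaddr + RING_BASE_REG);
 *	...
 *	pci_free_consistent(pdev, RING_BYTES, ring, ring_dma);
 */
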
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single is performed.
 */
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
	size_t size, int direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Write back and invalidate so the device sees current memory.  */
	dma_cache_wback_inv(addr, size);

	return bus_to_baddr(hwdev->bus, __pa(ptr));
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
	size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();

	if (direction != PCI_DMA_TODEVICE) {
		unsigned long addr;

		/* Invalidate so the cpu sees what the device wrote.  */
		addr = baddr_to_bus(hwdev->bus, dma_addr) + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

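/*
 * Usage sketch (hypothetical names): map a buffer for a single transmit,
 * hand the bus address to the device, and unmap once the transfer is done:
 *
 *	dma_addr_t mapping;
 *
 *	mapping = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
 *	... give 'mapping' to the device and wait for completion ...
 *	pci_unmap_single(pdev, mapping, skb->len, PCI_DMA_TODEVICE);
 */
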
/*
 * pci_{map,unmap}_page maps a kernel page to a dma_addr_t.  Identical
 * to pci_map_single, but takes a struct page instead of a virtual address.
 */
static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
	unsigned long offset, size_t size,
	int direction)
{
	unsigned long addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return bus_to_baddr(hwdev->bus, page_to_phys(page) + offset);
}

static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
	size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();

	if (direction != PCI_DMA_TODEVICE) {
		unsigned long addr;

		addr = baddr_to_bus(hwdev->bus, dma_address) + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

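/*
 * Usage sketch (hypothetical names): map one page for the device to fill,
 * then unmap it before the cpu looks at the data:
 *
 *	dma_addr_t mapping;
 *
 *	mapping = pci_map_page(pdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
 *	... device fills the page ...
 *	pci_unmap_page(pdev, mapping, PAGE_SIZE, PCI_DMA_FROMDEVICE);
 */
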
/*
 * pci_unmap_{page,single} need no state beyond their explicit arguments on
 * this platform, so the driver-visible bookkeeping macros compile to nothing.
 */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)

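/*
 * Usage sketch (hypothetical struct): portable drivers record mappings in
 * their private state through these macros.  On platforms where unmap needs
 * the saved address they expand to real struct members; here they vanish:
 *
 *	struct mydev_tx_slot {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *	};
 *
 *	pci_unmap_addr_set(slot, mapping, bus_addr);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(slot, mapping), len,
 *	                 PCI_DMA_TODEVICE);
 */
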
/*
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	int nents, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Each entry must have exactly one of ->address or ->page set.  */
	for (i = 0; i < nents; i++, sg++) {
		if (sg->address && sg->page)
			BUG();
		else if (!sg->address && !sg->page)
			BUG();
		if (sg->address) {
			dma_cache_wback_inv((unsigned long)sg->address,
			                    sg->length);
			sg->dma_address = bus_to_baddr(hwdev->bus, __pa(sg->address));
		} else {
			sg->dma_address = page_to_bus(sg->page) + sg->offset;
			dma_cache_wback_inv((unsigned long)
				(page_address(sg->page) + sg->offset),
				sg->length);
		}
	}

	return nents;
}

/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	int nents, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (direction == PCI_DMA_TODEVICE)
		return;

	for (i = 0; i < nents; i++, sg++) {
		if (sg->address && sg->page)
			BUG();
		else if (!sg->address && !sg->page)
			BUG();
		if (!sg->address)
			continue;

		dma_cache_wback_inv((unsigned long)sg->address, sg->length);
	}
}

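/*
 * Usage sketch (hypothetical names; program_entry() stands in for
 * device-specific descriptor setup): work only with the count that
 * pci_map_sg() returns, but pass the original nents back to unmap:
 *
 *	int i, count;
 *
 *	count = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 *	for (i = 0; i < count; i++)
 *		program_entry(dev, sg_dma_address(&sglist[i]),
 *		              sg_dma_len(&sglist[i]));
 *	... transfer completes ...
 *	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 */
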
/*
 * Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, the
 * device again owns the buffer.
 */
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
	dma_addr_t dma_handle,
	size_t size, int direction)
{
	unsigned long addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	addr = baddr_to_bus(hwdev->bus, dma_handle) + PAGE_OFFSET;
	dma_cache_wback_inv(addr, size);
}

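/*
 * Usage sketch (hypothetical names): inspect a receive buffer that stays
 * mapped across transfers; sync first so the cpu sees the device's data:
 *
 *	pci_dma_sync_single(pdev, rx_mapping, RX_LEN, PCI_DMA_FROMDEVICE);
 *	status = parse_header(rx_buf);
 *	... hand the same mapping back to the device for the next frame ...
 */
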
/*
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
	struct scatterlist *sg,
	int nelems, int direction)
{
#ifdef CONFIG_NONCOHERENT_IO
	int i;
#endif

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Make sure that gcc doesn't leave the empty loop body.  */
#ifdef CONFIG_NONCOHERENT_IO
	for (i = 0; i < nelems; i++, sg++)
		dma_cache_wback_inv((unsigned long)sg->address, sg->length);
#endif
}

/*
 * Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24 bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

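/*
 * Usage sketch: at probe time a driver validates its mask through the
 * generic PCI layer; pci_set_dma_mask() fails if pci_dma_supported()
 * rejects the mask:
 *
 *	if (pci_set_dma_mask(pdev, 0xffffffff)) {
 *		printk(KERN_ERR "mydev: no usable DMA configuration\n");
 *		return -EIO;
 *	}
 */
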
/* This is always fine. */
#define pci_dac_dma_supported(pci_dev, mask)	(1)

static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	dma64_addr_t addr = page_to_phys(page) + offset;

	return (dma64_addr_t) bus_to_baddr(pdev->bus, addr);
}

static inline struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	unsigned long poff = baddr_to_bus(pdev->bus, dma_addr) >> PAGE_SHIFT;

	return mem_map + poff;
}

static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

static inline void pci_dac_dma_sync_single(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	unsigned long addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	addr = baddr_to_bus(pdev->bus, dma_addr) + PAGE_OFFSET;
	dma_cache_wback_inv(addr, len);
}

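/*
 * Usage sketch (hypothetical names): DAC cycles address a page directly
 * with a 64-bit bus address; the cpu view is recovered afterwards:
 *
 *	dma64_addr_t bus64;
 *
 *	bus64 = pci_dac_page_to_dma(pdev, page, 0, PCI_DMA_FROMDEVICE);
 *	... transfer completes ...
 *	pci_dac_dma_sync_single(pdev, bus64, PAGE_SIZE, PCI_DMA_FROMDEVICE);
 *	page = pci_dac_dma_to_page(pdev, bus64);
 *	offset = pci_dac_dma_to_offset(pdev, bus64);
 */
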
/*
 * Return the index of the PCI controller for a device.
 */
#define pci_controller_num(pdev)	({ (void)(pdev); 0; })

/*
 * These macros should be used after a pci_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries pci_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)	((sg)->dma_address)
#define sg_dma_len(sg)		((sg)->length)

#endif /* __KERNEL__ */

#endif /* _ASM_PCI_H */