/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef _ASM_PCI_H
#define _ASM_PCI_H

#include <linux/config.h>

#ifdef __KERNEL__
/* Can be used to override the logic in pci_scan_bus for skipping
   already-configured bus numbers - to be used for buggy BIOSes
   or architectures with incomplete PCI setup by the loader */
#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
#else
#define pcibios_assign_all_busses()	0
#endif
#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000
static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void pcibios_penalize_isa_irq(int irq)
{
	/* We don't do dynamic PCI IRQ allocation */
}
/*
 * Dynamic DMA mapping stuff.
 * MIPS has everything mapped statically.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
#if defined(CONFIG_DDB5074) || defined(CONFIG_DDB5476)
#undef PCIBIOS_MIN_IO
#undef PCIBIOS_MIN_MEM
#define PCIBIOS_MIN_IO		0x0100000
#define PCIBIOS_MIN_MEM		0x1000000
#endif
/*
 * The PCI address space equals the physical memory address space.  The
 * networking and block device layers use this boolean for bounce buffer
 * decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(1)
/*
 * Allocate and map a kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices,
 * NULL for PCI-like buses (ISA, EISA).
 * Returns a non-NULL cpu-view pointer to the buffer if successful and
 * sets *dma_handle to the pci side dma address as well, else *dma_handle
 * is undefined.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
				  dma_addr_t *dma_handle);
/*
 * Free and unmap a consistent DMA buffer.
 * vaddr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_handle must be the same as what *dma_handle was set to.
 *
 * References to the memory and mappings associated with vaddr/dma_handle
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
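
/*
 * Example (illustrative sketch only, not part of this header): a driver
 * would typically allocate a descriptor ring at probe time and free it
 * on removal.  "pdev" and the error path are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = pci_alloc_consistent(pdev, PAGE_SIZE, &ring_dma);
 *
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, access ring from the cpu ...
 *	pci_free_consistent(pdev, PAGE_SIZE, ring, ring_dma);
 */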
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single is performed.
 */
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
					size_t size, int direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	dma_cache_wback_inv(addr, size);

	return bus_to_baddr(hwdev->bus, __pa(ptr));
}
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
				    size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	if (direction != PCI_DMA_TODEVICE) {
		unsigned long addr;

		addr = baddr_to_bus(hwdev->bus, dma_addr) + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}
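
/*
 * Example (illustrative sketch only): a typical transmit path maps a
 * buffer, lets the device DMA from it, then unmaps it.  "pdev",
 * "skb_data" and "len" are hypothetical.
 *
 *	dma_addr_t mapping;
 *
 *	mapping = pci_map_single(pdev, skb_data, len, PCI_DMA_TODEVICE);
 *	... tell the device to DMA from "mapping", wait for completion ...
 *	pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);
 */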
/*
 * pci_{map,unmap}_page maps a kernel page to a dma_addr_t.  Identical
 * to pci_map_single, but takes a struct page instead of a virtual address.
 */
static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
				      unsigned long offset, size_t size,
				      int direction)
{
	unsigned long addr;

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return bus_to_baddr(hwdev->bus, page_to_phys(page) + offset);
}
static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
				  size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	if (direction != PCI_DMA_TODEVICE) {
		unsigned long addr;

		addr = baddr_to_bus(hwdev->bus, dma_address) + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}
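
/*
 * Example (illustrative sketch only): mapping one page of a hypothetical
 * "page" for device reads, e.g. zero-copy transmit of page cache data.
 *
 *	dma_addr_t mapping;
 *
 *	mapping = pci_map_page(pdev, page, 0, PAGE_SIZE, PCI_DMA_TODEVICE);
 *	...
 *	pci_unmap_page(pdev, mapping, PAGE_SIZE, PCI_DMA_TODEVICE);
 */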
/* pci_unmap_{page,single} needs no driver-saved state, so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
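
/*
 * Example (illustrative sketch only): a portable driver embeds these
 * macros in its per-buffer state; here they expand to nothing, while on
 * other platforms they store what pci_unmap_single() will need later.
 * "struct ring_info", "ri", "dma" and "size" are hypothetical.
 *
 *	struct ring_info {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	pci_unmap_addr_set(ri, mapping, dma);
 *	pci_unmap_len_set(ri, len, size);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(ri, mapping),
 *			 pci_unmap_len(ri, len), PCI_DMA_FROMDEVICE);
 */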
/*
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
			     int nents, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	for (i = 0; i < nents; i++, sg++) {
		if (sg->address && sg->page)
			out_of_line_bug();
		else if (!sg->address && !sg->page)
			out_of_line_bug();

		if (sg->address) {
			dma_cache_wback_inv((unsigned long)sg->address,
					    sg->length);
			sg->dma_address = bus_to_baddr(hwdev->bus, __pa(sg->address));
		} else
			sg->dma_address = page_to_bus(sg->page) +
					  sg->offset;
	}

	return nents;
}
/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
				int nents, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	if (direction == PCI_DMA_TODEVICE)
		return;

	for (i = 0; i < nents; i++, sg++) {
		if (sg->address && sg->page)
			out_of_line_bug();
		else if (!sg->address && !sg->page)
			out_of_line_bug();

		if (!sg->address)
			continue;

		dma_cache_wback_inv((unsigned long)sg->address, sg->length);
	}
}
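
/*
 * Example (illustrative sketch only): mapping a scatterlist and
 * programming the device with each resulting address/length pair via
 * the sg_dma_{address,len} accessors defined below.  "pdev", "sglist",
 * "nbufs" and fill_desc() are hypothetical.
 *
 *	int i, count;
 *
 *	count = pci_map_sg(pdev, sglist, nbufs, PCI_DMA_TODEVICE);
 *	for (i = 0; i < count; i++)
 *		fill_desc(i, sg_dma_address(&sglist[i]),
 *			  sg_dma_len(&sglist[i]));
 *	... run the transfer ...
 *	pci_unmap_sg(pdev, sglist, nbufs, PCI_DMA_TODEVICE);
 */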
/*
 * Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, the
 * device again owns the buffer.
 */
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
				       dma_addr_t dma_handle,
				       size_t size, int direction)
{
	unsigned long addr;

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	addr = baddr_to_bus(hwdev->bus, dma_handle) + PAGE_OFFSET;
	dma_cache_wback_inv(addr, size);
}
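
/*
 * Example (illustrative sketch only): inspecting a receive buffer that
 * stays mapped across transfers.  "pdev", "mapping" and "len" are
 * hypothetical.
 *
 *	pci_dma_sync_single(pdev, mapping, len, PCI_DMA_FROMDEVICE);
 *	... examine the buffer with the cpu ...
 *	... then hand "mapping" back to the device, which owns it again ...
 */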
/*
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
				   struct scatterlist *sg,
				   int nelems, int direction)
{
#ifdef CONFIG_NONCOHERENT_IO
	int i;
#endif

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	/* Make sure that gcc doesn't leave the empty loop body.  */
#ifdef CONFIG_NONCOHERENT_IO
	for (i = 0; i < nelems; i++, sg++)
		dma_cache_wback_inv((unsigned long)sg->address, sg->length);
#endif
}
/*
 * Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}
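
/*
 * Example (illustrative sketch only): a driver for a card that can only
 * address 24 bits would check the mask at probe time; on this platform
 * anything below 0x00ffffff is refused.
 *
 *	if (!pci_dma_supported(pdev, 0x00ffffff))
 *		return -EIO;
 */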
/* This is always fine. */
#define pci_dac_dma_supported(pci_dev, mask)	(1)

static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	dma64_addr_t addr = page_to_phys(page) + offset;

	return (dma64_addr_t) bus_to_baddr(pdev->bus, addr);
}

static inline struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	unsigned long poff = baddr_to_bus(pdev->bus, dma_addr) >> PAGE_SHIFT;

	return mem_map + poff;
}

static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

static inline void pci_dac_dma_sync_single(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	unsigned long addr;

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	addr = baddr_to_bus(pdev->bus, dma_addr) + PAGE_OFFSET;
	dma_cache_wback_inv(addr, len);
}
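
/*
 * Example (illustrative sketch only): the DAC helpers are inverses of
 * one another, so a driver can go from a page to a dma64_addr_t and
 * back.  "pdev" and "page" are hypothetical.
 *
 *	dma64_addr_t dma;
 *
 *	dma = pci_dac_page_to_dma(pdev, page, 0, PCI_DMA_FROMDEVICE);
 *	pci_dac_dma_sync_single(pdev, dma, PAGE_SIZE, PCI_DMA_FROMDEVICE);
 *	page = pci_dac_dma_to_page(pdev, dma);
 */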
/*
 * Return the index of the PCI controller for device.
 */
#define pci_controller_num(pdev)	(0)
/*
 * These macros should be used after a pci_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries pci_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)	((sg)->dma_address)
#define sg_dma_len(sg)		((sg)->length)
#endif /* __KERNEL__ */

#endif /* _ASM_PCI_H */