/*
 * BK Id: SCCS/s.pci.h 1.29 08/13/02 21:52:58 paulus
 */
#ifndef __PPC_PCI_H
#define __PPC_PCI_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include <asm/io.h>	/* needed for virt_to_bus() used by the inline DMA helpers below */
/* Values for the `which' argument to sys_pciconfig_iobase syscall.  */
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4
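/*
 * Usage sketch (illustrative only): from user space these values go in the
 * first argument of the pciconfig_iobase syscall.  Whether a libc wrapper
 * exists varies, so the raw syscall() form is shown; the bus/devfn values
 * of 0 are made up for the example.
 *
 *	long isa_io_base = syscall(__NR_pciconfig_iobase,
 *				   IOBASE_ISA_IO, 0, 0);
 */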
/*
 * Set this to 1 if you want the kernel to re-assign all PCI
 * bus numbers.
 */
extern int pci_assign_all_busses;

#define pcibios_assign_all_busses()	(pci_assign_all_busses)
#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

extern inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

extern inline void pcibios_penalize_isa_irq(int irq)
{
	/* We don't do dynamic PCI IRQ allocation */
}
extern unsigned long pci_resource_to_bus(struct pci_dev *pdev, struct resource *res);

/*
 * The PCI bus bridge can translate addresses issued by the processor(s)
 * into a different address on the PCI bus.  On 32-bit cpus, we assume
 * this mapping is 1-1, but on 64-bit systems it often isn't.
 *
 * Obsolete!  Drivers should now use pci_resource_to_bus instead.
 */
extern unsigned long phys_to_bus(unsigned long pa);
extern unsigned long pci_phys_to_bus(unsigned long pa, int busnr);
extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
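/*
 * Usage sketch (illustrative only): a driver that must tell its device the
 * bus-visible address of one of its own BARs should use pci_resource_to_bus()
 * rather than the obsolete helpers above.  "pdev" is a made-up name here.
 *
 *	unsigned long bus_addr;
 *
 *	bus_addr = pci_resource_to_bus(pdev, &pdev->resource[0]);
 *	(program bus_addr into the device's window/base register)
 */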
#ifndef CONFIG_PPC_ISERIES

/*
 * Dynamic DMA Mapping stuff
 * Originally stolen from i386 by ajoshi and updated by paulus
 * Non-consistent cache support by Dan Malek
 */

/* The PCI address space does equal the physical memory
 * address space.  The networking and block device layers use
 * this boolean for bounce buffer decisions.
 * XXX is this correct if CONFIG_NOT_COHERENT_CACHE? -- paulus
 */
#define PCI_DMA_BUS_IS_PHYS	(1)
/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices,
 * NULL for PCI-like buses (ISA, EISA).
 * Returns non-NULL cpu-view pointer to the buffer if successful and
 * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
 * is undefined.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
				  dma_addr_t *dma_handle);
/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
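/*
 * Usage sketch (illustrative only): typical pairing of the two calls above
 * for a page-sized descriptor ring.  "pdev" and "ring" are made-up names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = pci_alloc_consistent(pdev, PAGE_SIZE, &ring_dma);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	(hand ring_dma to the device, access ring from the CPU)
 *	pci_free_consistent(pdev, PAGE_SIZE, ring, ring_dma);
 */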
/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single is performed.
 */
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
					size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();

	consistent_sync(ptr, size, direction);
	return virt_to_bus(ptr);
}
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
				    size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* nothing else to do: the mapping was a simple address translation */
}
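/*
 * Usage sketch (illustrative only): the usual streaming pattern around one
 * transfer to the device.  "pdev", "buf" and "len" are made-up names.
 *
 *	dma_addr_t handle;
 *
 *	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	(start the transfer and wait for it to complete)
 *	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
 */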
/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
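/*
 * Usage sketch (illustrative only): drivers use these macros so the same
 * source also works where unmapping really needs the saved address and
 * length (see the CONFIG_PPC_ISERIES variants below).  "struct my_ring_entry"
 * and "entry" (a pointer to one) are made-up names.
 *
 *	struct my_ring_entry {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(maplen)
 *	};
 *
 *	pci_unmap_addr_set(entry, mapping, handle);
 *	pci_unmap_len_set(entry, maplen, len);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(entry, mapping),
 *			 pci_unmap_len(entry, maplen), PCI_DMA_FROMDEVICE);
 */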
/*
 * pci_{map,unmap}_page maps a kernel page to a dma_addr_t.  Identical
 * to pci_map_single, but takes a struct page instead of a virtual address.
 */
static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
				      unsigned long offset, size_t size,
				      int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();

	consistent_sync_page(page, offset, size, direction);
	return (page - mem_map) * PAGE_SIZE + PCI_DRAM_OFFSET + offset;
}
static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
				  size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* nothing else to do */
}
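/*
 * Usage sketch (illustrative only): mapping part of a page, e.g. one
 * receive fragment.  "pdev", "page", "offset" and "len" are made-up names.
 *
 *	dma_addr_t handle;
 *
 *	handle = pci_map_page(pdev, page, offset, len, PCI_DMA_FROMDEVICE);
 *	(let the device fill the buffer)
 *	pci_unmap_page(pdev, handle, len, PCI_DMA_FROMDEVICE);
 */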
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,len}(SG),
 * defined in <asm/scatterlist.h>.
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
			     int nents, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nents; i++) {
		/* each entry carries either a virtual address or a page, not both */
		if (sg[i].address && sg[i].page)
			BUG();
		else if (!sg[i].address && !sg[i].page)
			BUG();
		if (sg[i].address) {
			consistent_sync(sg[i].address, sg[i].length, direction);
			sg[i].dma_address = virt_to_bus(sg[i].address);
		} else {
			consistent_sync_page(sg[i].page, sg[i].offset,
					     sg[i].length, direction);
			sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
		}
		sg[i].dma_length = sg[i].length;
	}
	return nents;
}
/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
				int nents, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* nothing else to do */
}
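/*
 * Usage sketch (illustrative only): mapping a scatterlist and walking the
 * resulting dma address/length pairs.  "pdev", "sglist", "nents" and
 * setup_dma_descriptor() are made-up names.
 *
 *	int i, count;
 *
 *	count = pci_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
 *	for (i = 0; i < count; i++)
 *		setup_dma_descriptor(sg_dma_address(&sglist[i]),
 *				     sg_dma_len(&sglist[i]));
 *	(run the transfer)
 *	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
 */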
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, the
 * device again owns the buffer.
 */
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
				       dma_addr_t dma_handle,
				       size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();

#ifdef CONFIG_NOT_COHERENT_CACHE
	/* The bus_to_virt() can't be used here, in case dma_handle
	 * points to something that doesn't have the same cache attributes
	 * as the 1:1 mapped kernel memory.  If we used it, we could
	 * get a cache line alias with the wrong attributes.
	 * Since there isn't any Linux way to get the real VA from a PA,
	 * it is just easier to flush the whole cache.  The code to
	 * determine VA from PA would probably do the same :-).
	 * I don't know why these functions don't pass VA, since the
	 * cache operations use VA and the caller has this information.
	 */
	flush_dcache_all();
#endif
}
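/*
 * Usage sketch (illustrative only): inspecting a receive buffer from the CPU
 * without tearing down the mapping.  "pdev", "handle", "buf" and "len" are
 * made-up names.
 *
 *	pci_dma_sync_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
 *	(examine buf with the CPU, then hand handle back to the device)
 */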
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
				   struct scatterlist *sg,
				   int nelems, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(sg->address, sg->length, direction);
}
/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	return 1;
}
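/*
 * Usage sketch (illustrative only): a driver for a device that can only
 * address 24 bits would normally go through pci_set_dma_mask(), which
 * relies on this check.  "pdev" is a made-up name.
 *
 *	if (pci_set_dma_mask(pdev, 0x00ffffff))
 *		return -EIO;
 */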
#else /* CONFIG_PPC_ISERIES */

/*
 * Dynamic DMA Mapping for iSeries.
 * This is more complex than on other ppc32 platforms
 * because we have to set up TCE mappings for any DMA
 * using hypervisor calls.
 * See comments above for explanations of the functions.
 */

#define PCI_DMA_BUS_IS_PHYS	(0)
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
				  dma_addr_t *dma_handle);
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
				 size_t size, int direction);
extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
			     size_t size, int direction);
extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			       unsigned long offset, size_t size,
			       int direction);
extern void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
			   size_t size, int direction);
extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
		      int nents, int direction);
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
			 int nents, int direction);
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
				       dma_addr_t dma_handle,
				       size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
}

static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
				   struct scatterlist *sg,
				   int nelems, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
}

static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	return 1;
}
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
#endif /* CONFIG_PPC_ISERIES */

/*
 * At present there are very few 32-bit PPC machines that can have
 * memory above the 4GB point, and we don't support that.
 */
#define pci_dac_dma_supported(pci_dev, mask)	(0)
static __inline__ dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t) page_to_bus(page) + offset;
}

static __inline__ struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return mem_map + (unsigned long)(dma_addr >> PAGE_SHIFT);
}

static __inline__ unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return (dma_addr & ~PAGE_MASK);
}

static __inline__ void
pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
{
	/* Nothing to do. */
}
/* Return the index of the PCI controller for device PDEV. */
extern int pci_controller_num(struct pci_dev *pdev);

/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine);

/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP	1
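/*
 * Usage sketch (illustrative only): with HAVE_PCI_MMAP defined,
 * drivers/pci/proc.c lets user space mmap() a device's space through its
 * /proc/bus/pci entry.  The path and length below are made up.
 *
 *	int fd = open("/proc/bus/pci/00/0b.0", O_RDONLY);
 *	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 */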
#endif /* __KERNEL__ */

#endif /* __PPC_PCI_H */