5 #include <linux/config.h>
7 #include <linux/types.h>
10 #include <asm/byteorder.h>
/* Index/data port pair of the SuperIO chip's configuration registers. */
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
/*
 * Per-platform ISA memory bases and the offset between the CPU's view
 * of system DRAM and the PCI bus's view of it.  One of these sets is
 * presumably selected at boot into isa_io_base / isa_mem_base /
 * pci_dram_offset (declared below) -- confirm against platform setup code.
 */
#define PMAC_ISA_MEM_BASE 0
#define PMAC_PCI_DRAM_OFFSET 0
#define CHRP_ISA_IO_BASE 0xf8000000
#define CHRP_ISA_MEM_BASE 0xf7000000
#define CHRP_PCI_DRAM_OFFSET 0
#define PREP_ISA_IO_BASE 0x80000000
#define PREP_ISA_MEM_BASE 0xc0000000
#define PREP_PCI_DRAM_OFFSET 0x80000000
/*
 * Platform-dependent definitions of _IO_BASE / _ISA_MEM_BASE /
 * PCI_DRAM_OFFSET.  Embedded platforms (40x, 8xx, 8260) pull theirs
 * from a dedicated header; APUS hard-wires zero offsets; everyone else
 * indirects through the runtime-selected variables declared below.
 */
#if defined(CONFIG_40x)
#include <asm/ibm4xx.h>
#elif defined(CONFIG_8xx)
#include <asm/mpc8xx.h>
#elif defined(CONFIG_8260)
#include <asm/mpc8260.h>
#elif defined(CONFIG_APUS)
/* NOTE(review): the APUS _IO_BASE definition appears to be elided from
 * this chunk. */
#define _ISA_MEM_BASE 0
#define PCI_DRAM_OFFSET 0
#else /* Everyone else */
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
#define PCI_DRAM_OFFSET pci_dram_offset
#endif /* Platform-dependent I/O */
/* Runtime-selected I/O bases; presumably assigned by platform
 * initialization code elsewhere -- not visible in this chunk. */
extern unsigned long isa_io_base;
extern unsigned long isa_mem_base;
extern unsigned long pci_dram_offset;
/*
 * MMIO accessors.  Byte accesses always go through in_8/out_8.  Wider
 * accesses are plain native loads/stores on APUS, and byte-reversing
 * little-endian helpers (in_le*/out_le*) in the default case.
 */
#define readb(addr) in_8((volatile u8 *)(addr))
#define writeb(b,addr) out_8((volatile u8 *)(addr), (b))
#if defined(CONFIG_APUS)
#define readw(addr) (*(volatile u16 *) (addr))
#define readl(addr) (*(volatile u32 *) (addr))
#define writew(b,addr) ((*(volatile u16 *) (addr)) = (b))
#define writel(b,addr) ((*(volatile u32 *) (addr)) = (b))
/* NOTE(review): the #else and #endif of this conditional are elided from
 * this chunk; the little-endian default definitions follow. */
#define readw(addr) in_le16((volatile u16 *)(addr))
#define readl(addr) in_le32((volatile u32 *)(addr))
#define writew(b,addr) out_le16((volatile u16 *)(addr),(b))
#define writel(b,addr) out_le32((volatile u32 *)(addr),(b))
/* "Raw" accessors: bare volatile loads/stores -- no byte swapping and
 * none of the eieio/isync ordering done by the in_*/out_* helpers. */
#define __raw_readb(addr) (*(volatile unsigned char *)(addr))
#define __raw_readw(addr) (*(volatile unsigned short *)(addr))
#define __raw_readl(addr) (*(volatile unsigned int *)(addr))
#define __raw_writeb(v, addr) (*(volatile unsigned char *)(addr) = (v))
#define __raw_writew(v, addr) (*(volatile unsigned short *)(addr) = (v))
#define __raw_writel(v, addr) (*(volatile unsigned int *)(addr) = (v))
/*
 * The insw/outsw/insl/outsl macros don't do byte-swapping.
 * They are only used in practice for transferring buffers which
 * are arrays of bytes, and byte-swapping is not appropriate in
 * [...remainder of this comment is elided from this chunk...]
 */
#define insb(port, buf, ns) _insb((u8 *)((port)+_IO_BASE), (buf), (ns))
#define outsb(port, buf, ns) _outsb((u8 *)((port)+_IO_BASE), (buf), (ns))
#define insw(port, buf, ns) _insw_ns((u16 *)((port)+_IO_BASE), (buf), (ns))
#define outsw(port, buf, ns) _outsw_ns((u16 *)((port)+_IO_BASE), (buf), (ns))
#define insl(port, buf, nl) _insl_ns((u32 *)((port)+_IO_BASE), (buf), (nl))
#define outsl(port, buf, nl) _outsl_ns((u32 *)((port)+_IO_BASE), (buf), (nl))
/*
 * On powermacs, we will get a machine check exception if we
 * try to read data from a non-existent I/O port. Because the
 * machine check is an asynchronous exception, it isn't
 * well-defined which instruction SRR0 will point to when the
 * [...line elided from this chunk...]
 * With the sequence below (twi; isync; nop), we have found that
 * the machine check occurs on one of the three instructions on
 * all PPC implementations tested so far. The twi and isync are
 * needed on the 601 (in fact twi; sync works too), the isync and
 * nop are needed on 604[e|r], and any of twi, sync or isync will
 * work on 603[e], 750, 74x0.
 * The twi creates an explicit data dependency on the returned
 * value which seems to be needed to make the 601 wait for the
 * [...remainder of this comment is elided from this chunk...]
 */
/* Emit an inX() port-read accessor built around `op`, with the
 * twi/isync machine-check fence described above and an __ex_table
 * fixup entry so a faulting access recovers instead of oopsing.
 * NOTE(review): the asm body, fixup code, and ex-table entries of this
 * macro are partially elided from this chunk -- do not edit blind.
 */
#define __do_in_asm(name, op) \
extern __inline__ unsigned int name(unsigned int port) \
__asm__ __volatile__( \
".section .fixup,\"ax\"\n" \
".section __ex_table,\"a\"\n" \
: "r" (port + _IO_BASE)); \
/* Emit the matching outX() port-write accessor (same caveats apply). */
#define __do_out_asm(name, op) \
extern __inline__ void name(unsigned int val, unsigned int port) \
__asm__ __volatile__( \
".section __ex_table,\"a\"\n" \
: : "r" (val), "r" (port + _IO_BASE)); \
/* lbzx/stbx move single bytes; lhbrx/lwbrx and sthbrx/stwbrx are the
 * byte-reversing forms, giving little-endian port semantics. */
__do_in_asm(inb, "lbzx")
__do_in_asm(inw, "lhbrx")
__do_in_asm(inl, "lwbrx")
__do_out_asm(outb, "stbx")
__do_out_asm(outw, "sthbrx")
__do_out_asm(outl, "stwbrx")
#elif defined(CONFIG_APUS)
/* APUS: port I/O is memory-mapped and big-endian (no byte swapping). */
#define inb(port) in_8((u8 *)((port)+_IO_BASE))
#define outb(val, port) out_8((u8 *)((port)+_IO_BASE), (val))
#define inw(port) in_be16((u16 *)((port)+_IO_BASE))
#define outw(val, port) out_be16((u16 *)((port)+_IO_BASE), (val))
#define inl(port) in_be32((u32 *)((port)+_IO_BASE))
#define outl(val, port) out_be32((u32 *)((port)+_IO_BASE), (val))
#else /* not APUS or ALL_PPC */
/* Default: ISA/PCI port space is little-endian, so the multi-byte
 * accessors byte-swap via in_le*/out_le*.
 * NOTE(review): the closing #endif of this conditional is elided from
 * this chunk. */
#define inb(port) in_8((u8 *)((port)+_IO_BASE))
#define outb(val, port) out_8((u8 *)((port)+_IO_BASE), (val))
#define inw(port) in_le16((u16 *)((port)+_IO_BASE))
#define outw(val, port) out_le16((u16 *)((port)+_IO_BASE), (val))
#define inl(port) in_le32((u32 *)((port)+_IO_BASE))
#define outl(val, port) out_le32((u32 *)((port)+_IO_BASE), (val))
/* The *_p ("pause") variants add no extra delay on PPC; they are plain
 * aliases of the corresponding accessors. */
#define inb_p(port) inb((port))
#define outb_p(val, port) outb((val), (port))
#define inw_p(port) inw((port))
#define outw_p(val, port) outw((val), (port))
#define inl_p(port) inl((port))
#define outl_p(val, port) outl((val), (port))
/* Low-level string-I/O primitives, implemented elsewhere (arch code):
 * move ns/nl elements between a fixed port address and a buffer. */
extern void _insb(volatile u8 *port, void *buf, int ns);
extern void _outsb(volatile u8 *port, const void *buf, int ns);
extern void _insw(volatile u16 *port, void *buf, int ns);
extern void _outsw(volatile u16 *port, const void *buf, int ns);
extern void _insl(volatile u32 *port, void *buf, int nl);
extern void _outsl(volatile u32 *port, const void *buf, int nl);
extern void _insw_ns(volatile u16 *port, void *buf, int ns);
extern void _outsw_ns(volatile u16 *port, const void *buf, int ns);
extern void _insl_ns(volatile u32 *port, void *buf, int nl);
extern void _outsl_ns(volatile u32 *port, const void *buf, int nl);
/*
 * The *_ns versions below don't do byte-swapping.
 * Neither do the standard versions now, these are just here
 * [...remainder of this comment is elided from this chunk...]
 */
#define insw_ns(port, buf, ns) _insw_ns((u16 *)((port)+_IO_BASE), (buf), (ns))
#define outsw_ns(port, buf, ns) _outsw_ns((u16 *)((port)+_IO_BASE), (buf), (ns))
#define insl_ns(port, buf, nl) _insl_ns((u32 *)((port)+_IO_BASE), (buf), (nl))
#define outsl_ns(port, buf, nl) _outsl_ns((u32 *)((port)+_IO_BASE), (buf), (nl))
/* No meaningful upper bound on port numbers here. */
#define IO_SPACE_LIMIT ~0
/* ioremap'd space is directly CPU-addressable on PPC, so ordinary
 * memset/memcpy are sufficient for the *_io helpers. */
#define memset_io(a,b,c) memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
/*
 * Map in an area of physical address space, for accessing
 * [...remainder of this comment is elided from this chunk...]
 */
extern void *__ioremap(phys_addr_t address, unsigned long size,
unsigned long flags);
extern void *ioremap(phys_addr_t address, unsigned long size);
extern void *ioremap64(unsigned long long address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
extern void iounmap(void *addr);
/* Virtual -> physical lookup for ioremap'd/IO addresses (arch code). */
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
/* Establish a block (BAT-style) mapping -- implemented elsewhere. */
extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
unsigned int size, int flags);
/*
 * This makes sure that a value has been returned from a device
 * before any subsequent loads or stores are performed.
 * The twi creates a data dependency on `value`, and isync stalls until
 * the preceding load has completed.
 */
extern inline void io_flush(int value)
/* NOTE(review): the function's braces are elided from this chunk. */
__asm__ __volatile__("twi 0,%0,0; isync" : : "r" (value));
/*
 * The PCI bus is inherently Little-Endian. The PowerPC is being
 * run Big-Endian. Thus all values which cross the [PCI] barrier
 * must be endian-adjusted. Also, the local DRAM has a different
 * address from the PCI point of view, thus buffer addresses also
 * have to be modified [mapped] appropriately.
 */
/* Kernel-virtual -> PCI bus address.
 * NOTE(review): the braces and #ifdef CONFIG_APUS / #else / #endif
 * scaffolding of both functions are elided from this chunk; the
 * iopa/mm_ptov paths are presumably the APUS variants -- confirm
 * against the full file before editing. */
extern inline unsigned long virt_to_bus(volatile void * address)
return (iopa((unsigned long) address) + PCI_DRAM_OFFSET);
if (address == (void *)0)
return (unsigned long)address - KERNELBASE + PCI_DRAM_OFFSET;
/* PCI bus address -> kernel-virtual (inverse of virt_to_bus). */
extern inline void * bus_to_virt(unsigned long address)
return (void*) mm_ptov (address - PCI_DRAM_OFFSET);
return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
/*
 * Change virtual addresses to physical addresses and vv, for
 * addresses in the area where the kernel has the RAM mapped.
 */
/* NOTE(review): braces and the platform #ifdef scaffolding of both
 * functions are elided from this chunk; the iopa/mm_ptov returns are
 * presumably the APUS paths, the KERNELBASE arithmetic the default. */
extern inline unsigned long virt_to_phys(volatile void * address)
return iopa ((unsigned long) address);
return (unsigned long) address - KERNELBASE;
extern inline void * phys_to_virt(unsigned long address)
return (void*) mm_ptov (address);
return (void *) (address + KERNELBASE);
/*
 * Change "struct page" to physical address: index of the page within
 * mem_map, scaled to bytes, offset by the start of physical memory.
 */
#define page_to_phys(page) (((page - mem_map) << PAGE_SHIFT) + PPC_MEMSTART)
/* PCI bus address of a page: its physical address plus the DRAM offset. */
#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)
/*
 * Enforce In-order Execution of I/O:
 * Acts as a barrier to ensure all previous I/O accesses have
 * completed before any further ones are issued.
 */
extern inline void eieio(void)
/* NOTE(review): the function's braces are elided from this chunk. */
__asm__ __volatile__ ("eieio" : : : "memory");
/* Enforce in-order execution of data I/O.
 * No distinction between read/write on PPC; use eieio for all three.
 */
#define iobarrier_rw() eieio()
#define iobarrier_r() eieio()
#define iobarrier_w() eieio()
/*
 * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
 * Read operations have additional twi & isync to make sure the read
 * is actually performed (i.e. the data has come back) before we start
 * executing any following instructions.
 */
/* NOTE(review): the braces, local `ret`, twi lines and return
 * statements of the accessors below are elided from this chunk. */
extern inline int in_8(volatile unsigned char *addr)
__asm__ __volatile__(
"isync" : "=r" (ret) : "m" (*addr));
/* Byte store followed by eieio to order it against later accesses. */
extern inline void out_8(volatile unsigned char *addr, int val)
__asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
/* 16-bit accessors.  lhbrx/sthbrx byte-reverse (little-endian device
 * registers); lhz/sth are the big-endian (native) forms.
 * NOTE(review): braces, `ret` declarations, the twi fence and return
 * statements are elided from this chunk. */
extern inline int in_le16(volatile unsigned short *addr)
__asm__ __volatile__("lhbrx %0,0,%1;\n"
"isync" : "=r" (ret) :
"r" (addr), "m" (*addr));
extern inline int in_be16(volatile unsigned short *addr)
__asm__ __volatile__("lhz%U1%X1 %0,%1;\n"
"isync" : "=r" (ret) : "m" (*addr));
extern inline void out_le16(volatile unsigned short *addr, int val)
__asm__ __volatile__("sthbrx %1,0,%2; eieio" : "=m" (*addr) :
"r" (val), "r" (addr));
extern inline void out_be16(volatile unsigned short *addr, int val)
__asm__ __volatile__("sth%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
/* 32-bit accessors, same pattern as the 16-bit ones: lwbrx/stwbrx
 * byte-reverse, lwz/stw are native big-endian.
 * NOTE(review): braces, `ret` declarations, the twi fence and return
 * statements are elided from this chunk. */
extern inline unsigned in_le32(volatile unsigned *addr)
__asm__ __volatile__("lwbrx %0,0,%1;\n"
"isync" : "=r" (ret) :
"r" (addr), "m" (*addr));
extern inline unsigned in_be32(volatile unsigned *addr)
__asm__ __volatile__("lwz%U1%X1 %0,%1;\n"
"isync" : "=r" (ret) : "m" (*addr));
extern inline void out_le32(volatile unsigned *addr, int val)
__asm__ __volatile__("stwbrx %1,0,%2; eieio" : "=m" (*addr) :
"r" (val), "r" (addr));
extern inline void out_be32(volatile unsigned *addr, int val)
__asm__ __volatile__("stw%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
/* Compare `length` bytes at MMIO address io_addr against `signature`.
 * NOTE(review): the loop body, brace structure and return statements
 * are elided from this chunk -- only the first comparison is visible. */
static inline int check_signature(unsigned long io_addr,
const unsigned char *signature, int length)
if (readb(io_addr) != *signature)
/* Make some pcmcia drivers happy */
static inline int isa_check_signature(unsigned long io_addr,
const unsigned char *signature, int length)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr);
extern void consistent_sync(void *vaddr, size_t size, int rw);
extern void consistent_sync_page(struct page *page, unsigned long offset,
size_t size, int rw);
/* Map the generic dma_cache_* hooks onto the PPC dcache primitives:
 * inv = discard, wback = clean (write out), wback_inv = flush (both). */
#define dma_cache_inv(_start,_size) \
invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
flush_dcache_range(_start, (_start + _size))
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */
/* Hardware keeps caches coherent with DMA: all maintenance is a no-op,
 * and consistent_alloc degenerates to NULL (callers must not rely on it
 * on coherent platforms). */
#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
#define consistent_alloc(gfp, size, handle) NULL
#define consistent_free(addr, size) do { } while (0)
#define consistent_sync(addr, size, rw) do { } while (0)
#define consistent_sync_page(pg, off, sz, rw) do { } while (0)
#endif /* CONFIG_NOT_COHERENT_CACHE */
443 #endif /* _PPC_IO_H */
444 #endif /* __KERNEL__ */