/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2000 FSMLabs, Inc.
 */
14 #include <linux/config.h>
15 #include <linux/pagemap.h>
16 #include <linux/types.h>
17 #include <asm/addrspace.h>
18 #include <asm/pgtable-bits.h>
19 #include <asm/byteorder.h>
#ifdef CONFIG_SGI_IP27
/* Per-PCI-bus base addresses on IP27; indexed by the bus number. */
extern unsigned long bus_to_baddr[256];

#define bus_to_baddr(hwdev, addr) (bus_to_baddr[(hwdev)->bus->number] + (addr))
#define baddr_to_bus(hwdev, addr) ((addr) - bus_to_baddr[(hwdev)->bus->number])
#else
/* Everything else maps bus addresses 1:1. */
#define bus_to_baddr(hwdev, addr) (addr)
#define baddr_to_bus(hwdev, addr) (addr)
#endif
/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO
/*
 * Sane hardware offers swapping of I/O space accesses in hardware; less
 * sane hardware forces software to fiddle with this ...
 */
#if defined(CONFIG_SWAP_IO_SPACE) && defined(__MIPSEB__)

#define __ioswab8(x) (x)
#ifdef CONFIG_SGI_IP22
/* IP22 seems braindead enough to swap 16bits values in hardware, but
   not 32bits.  Go figure... Can't tell without documentation. */
#define __ioswab16(x) (x)
#else
#define __ioswab16(x) swab16(x)
#endif
#define __ioswab32(x) swab32(x)

#else

/* Hardware swaps for us (or we're little-endian): all identity. */
#define __ioswab8(x) (x)
#define __ioswab16(x) (x)
#define __ioswab32(x) (x)

#endif
/*
 * <Bacchus> Historically I wrote this stuff the same way as Linus did
 * because I was young and clueless.  And now it's so jucky that I
 * don't want to put my eyes on it again to get rid of it :-)
 *
 * I'll do it then, because this code offends both me and my compiler
 * - particularly the bits of inline asm which end up doing crap like
 * 'lb $2,$2($5)' -- dwmw2
 */
#define IO_SPACE_LIMIT 0xffff
/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions. mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * on any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/* Const is cast away so the base can be patched in exactly once at boot. */
#define set_io_port_base(base) \
do { * (unsigned long *) &mips_io_port_base = (base); } while (0)
/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

/* Dummy byte store to port 0x80 to let slow devices settle. */
#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
/* Slowdown disabled (CONF_SLOWDOWN_IO is #undef'd above): no-op. */
#define SLOW_DOWN_IO
#endif
/**
 *	virt_to_phys	-	map virtual addresses to physical
 *	@address: address to remap
 *
 *	The returned physical address is the physical (CPU) mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses directly mapped or allocated via kmalloc.
 *
 *	This function does not give bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return PHYSADDR(address);
}
/**
 *	phys_to_virt	-	map physical address to virtual
 *	@address: address to remap
 *
 *	The returned virtual address is a current CPU mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses that have a kernel mapping
 *
 *	This function does not handle bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)KSEG0ADDR(address);
}
/*
 * IO bus memory addresses are also 1:1 with the physical address
 */
static inline unsigned long virt_to_bus(volatile void * address)
{
	return PHYSADDR(address);
}
/* Inverse of virt_to_bus: bus addresses map straight into KSEG0. */
static inline void * bus_to_virt(unsigned long address)
{
	return (void *)KSEG0ADDR(address);
}

#define page_to_bus page_to_phys
/*
 * isa_slot_offset is the address where E(ISA) busaddress 0 is mapped
 * for the processor.
 */
extern unsigned long isa_slot_offset;
/*
 * Change "struct page" to physical address.
 */
#ifdef CONFIG_64BIT_PHYS_ADDR
/* Physical addresses may exceed 32 bits: compute in u64. */
#define page_to_phys(page) ((u64)(page - mem_map) << PAGE_SHIFT)
#else
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
#endif
extern void * __ioremap(phys_t offset, phys_t size, unsigned long flags);

/**
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size) \
__ioremap((offset), (size), _CACHE_UNCACHED)
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size) \
__ioremap((offset), (size), _CACHE_UNCACHED)
/* Cachable, coherent with update-on-write (SGI-style CC attribute). */
#define ioremap_cacheable_cow(offset, size) \
__ioremap((offset), (size), _CACHE_CACHABLE_COW)
/* Uncached but write-accelerated mapping. */
#define ioremap_uncached_accelerated(offset, size) \
__ioremap((offset), (size), _CACHE_UNCACHED_ACCELERATED)

extern void iounmap(void *addr);
/*
 * XXX We need system specific versions of these to handle EISA address bits
 * on SNI.  XXX more SNI hacks.
 */
/* MMIO accessors: read/write with platform byte-swapping applied. */
#define readb(addr) (*(volatile unsigned char *)(addr))
#define readw(addr) __ioswab16((*(volatile unsigned short *)(addr)))
#define readl(addr) __ioswab32((*(volatile unsigned int *)(addr)))

/* Raw variants: no byte-swapping, data hits the bus as-is. */
#define __raw_readb(addr) (*(volatile unsigned char *)(addr))
#define __raw_readw(addr) (*(volatile unsigned short *)(addr))
#define __raw_readl(addr) (*(volatile unsigned int *)(addr))

#define writeb(b,addr) ((*(volatile unsigned char *)(addr)) = (__ioswab8(b)))
#define writew(b,addr) ((*(volatile unsigned short *)(addr)) = (__ioswab16(b)))
#define writel(b,addr) ((*(volatile unsigned int *)(addr)) = (__ioswab32(b)))

#define __raw_writeb(b,addr) ((*(volatile unsigned char *)(addr)) = (b))
#define __raw_writew(w,addr) ((*(volatile unsigned short *)(addr)) = (w))
#define __raw_writel(l,addr) ((*(volatile unsigned int *)(addr)) = (l))

/* Bulk MMIO helpers; plain memcpy/memset are fine on directly-mapped IO. */
#define memset_io(a,b,c) memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
/*
 * ISA space is 'always mapped' on currently supported MIPS systems, no need
 * to explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite large):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))

/* ISA accessors: same as the generic ones, offset by __ISA_IO_base. */
#define isa_readb(a) readb(__ISA_IO_base + (a))
#define isa_readw(a) readw(__ISA_IO_base + (a))
#define isa_readl(a) readl(__ISA_IO_base + (a))
#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it. The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))
/**
 *	check_signature		-	find BIOS signatures
 *	@io_addr: mmio address to check
 *	@signature:  signature block
 *	@length: length of signature
 *
 *	Perform a signature comparison with the mmio address io_addr. This
 *	address should have been obtained by ioremap.
 *	Returns 1 on a match.
 */
static inline int check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
/**
 *	isa_check_signature	-	find BIOS signatures
 *	@io_addr: mmio address to check
 *	@signature:  signature block
 *	@length: length of signature
 *
 *	Perform a signature comparison with the ISA mmio address io_addr.
 *	Returns 1 on a match.
 *
 *	This function is deprecated. New drivers should use ioremap and
 *	check_signature.
 */
static inline int isa_check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (isa_readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
/*
 * Port output: byte-swap the value as the platform requires and store
 * it into I/O port space.  The _p variants additionally run SLOW_DOWN_IO
 * afterwards so antique devices have time to settle.
 */
#define outb(val,port)							\
do {									\
	*(volatile u8 *)(mips_io_port_base + (port)) = __ioswab8(val);	\
} while(0)

#define outw(val,port)							\
do {									\
	*(volatile u16 *)(mips_io_port_base + (port)) = __ioswab16(val);\
} while(0)

#define outl(val,port)							\
do {									\
	*(volatile u32 *)(mips_io_port_base + (port)) = __ioswab32(val);\
} while(0)

#define outb_p(val,port)						\
do {									\
	*(volatile u8 *)(mips_io_port_base + (port)) = __ioswab8(val);	\
	SLOW_DOWN_IO;							\
} while(0)

#define outw_p(val,port)						\
do {									\
	*(volatile u16 *)(mips_io_port_base + (port)) = __ioswab16(val);\
	SLOW_DOWN_IO;							\
} while(0)

#define outl_p(val,port)						\
do {									\
	*(volatile u32 *)(mips_io_port_base + (port)) = __ioswab32(val);\
	SLOW_DOWN_IO;							\
} while(0)
/* Port input: forwarded to the inline __in* helpers defined below.
   The _p variants add a settle delay after the load. */
#define inb(port) __inb(port)
#define inw(port) __inw(port)
#define inl(port) __inl(port)
#define inb_p(port) __inb_p(port)
#define inw_p(port) __inw_p(port)
#define inl_p(port) __inl_p(port)
374 static inline unsigned char __inb(unsigned long port)
376 return __ioswab8(*(volatile u8 *)(mips_io_port_base + port));
379 static inline unsigned short __inw(unsigned long port)
381 return __ioswab16(*(volatile u16 *)(mips_io_port_base + port));
384 static inline unsigned int __inl(unsigned long port)
386 return __ioswab32(*(volatile u32 *)(mips_io_port_base + port));
389 static inline unsigned char __inb_p(unsigned long port)
393 __val = *(volatile u8 *)(mips_io_port_base + port);
396 return __ioswab8(__val);
399 static inline unsigned short __inw_p(unsigned long port)
403 __val = *(volatile u16 *)(mips_io_port_base + port);
406 return __ioswab16(__val);
409 static inline unsigned int __inl_p(unsigned long port)
413 __val = *(volatile u32 *)(mips_io_port_base + port);
415 return __ioswab32(__val);
/* String I/O: move count elements between memory at addr and one port. */
#define outsb(port, addr, count) __outsb(port, addr, count)
#define insb(port, addr, count) __insb(port, addr, count)
#define outsw(port, addr, count) __outsw(port, addr, count)
#define insw(port, addr, count) __insw(port, addr, count)
#define outsl(port, addr, count) __outsl(port, addr, count)
#define insl(port, addr, count) __insl(port, addr, count)
425 static inline void __outsb(unsigned long port, void *addr, unsigned int count)
428 outb(*(u8 *)addr, port);
433 static inline void __insb(unsigned long port, void *addr, unsigned int count)
436 *(u8 *)addr = inb(port);
441 static inline void __outsw(unsigned long port, void *addr, unsigned int count)
444 outw(*(u16 *)addr, port);
449 static inline void __insw(unsigned long port, void *addr, unsigned int count)
452 *(u16 *)addr = inw(port);
457 static inline void __outsl(unsigned long port, void *addr, unsigned int count)
460 outl(*(u32 *)addr, port);
465 static inline void __insl(unsigned long port, void *addr, unsigned int count)
468 *(u32 *)addr = inl(port);
/*
 * The caches on some architectures aren't dma-coherent and have need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 */
#ifdef CONFIG_NONCOHERENT_IO

/* Set at boot to the cache-flush routines for the detected CPU. */
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start,size) _dma_cache_wback_inv(start,size)
#define dma_cache_wback(start,size) _dma_cache_wback(start,size)
#define dma_cache_inv(start,size) _dma_cache_inv(start,size)

#else /* Sane hardware */

/* Coherent I/O: no-ops that still evaluate their arguments. */
#define dma_cache_wback_inv(start,size) do { (start); (size); } while (0)
#define dma_cache_wback(start,size) do { (start); (size); } while (0)
#define dma_cache_inv(start,size) do { (start); (size); } while (0)

#endif /* CONFIG_NONCOHERENT_IO */
509 #endif /* _ASM_IO_H */