#ifndef __ASM_SH64_IO_H
#define __ASM_SH64_IO_H
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/io.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 */
/*
 * Convention:
 *    read{b,w,l}/write{b,w,l} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA.
 * These may (will) be platform-specific functions.
 *
 * In addition, we have
 *    ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific I/O,
 *    which are processor specific.
 * The address should be the result of onchip_remap().
 */
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/page.h>	/* for __pa()/__va() used below */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys
/*
 * Nothing overly special here... instead of doing the same thing
 * over and over again, we just define a set of sh64_in/out functions
 * with an implicit size.  The traditional read{b,w,l}/write{b,w,l}
 * mess is wrapped to this, as are the SH-specific ctrl_in/out routines.
 */
static inline unsigned char sh64_in8(unsigned long addr)
{
	return *(volatile unsigned char *)addr;
}

static inline unsigned short sh64_in16(unsigned long addr)
{
	return *(volatile unsigned short *)addr;
}

static inline unsigned long sh64_in32(unsigned long addr)
{
	return *(volatile unsigned long *)addr;
}

static inline unsigned long long sh64_in64(unsigned long addr)
{
	return *(volatile unsigned long long *)addr;
}

static inline void sh64_out8(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char *)addr = b;
}

static inline void sh64_out16(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short *)addr = b;
}

static inline void sh64_out32(unsigned long b, unsigned long addr)
{
	*(volatile unsigned long *)addr = b;
}

static inline void sh64_out64(unsigned long long b, unsigned long addr)
{
	*(volatile unsigned long long *)addr = b;
}
#define readb(addr)		sh64_in8(addr)
#define readw(addr)		sh64_in16(addr)
#define readl(addr)		sh64_in32(addr)

#define writeb(b, addr)		sh64_out8(b, addr)
#define writew(b, addr)		sh64_out16(b, addr)
#define writel(b, addr)		sh64_out32(b, addr)
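
/*
 * Example (illustrative only; CTRL_REG is a hypothetical register
 * address that would normally come from ioremap()): a read-modify-write
 * of a memory-mapped device register through the wrappers above.
 *
 *	unsigned long v = readl(CTRL_REG);
 *	v |= 0x1;			(set a made-up enable bit)
 *	writel(v, CTRL_REG);
 */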
#define ctrl_inb(addr)		sh64_in8(addr)
#define ctrl_inw(addr)		sh64_in16(addr)
#define ctrl_inl(addr)		sh64_in32(addr)

#define ctrl_outb(b, addr)	sh64_out8(b, addr)
#define ctrl_outw(b, addr)	sh64_out16(b, addr)
#define ctrl_outl(b, addr)	sh64_out32(b, addr)
unsigned long inb(unsigned long port);
unsigned long inw(unsigned long port);
unsigned long inl(unsigned long port);
void outb(unsigned long value, unsigned long port);
void outw(unsigned long value, unsigned long port);
void outl(unsigned long value, unsigned long port);
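
/*
 * Example (illustrative; 0x3f8 is the conventional base port of a PC
 * style 16550 UART and is used here purely as a sketch): busy-wait on
 * the Line Status Register, then write one character.
 *
 *	while (!(inb(0x3f8 + 5) & 0x20))
 *		;	(spin until the transmit holding register is empty)
 *	outb('A', 0x3f8);
 */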
#ifdef __KERNEL__

#ifdef CONFIG_SH_CAYMAN
extern unsigned long smsc_superio_virt;
#endif

extern unsigned long pciio_virt;
#define IO_SPACE_LIMIT 0xffffffff
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
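
/*
 * Example (illustrative; assumes "buf" is a kmalloc()'d buffer, which
 * lives in the identity-mapped region): getting the address a
 * bus-mastering device would use.
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 *	(phys == virt_to_bus(buf), since virt_to_bus is an alias here)
 */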
extern void *__ioremap(unsigned long phys_addr, unsigned long size,
		       unsigned long flags);

static inline void *ioremap(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 1);
}

static inline void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}

extern void iounmap(void *addr);
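
/*
 * Example (illustrative; DEV_PHYS_BASE and the 0x100 window size are
 * made up): the usual map/access/unmap lifecycle for device registers.
 * ioremap_nocache() is the safe default for registers, and note that
 * the read/write wrappers above take an unsigned long address.
 *
 *	void *regs = ioremap_nocache(DEV_PHYS_BASE, 0x100);
 *	if (regs) {
 *		unsigned long id = readl((unsigned long)regs);
 *		iounmap(regs);
 *	}
 */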
unsigned long onchip_remap(unsigned long addr, unsigned long size, const char *name);
extern void onchip_unmap(unsigned long vaddr);
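
/*
 * Example (illustrative; TMU_PHYS and the 0x0c register offset are made
 * up): the ctrl_in/out wrappers expect an onchip_remap()'d address, per
 * the convention at the top of this file.
 *
 *	unsigned long tmu = onchip_remap(TMU_PHYS, 0x1000, "TMU");
 *	unsigned long ticks = ctrl_inl(tmu + 0x0c);
 *	ctrl_outl(ticks, tmu + 0x0c);
 *	onchip_unmap(tmu);
 */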
static inline int check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
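
/*
 * Example (illustrative; rom_base and the signature bytes are made up):
 * probing a mapped window for a known device signature.
 *
 *	static const unsigned char sig[] = { 0x55, 0xaa };
 *	if (check_signature(rom_base, sig, 2))
 *		(found an option-ROM style header at rom_base)
 */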
/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *  - dma_cache_wback(start, size) writes back any dirty lines but does
 *    not invalidate the cache.  This can be used before DMA reads from
 *    memory.
 */

/*
 * Implemented even though DMA is not yet supported on the ST50.
 *
 * Also note that PCI DMA is supposed to be cache coherent,
 * therefore these should not be used by PCI device drivers.
 */
static inline void dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

static inline void dma_cache_inv(unsigned long start, unsigned long size)
{
	/*
	 * Note that the caller has to be careful with overzealous
	 * invalidation should there be partial cache lines at the
	 * extremities of the specified range.
	 */
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi %0, 0" : : "r" (s));
}

static inline void dma_cache_wback(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb %0, 0" : : "r" (s));
}
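
/*
 * Example (illustrative; assumes a non-coherent, non-PCI DMA engine as
 * described above, with "buf" the unsigned long address of the buffer):
 *
 *	dma_cache_wback(buf, len);	(CPU wrote buf; flush before device reads)
 *	dma_cache_inv(buf, len);	(device will write buf; drop stale lines)
 */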
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_IO_H */