1 /* Generic MTRR (Memory Type Range Register) driver.
3 Copyright (C) 1997-2000 Richard Gooch
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
23 Source: "Pentium Pro Family Developer's Manual, Volume 3:
24 Operating System Writer's Guide" (Intel document number 242692),
29 Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
30 Initial register-setting code (from proform-1.0).
31 19971216 Richard Gooch <rgooch@atnf.csiro.au>
32 Original version for /proc/mtrr interface, SMP-safe.
34 19971217 Richard Gooch <rgooch@atnf.csiro.au>
35 Bug fix for ioctls()'s.
36 Added sample code in Documentation/mtrr.txt
38 19971218 Richard Gooch <rgooch@atnf.csiro.au>
39 Disallow overlapping regions.
40 19971219 Jens Maurer <jmaurer@menuett.rhein-main.de>
41 Register-setting fixups.
43 19971222 Richard Gooch <rgooch@atnf.csiro.au>
44 Fixups for kernel 2.1.75.
46 19971229 David Wragg <dpw@doc.ic.ac.uk>
47 Register-setting fixups and conformity with Intel conventions.
48 19971229 Richard Gooch <rgooch@atnf.csiro.au>
49 Cosmetic changes and wrote this ChangeLog ;-)
50 19980106 Richard Gooch <rgooch@atnf.csiro.au>
51 Fixups for kernel 2.1.78.
53 19980119 David Wragg <dpw@doc.ic.ac.uk>
54 Included passive-release enable code (elsewhere in PCI setup).
56 19980131 Richard Gooch <rgooch@atnf.csiro.au>
57 Replaced global kernel lock with private spinlock.
59 19980201 Richard Gooch <rgooch@atnf.csiro.au>
60 Added wait for other CPUs to complete changes.
62 19980202 Richard Gooch <rgooch@atnf.csiro.au>
63 Bug fix in definition of <set_mtrr> for UP.
65 19980319 Richard Gooch <rgooch@atnf.csiro.au>
66 Fixups for kernel 2.1.90.
67 19980323 Richard Gooch <rgooch@atnf.csiro.au>
68 Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
70 19980325 Richard Gooch <rgooch@atnf.csiro.au>
71 Fixed test for overlapping regions: confused by adjacent regions
72 19980326 Richard Gooch <rgooch@atnf.csiro.au>
73 Added wbinvd in <set_mtrr_prepare>.
74 19980401 Richard Gooch <rgooch@atnf.csiro.au>
75 Bug fix for non-SMP compilation.
76 19980418 David Wragg <dpw@doc.ic.ac.uk>
77 Fixed-MTRR synchronisation for SMP and use atomic operations
79 19980418 Richard Gooch <rgooch@atnf.csiro.au>
80 Differentiate different MTRR register classes for BIOS fixup.
82 19980419 David Wragg <dpw@doc.ic.ac.uk>
83 Bug fix in variable MTRR synchronisation.
85 19980419 Richard Gooch <rgooch@atnf.csiro.au>
86 Fixups for kernel 2.1.97.
88 19980421 Richard Gooch <rgooch@atnf.csiro.au>
89 Safer synchronisation across CPUs when changing MTRRs.
91 19980423 Richard Gooch <rgooch@atnf.csiro.au>
92 Bugfix for SMP systems without MTRR support.
94 19980427 Richard Gooch <rgooch@atnf.csiro.au>
95 Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
97 19980427 Richard Gooch <rgooch@atnf.csiro.au>
98 Use atomic bitops for setting SMP change mask.
100 19980428 Richard Gooch <rgooch@atnf.csiro.au>
101 Removed spurious diagnostic message.
103 19980429 Richard Gooch <rgooch@atnf.csiro.au>
104 Moved register-setting macros into this file.
105 Moved setup code from init/main.c to i386-specific areas.
107 19980502 Richard Gooch <rgooch@atnf.csiro.au>
108 Moved MTRR detection outside conditionals in <mtrr_init>.
110 19980502 Richard Gooch <rgooch@atnf.csiro.au>
111 Documentation improvement: mention Pentium II and AGP.
113 19980521 Richard Gooch <rgooch@atnf.csiro.au>
114 Only manipulate interrupt enable flag on local CPU.
115 Allow enclosed uncachable regions.
117 19980611 Richard Gooch <rgooch@atnf.csiro.au>
118 Always define <main_lock>.
120 19980901 Richard Gooch <rgooch@atnf.csiro.au>
121 Removed module support in order to tidy up code.
122 Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>.
123 Created addition queue for prior to SMP commence.
125 19980902 Richard Gooch <rgooch@atnf.csiro.au>
126 Ported patch to kernel 2.1.120-pre3.
128 19980910 Richard Gooch <rgooch@atnf.csiro.au>
129 Removed sanity checks and addition queue: Linus prefers an OOPS.
131 19981001 Richard Gooch <rgooch@atnf.csiro.au>
132 Fixed harmless compiler warning in include/asm-i386/mtrr.h
133 Fixed version numbering and history for v1.23 -> v1.24.
135 19990118 Richard Gooch <rgooch@atnf.csiro.au>
138 19990123 Richard Gooch <rgooch@atnf.csiro.au>
139 Changed locking to spin with reschedule.
140 Made use of new <smp_call_function>.
142 19990201 Zoltán Böszörményi <zboszor@mail.externet.hu>
143 Extended the driver to be able to use Cyrix style ARRs.
144 19990204 Richard Gooch <rgooch@atnf.csiro.au>
145 Restructured Cyrix support.
147 19990204 Zoltán Böszörményi <zboszor@mail.externet.hu>
148 Refined ARR support: enable MAPEN in set_mtrr_prepare()
149 and disable MAPEN in set_mtrr_done().
150 19990205 Richard Gooch <rgooch@atnf.csiro.au>
153 19990208 Zoltán Böszörményi <zboszor@mail.externet.hu>
154 Protect plain 6x86s (and other processors without the
155 Page Global Enable feature) against accessing CR4 in
156 set_mtrr_prepare() and set_mtrr_done().
157 19990210 Richard Gooch <rgooch@atnf.csiro.au>
158 Turned <set_mtrr_up> and <get_mtrr> into function pointers.
160 19990212 Zoltán Böszörményi <zboszor@mail.externet.hu>
161 Major rewrite of cyrix_arr_init(): do not touch ARRs,
162 leave them as the BIOS have set them up.
163 Enable usage of all 8 ARRs.
164 Avoid multiplications by 3 everywhere and other
165 code clean ups/speed ups.
166 19990213 Zoltán Böszörményi <zboszor@mail.externet.hu>
167 Set up other Cyrix processors identical to the boot cpu.
168 Since Cyrix don't support Intel APIC, this is l'art pour l'art.
170 If size <= 32M is given, set up ARR# we were given.
171 If size > 32M is given, set up ARR7 only if it is free,
173 19990214 Zoltán Böszörményi <zboszor@mail.externet.hu>
174 Also check for size >= 256K if we are to set up ARR7,
175 mtrr_add() returns the value it gets from set_mtrr()
176 19990218 Zoltán Böszörményi <zboszor@mail.externet.hu>
177 Remove Cyrix "coma bug" workaround from here.
178 Moved to linux/arch/i386/kernel/setup.c and
179 linux/include/asm-i386/bugs.h
180 19990228 Richard Gooch <rgooch@atnf.csiro.au>
181 Added MTRRIOC_KILL_ENTRY ioctl(2)
182 Trap for counter underflow in <mtrr_file_del>.
183 Trap for 4 MiB aligned regions for PPro, stepping <= 7.
184 19990301 Richard Gooch <rgooch@atnf.csiro.au>
185 Created <get_free_region> hook.
186 19990305 Richard Gooch <rgooch@atnf.csiro.au>
187 Temporarily disable AMD support now MTRR capability flag is set.
189 19990308 Zoltán Böszörményi <zboszor@mail.externet.hu>
190 Adjust my changes (19990212-19990218) to Richard Gooch's
191 latest changes. (19990228-19990305)
193 19990309 Richard Gooch <rgooch@atnf.csiro.au>
194 Fixed typo in <printk> message.
195 19990310 Richard Gooch <rgooch@atnf.csiro.au>
196 Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
198 19990511 Bart Hartgers <bart@etpmod.phys.tue.nl>
199 Support Centaur C6 MCR's.
200 19990512 Richard Gooch <rgooch@atnf.csiro.au>
203 19990707 Zoltán Böszörményi <zboszor@mail.externet.hu>
204 Check whether ARR3 is protected in cyrix_get_free_region()
205 and mtrr_del(). The code won't attempt to delete or change it
206 from now on if the BIOS protected ARR3. It silently skips ARR3
207 in cyrix_get_free_region() or returns with an error code from
209 19990711 Zoltán Böszörményi <zboszor@mail.externet.hu>
210 Reset some bits in the CCRs in cyrix_arr_init() to disable SMM
211 if ARR3 isn't protected. This is needed because if SMM is active
212 and ARR3 isn't protected then deleting and setting ARR3 again
213 may lock up the processor. With SMM entirely disabled, it does
215 19990812 Zoltán Böszörményi <zboszor@mail.externet.hu>
216 Rearrange switch() statements so the driver accommodates
217 the fact that the AMD Athlon handles its MTRRs the same way
219 19990814 Zoltán Böszörményi <zboszor@mail.externet.hu>
220 Double check for Intel in mtrr_add()'s big switch() because
221 that revision check is only valid for Intel CPUs.
222 19990819 Alan Cox <alan@redhat.com>
223 Tested Zoltan's changes on a pre production Athlon - 100%
225 19991008 Manfred Spraul <manfreds@colorfullife.com>
226 replaced spin_lock_reschedule() with a normal semaphore.
228 20000221 Richard Gooch <rgooch@atnf.csiro.au>
229 Compile fix if procfs and devfs not enabled.
232 20001109 H. Peter Anvin <hpa@zytor.com>
233 Use the new centralized CPU feature detects.
236 20010309 Dave Jones <davej@suse.de>
237 Add support for Cyrix III.
240 20010312 Dave Jones <davej@suse.de>
241 Ugh, I broke AMD support.
242 Reworked fix by Troels Walsted Hansen <troels@thule.no>
245 20010327 Dave Jones <davej@suse.de>
246 Adapted Cyrix III support to include VIA C3.
249 #include <linux/types.h>
250 #include <linux/errno.h>
251 #include <linux/sched.h>
252 #include <linux/tty.h>
253 #include <linux/timer.h>
254 #include <linux/config.h>
255 #include <linux/kernel.h>
256 #include <linux/wait.h>
257 #include <linux/string.h>
258 #include <linux/slab.h>
259 #include <linux/ioport.h>
260 #include <linux/delay.h>
261 #include <linux/fs.h>
262 #include <linux/ctype.h>
263 #include <linux/proc_fs.h>
264 #include <linux/devfs_fs_kernel.h>
265 #include <linux/mm.h>
266 #include <linux/module.h>
267 #include <linux/pci.h>
268 #define MTRR_NEED_STRINGS
269 #include <asm/mtrr.h>
270 #include <linux/init.h>
271 #include <linux/smp.h>
272 #include <linux/smp_lock.h>
274 #include <asm/uaccess.h>
276 #include <asm/processor.h>
277 #include <asm/system.h>
278 #include <asm/pgtable.h>
279 #include <asm/segment.h>
280 #include <asm/bitops.h>
281 #include <asm/atomic.h>
284 #include <asm/hardirq.h>
285 #include <linux/irq.h>
287 #define MTRR_VERSION "1.40 (20010327)"
293 * The code assumes all processors support the same MTRR
294 * interface. This is generally a good assumption, but could
295 * potentially be a problem.
298 MTRR_IF_NONE, /* No MTRRs supported */
299 MTRR_IF_INTEL, /* Intel (P6) standard MTRRs */
300 MTRR_IF_AMD_K6, /* AMD pre-Athlon MTRRs */
301 MTRR_IF_CYRIX_ARR, /* Cyrix ARRs */
302 MTRR_IF_CENTAUR_MCR, /* Centaur MCRs */
303 } mtrr_if = MTRR_IF_NONE;
305 static __initdata char *mtrr_if_name[] = {
306 "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
309 #define MTRRcap_MSR 0x0fe
310 #define MTRRdefType_MSR 0x2ff
312 #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
313 #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
315 #define NUM_FIXED_RANGES 88
316 #define MTRRfix64K_00000_MSR 0x250
317 #define MTRRfix16K_80000_MSR 0x258
318 #define MTRRfix16K_A0000_MSR 0x259
319 #define MTRRfix4K_C0000_MSR 0x268
320 #define MTRRfix4K_C8000_MSR 0x269
321 #define MTRRfix4K_D0000_MSR 0x26a
322 #define MTRRfix4K_D8000_MSR 0x26b
323 #define MTRRfix4K_E0000_MSR 0x26c
324 #define MTRRfix4K_E8000_MSR 0x26d
325 #define MTRRfix4K_F0000_MSR 0x26e
326 #define MTRRfix4K_F8000_MSR 0x26f
329 # define MTRR_CHANGE_MASK_FIXED 0x01
330 # define MTRR_CHANGE_MASK_VARIABLE 0x02
331 # define MTRR_CHANGE_MASK_DEFTYPE 0x04
334 /* In the Intel processor's MTRR interface, the MTRR type is always held in
336 typedef u8 mtrr_type;
/* NOTE(review): JIFFIE_TIMEOUT is not referenced anywhere in this (partial)
   listing -- presumably a spin-wait bound elsewhere in the file; confirm. */
339 #define JIFFIE_TIMEOUT 100
342 # define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
344 # define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \
348 #if defined(CONFIG_PROC_FS) || defined(CONFIG_DEVFS_FS)
349 # define USERSPACE_INTERFACE
352 #ifndef USERSPACE_INTERFACE
353 # define compute_ascii() while (0)
356 #ifdef USERSPACE_INTERFACE
357 static char *ascii_buffer;
358 static unsigned int ascii_buf_bytes;
360 static unsigned int *usage_table;
361 static DECLARE_MUTEX(main_lock);
363 /* Private functions */
364 #ifdef USERSPACE_INTERFACE
365 static void compute_ascii (void);
/* CPU state saved across an MTRR update so set_mtrr_done() can restore it.
   NOTE(review): the listing skips original lines 370-371 and 375-377 -- at
   minimum the opening brace, a saved flags word (set_mtrr_prepare_save writes
   ctxt->flags) and a Cyrix ccr3 field (ctxt->ccr3 is used below) are missing. */
369 struct set_mtrr_context
/* Saved MTRRdefType MSR halves (Intel only). */
372 unsigned long deftype_lo;
373 unsigned long deftype_hi;
/* Saved CR4 so Page Global Enable can be restored. */
374 unsigned long cr4val;
/* Non-zero when the BIOS protected Cyrix ARR3 (SMM region); checked before
   modifying ARR3 -- see changelog entry 19990707. */
378 static int arr3_protected;
380 /* Put the processor into a state where MTRRs can be safely set */
/* NOTE(review): embedded original line numbers skip (382, 385, 387-388,
   393-394, 396-403, 407, 410) -- this listing is missing lines (early
   return, cache disable/wbinvd sequence); code left byte-identical. */
381 static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
383 /* Disable interrupts locally */
384 __save_flags (ctxt->flags); __cli ();
/* Only Intel and Cyrix-ARR interfaces need the CR4/cache/MSR dance below. */
386 if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
389 /* Save value of CR4 and clear Page Global Enable (bit 7) */
390 if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) ) {
391 ctxt->cr4val = read_cr4();
392 write_cr4(ctxt->cr4val & (unsigned char) ~(1<<7));
395 /* Disable and flush caches. Note that wbinvd flushes the TLBs as
/* 0x40000000 = CR0.CD (bit 30): disable caching. */
398 unsigned int cr0 = read_cr0() | 0x40000000;
404 if ( mtrr_if == MTRR_IF_INTEL ) {
405 /* Save MTRR state */
406 rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
408 /* Cyrix ARRs - everything else was excluded at the top */
409 ctxt->ccr3 = getCx86 (CX86_CCR3);
411 } /* End Function set_mtrr_prepare_save */
/* Disable range registers on this CPU so they can be rewritten.
   Caller must have run set_mtrr_prepare_save() first (interrupts off,
   caches flushed).  No-op for interfaces other than Intel / Cyrix ARR. */
413 static void set_mtrr_disable (struct set_mtrr_context *ctxt)
415 if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
418 if ( mtrr_if == MTRR_IF_INTEL ) {
419 /* Disable MTRRs, and set the default type to uncached */
420 wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
/* Cyrix: CCR3 bit 4 (0x10) is MAPEN, exposing the ARR config registers. */
422 /* Cyrix ARRs - everything else was excluded at the top */
423 setCx86 (CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
425 } /* End Function set_mtrr_disable */
427 /* Restore the processor after a set_mtrr_prepare */
428 static void set_mtrr_done (struct set_mtrr_context *ctxt)
/* Interfaces without saved MSR/CR4 state only need interrupts restored. */
430 if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR ) {
431 __restore_flags (ctxt->flags);
435 /* Flush caches and TLBs */
438 /* Restore MTRRdefType */
439 if ( mtrr_if == MTRR_IF_INTEL ) {
440 /* Intel (P6) standard MTRRs */
441 wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
/* Restoring the saved CCR3 also clears MAPEN again on Cyrix. */
443 /* Cyrix ARRs - everything else was excluded at the top */
444 setCx86 (CX86_CCR3, ctxt->ccr3);
/* 0xbfffffff clears CR0.CD (bit 30): re-enable caching. */
448 write_cr0( read_cr0() & 0xbfffffff );
450 /* Restore value of CR4 */
451 if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
452 write_cr4(ctxt->cr4val);
454 /* Re-enable interrupts locally (if enabled previously) */
455 __restore_flags (ctxt->flags);
456 } /* End Function set_mtrr_done */
458 /* This function returns the number of variable MTRRs */
/* NOTE(review): the switch header and several case bodies (original lines
   460, 462-465, 468-469, 471, 473-476) are missing from this listing. */
459 static unsigned int get_num_var_ranges (void)
461 unsigned long config, dummy;
/* Intel: low byte of MTRRcap (VCNT) is the variable-range count. */
466 rdmsr (MTRRcap_MSR, config, dummy);
467 return (config & 0xff);
470 case MTRR_IF_CYRIX_ARR:
472 case MTRR_IF_CENTAUR_MCR:
477 } /* End Function get_num_var_ranges */
479 /* Returns non-zero if we have the write-combining memory type */
480 static int have_wrcomb (void)
482 unsigned long config, dummy;
483 struct pci_dev *dev = NULL;
485 /* ServerWorks LE chipsets have problems with write-combining
486 Don't allow it and leave room for other chipsets to be tagged */
488 if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
489 if ((dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
490 (dev->device == PCI_DEVICE_ID_SERVERWORKS_LE)) {
491 printk (KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n");
495 /* Intel 450NX errata # 23. Non-ascending cacheline evictions to
496 write-combining memory may result in data corruption */
497 dev = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, NULL);
500 printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
/* Intel: MTRRcap bit 10 (WC) advertises the write-combining type. */
507 rdmsr (MTRRcap_MSR, config, dummy);
508 return (config & (1<<10));
511 case MTRR_IF_CENTAUR_MCR:
512 case MTRR_IF_CYRIX_ARR:
517 } /* End Function have_wrcomb */
/* Address-width masks applied when packing/unpacking physBase/physMask MSRs
   (see intel_get_mtrr / intel_set_mtrr_up); set up elsewhere in the file. */
519 static u32 size_or_mask, size_and_mask;
/* Read back variable MTRR <reg>: *base and *size in pages, *type as an
   MTRR type code.  A range whose mask lacks the valid bit reports as free.
   NOTE(review): lines 523, 525, 528-535, 537, 541, 543-544 are missing from
   this listing (the early-return/invalid path and the size computation). */
521 static void intel_get_mtrr (unsigned int reg, unsigned long *base,
522 unsigned long *size, mtrr_type *type)
524 unsigned long mask_lo, mask_hi, base_lo, base_hi;
526 rdmsr (MTRRphysMask_MSR(reg), mask_lo, mask_hi);
/* Bit 11 of physMask is the Valid bit. */
527 if ( (mask_lo & 0x800) == 0 )
529 /* Invalid (i.e. free) range */
536 rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
538 /* Work out the shifted address mask. */
539 mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
540 | mask_lo >> PAGE_SHIFT;
542 /* This works correctly if size is a power of two, i.e. a
545 *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
/* Low byte of physBase holds the memory type. */
546 *type = base_lo & 0xff;
547 } /* End Function intel_get_mtrr */
/* Read back Cyrix ARR <reg>: base/size in pages plus a translated MTRR type.
   ARR bytes are read under MAPEN with interrupts off.  The RCR decode differs
   for ARR7 vs ARR0-6 (the two switch bodies below).
   NOTE(review): several lines are missing from this listing, including the
   declaration of 'flags', the size=0 path, and the switch headers. */
549 static void cyrix_get_arr (unsigned int reg, unsigned long *base,
550 unsigned long *size, mtrr_type *type)
553 unsigned char arr, ccr3, rcr, shift;
555 arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
557 /* Save flags and disable interrupts */
558 __save_flags (flags); __cli ();
560 ccr3 = getCx86 (CX86_CCR3);
561 setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* Assemble the 24-bit ARR base into bytes 3..1 of *base. */
562 ((unsigned char *) base)[3] = getCx86 (arr);
563 ((unsigned char *) base)[2] = getCx86 (arr+1);
564 ((unsigned char *) base)[1] = getCx86 (arr+2);
565 rcr = getCx86(CX86_RCR_BASE + reg);
566 setCx86 (CX86_CCR3, ccr3); /* disable MAPEN */
568 /* Enable interrupts if it was enabled previously */
569 __restore_flags (flags);
/* Low nibble of the third ARR byte encodes the size exponent. */
570 shift = ((unsigned char *) base)[1] & 0x0f;
571 *base >>= PAGE_SHIFT;
573 /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
574 * Note: shift==0xf means 4G, this is unsupported.
577 *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
581 /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
/* ARR7 RCR decode: */
586 case 1: *type = MTRR_TYPE_UNCACHABLE; break;
587 case 8: *type = MTRR_TYPE_WRBACK; break;
588 case 9: *type = MTRR_TYPE_WRCOMB; break;
590 default: *type = MTRR_TYPE_WRTHROUGH; break;
/* ARR0-ARR6 RCR decode (note 8/9 swap relative to ARR7): */
596 case 0: *type = MTRR_TYPE_UNCACHABLE; break;
597 case 8: *type = MTRR_TYPE_WRCOMB; break;
598 case 9: *type = MTRR_TYPE_WRBACK; break;
600 default: *type = MTRR_TYPE_WRTHROUGH; break;
603 } /* End Function cyrix_get_arr */
/* Read back AMD K6 range <reg> (0 or 1) from the single UWCCR MSR:
   low dword is region 0, high dword is region 1. */
605 static void amd_get_mtrr (unsigned int reg, unsigned long *base,
606 unsigned long *size, mtrr_type *type)
608 unsigned long low, high;
610 rdmsr (MSR_K6_UWCCR, low, high);
611 /* Upper dword is region 1, lower is region 0 */
612 if (reg == 1) low = high;
613 /* The base masks off on the right alignment */
614 *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
/* Bit 0 = uncacheable, bit 1 = write-combining. */
616 if (low & 1) *type = MTRR_TYPE_UNCACHABLE;
617 if (low & 2) *type = MTRR_TYPE_WRCOMB;
624 * This needs a little explaining. The size is stored as an
625 * inverted mask of bits of 128K granularity 15 bits long offset
628 * So to get a size we do invert the mask and add 1 to the lowest
629 * mask bit (4 as its 2 bits in). This gives us a size we then shift
630 * to turn into 128K blocks
632 * eg 111 1111 1111 1100 is 512K
634 * invert 000 0000 0000 0011
635 * +1 000 0000 0000 0100
638 low = (~low) & 0x1FFFC;
639 *size = (low + 4) << (15 - PAGE_SHIFT);
641 } /* End Function amd_get_mtrr */
/* Bookkeeping for Centaur/IDT Memory Configuration Registers (MCRs). */
649 static u8 centaur_mcr_reserved;
650 static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */
653 * Report boot time MCR setups
/* Record an MCR value observed at boot in the shadow table.
   NOTE(review): 'centaur_mcr' is declared on a line missing from this
   listing; there is also no visible bounds check on 'mcr' -- callers are
   presumably trusted boot code, but confirm. */
656 void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
658 centaur_mcr[mcr].low = lo;
659 centaur_mcr[mcr].high = hi;
/* Read back Centaur MCR <reg> from the shadow table (MCRs are write-mostly;
   the driver mirrors them in centaur_mcr[]).  base/size in pages. */
662 static void centaur_get_mcr (unsigned int reg, unsigned long *base,
663 unsigned long *size, mtrr_type *type)
665 *base = centaur_mcr[reg].high >> PAGE_SHIFT;
666 *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
667 *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */
/* Winchip2 encodes extra types in the low 5 key bits: */
668 if(centaur_mcr_type==1 && ((centaur_mcr[reg].low&31)&2))
669 *type = MTRR_TYPE_UNCACHABLE;
670 if(centaur_mcr_type==1 && (centaur_mcr[reg].low&31)==25)
671 *type = MTRR_TYPE_WRBACK;
672 if(centaur_mcr_type==0 && (centaur_mcr[reg].low&31)==31)
673 *type = MTRR_TYPE_WRBACK;
675 } /* End Function centaur_get_mcr */
/* Per-interface read-back hook: points at intel_get_mtrr, cyrix_get_arr,
   amd_get_mtrr or centaur_get_mcr depending on mtrr_if. */
677 static void (*get_mtrr) (unsigned int reg, unsigned long *base,
678 unsigned long *size, mtrr_type *type);
680 static void intel_set_mtrr_up (unsigned int reg, unsigned long base,
681 unsigned long size, mtrr_type type, int do_safe)
682 /* [SUMMARY] Set variable MTRR register on the local CPU.
683 <reg> The register to set.
684 <base> The base address of the region.
685 <size> The size of the region. If this is 0 the region is disabled.
686 <type> The type of the region.
687 <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
692 struct set_mtrr_context ctxt;
/* NOTE(review): the if(do_safe) guard on these two calls appears to be on
   a line missing from this listing (cf. the symmetric do_safe at the end). */
695 set_mtrr_prepare_save (&ctxt);
696 set_mtrr_disable (&ctxt);
700 /* The invalid bit is kept in the mask, so we simply clear the
701 relevant mask register to disable a range. */
702 wrmsr (MTRRphysMask_MSR (reg), 0, 0);
/* base/size are in pages; 0x800 sets the Valid bit in physMask. */
706 wrmsr (MTRRphysBase_MSR (reg), base << PAGE_SHIFT | type,
707 (base & size_and_mask) >> (32 - PAGE_SHIFT));
708 wrmsr (MTRRphysMask_MSR (reg), -size << PAGE_SHIFT | 0x800,
709 (-size & size_and_mask) >> (32 - PAGE_SHIFT));
711 if (do_safe) set_mtrr_done (&ctxt);
712 } /* End Function intel_set_mtrr_up */
/* Program Cyrix ARR <reg> with base/size (pages) and an MTRR-style type.
   The ARR stores size as a power-of-two exponent; the RCR value encodes the
   cache policy, with a different encoding for ARR7 vs ARR0-6.
   NOTE(review): the switch headers and several surrounding lines are missing
   from this listing (e.g. original 728-731, 736-741). */
714 static void cyrix_set_arr_up (unsigned int reg, unsigned long base,
715 unsigned long size, mtrr_type type, int do_safe)
717 struct set_mtrr_context ctxt;
718 unsigned char arr, arr_type, arr_size;
720 arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
722 /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
726 size &= 0x7fff; /* make sure arr_size <= 14 */
/* Convert the (shifted) size into its log2 exponent. */
727 for(arr_size = 0; size; arr_size++, size >>= 1);
/* ARR7 RCR encoding: */
732 case MTRR_TYPE_UNCACHABLE: arr_type = 1; break;
733 case MTRR_TYPE_WRCOMB: arr_type = 9; break;
734 case MTRR_TYPE_WRTHROUGH: arr_type = 24; break;
735 default: arr_type = 8; break;
/* ARR0-ARR6 RCR encoding (8/9 swapped relative to ARR7): */
742 case MTRR_TYPE_UNCACHABLE: arr_type = 0; break;
743 case MTRR_TYPE_WRCOMB: arr_type = 8; break;
744 case MTRR_TYPE_WRTHROUGH: arr_type = 25; break;
745 default: arr_type = 9; break;
750 set_mtrr_prepare_save (&ctxt);
751 set_mtrr_disable (&ctxt);
/* Write base bytes high-to-low; the last byte carries the size exponent. */
754 setCx86(arr, ((unsigned char *) &base)[3]);
755 setCx86(arr+1, ((unsigned char *) &base)[2]);
756 setCx86(arr+2, (((unsigned char *) &base)[1]) | arr_size);
757 setCx86(CX86_RCR_BASE + reg, arr_type);
758 if (do_safe) set_mtrr_done (&ctxt);
759 } /* End Function cyrix_set_arr_up */
761 static void amd_set_mtrr_up (unsigned int reg, unsigned long base,
762 unsigned long size, mtrr_type type, int do_safe)
763 /* [SUMMARY] Set variable MTRR register on the local CPU.
764 <reg> The register to set.
765 <base> The base address of the region.
766 <size> The size of the region. If this is 0 the region is disabled.
767 <type> The type of the region.
768 <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
774 struct set_mtrr_context ctxt;
/* NOTE(review): the declaration of 'regs' and the if(do_safe) guard are on
   lines missing from this listing. */
777 set_mtrr_prepare_save (&ctxt);
778 set_mtrr_disable (&ctxt);
781 * Low is MTRR0 , High MTRR 1
783 rdmsr (MSR_K6_UWCCR, regs[0], regs[1]);
790 /* Set the register to the base, the type (off by one) and an
791 inverted bitmask of the size The size is the only odd
792 bit. We are fed say 512K We invert this and we get 111 1111
793 1111 1011 but if you subtract one and invert you get the
794 desired 111 1111 1111 1100 mask
796 But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */
797 regs[reg] = (-size>>(15-PAGE_SHIFT) & 0x0001FFFC)
798 | (base<<PAGE_SHIFT) | (type+1);
801 * The writeback rule is quite specific. See the manual. It is:
802 * disable local interrupts, write back the cache, set the mtrr
805 wrmsr (MSR_K6_UWCCR, regs[0], regs[1]);
806 if (do_safe) set_mtrr_done (&ctxt);
807 } /* End Function amd_set_mtrr_up */
/* Program Centaur MCR <reg> and mirror the value in centaur_mcr[].
   NOTE(review): the 'int do_safe' parameter line and the size==0 path are
   missing from this listing. */
810 static void centaur_set_mcr_up (unsigned int reg, unsigned long base,
811 unsigned long size, mtrr_type type,
814 struct set_mtrr_context ctxt;
815 unsigned long low, high;
818 set_mtrr_prepare_save (&ctxt);
819 set_mtrr_disable (&ctxt);
828 high = base << PAGE_SHIFT;
/* Winchip (type 0) has no key bits for other policies: */
829 if(centaur_mcr_type == 0)
830 low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */
/* Winchip2 (type 1) can also express uncacheable: */
833 if(type == MTRR_TYPE_UNCACHABLE)
834 low = -size << PAGE_SHIFT | 0x02; /* NC */
836 low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */
839 centaur_mcr[reg].high = high;
840 centaur_mcr[reg].low = low;
841 wrmsr (MSR_IDT_MCR0 + reg, low, high);
842 if (do_safe) set_mtrr_done( &ctxt );
843 } /* End Function centaur_set_mcr_up */
/* Per-interface write hook (local CPU only); the trailing 'int do_safe'
   parameter is on a line missing from this listing. */
845 static void (*set_mtrr_up) (unsigned int reg, unsigned long base,
846 unsigned long size, mtrr_type type,
/* Shadow of one variable-range MSR pair (physBase/physMask, lo/hi halves).
   NOTE(review): opening and closing braces are on lines missing from this
   listing. */
851 struct mtrr_var_range
853 unsigned long base_lo;
854 unsigned long base_hi;
855 unsigned long mask_lo;
856 unsigned long mask_hi;
860 /* Get the MSR pair relating to a var range */
861 static void __init get_mtrr_var_range (unsigned int index,
862 struct mtrr_var_range *vr)
864 rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
865 rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
866 } /* End Function get_mtrr_var_range */
869 /* Set the MSR pair relating to a var range. Returns TRUE if
/* Compare-then-write: only touches each MSR when the relevant (type/mask)
   bits differ, and reports whether anything changed so callers can warn
   about inconsistent BIOS setup across CPUs.
   NOTE(review): declarations of lo/hi/changed and the return statement are
   on lines missing from this listing. */
871 static int __init set_mtrr_var_range_testing (unsigned int index,
872 struct mtrr_var_range *vr)
877 rdmsr(MTRRphysBase_MSR(index), lo, hi);
/* Mask compares only the architecturally meaningful base bits. */
878 if ( (vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
879 || (vr->base_hi & 0xfUL) != (hi & 0xfUL) )
881 wrmsr (MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
885 rdmsr (MTRRphysMask_MSR(index), lo, hi);
887 if ( (vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
888 || (vr->mask_hi & 0xfUL) != (hi & 0xfUL) )
890 wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
894 } /* End Function set_mtrr_var_range_testing */
/* Read all fixed-range MTRRs into frs (11 MSRs = 88 type bytes:
   1x 64K MSR, 2x 16K MSRs, 8x 4K MSRs).  frs is accessed as an array of
   unsigned long MSR halves. */
896 static void __init get_fixed_ranges(mtrr_type *frs)
898 unsigned long *p = (unsigned long *)frs;
901 rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
903 for (i = 0; i < 2; i++)
904 rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
905 for (i = 0; i < 8; i++)
906 rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
907 } /* End Function get_fixed_ranges */
/* Write the fixed-range MTRRs from frs, but only MSRs whose current value
   differs; returns whether anything changed (TRUE/FALSE).
   NOTE(review): declarations of i/changed and the return are on lines
   missing from this listing. */
909 static int __init set_fixed_ranges_testing(mtrr_type *frs)
911 unsigned long *p = (unsigned long *)frs;
914 unsigned long lo, hi;
916 rdmsr(MTRRfix64K_00000_MSR, lo, hi);
917 if (p[0] != lo || p[1] != hi)
919 wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
923 for (i = 0; i < 2; i++)
925 rdmsr (MTRRfix16K_80000_MSR + i, lo, hi);
926 if (p[2 + i*2] != lo || p[3 + i*2] != hi)
928 wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
933 for (i = 0; i < 8; i++)
935 rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi);
936 if (p[6 + i*2] != lo || p[7 + i*2] != hi)
938 wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
943 } /* End Function set_fixed_ranges_testing */
/* Fields of struct mtrr_state (the struct header, a def_type field used by
   get_mtrr_state/set_mtrr_state, and the closing brace are on lines missing
   from this listing). */
947 unsigned int num_var_ranges;
948 struct mtrr_var_range *var_ranges;
949 mtrr_type fixed_ranges[NUM_FIXED_RANGES];
950 unsigned char enabled;
955 /* Grab all of the MTRR state for this CPU into *state */
956 static void __init get_mtrr_state(struct mtrr_state *state)
958 unsigned int nvrs, i;
959 struct mtrr_var_range *vrs;
960 unsigned long lo, dummy;
962 nvrs = state->num_var_ranges = get_num_var_ranges();
963 vrs = state->var_ranges
964 = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
/* Allocation-failure fallback: pretend there are no variable ranges
   (the if(!vrs) guard is on a line missing from this listing). */
966 nvrs = state->num_var_ranges = 0;
968 for (i = 0; i < nvrs; i++)
969 get_mtrr_var_range (i, &vrs[i]);
970 get_fixed_ranges (state->fixed_ranges);
972 rdmsr (MTRRdefType_MSR, lo, dummy);
973 state->def_type = (lo & 0xff);
/* Bits 11:10 of MTRRdefType = enable flags (E, FE). */
974 state->enabled = (lo & 0xc00) >> 10;
975 } /* End Function get_mtrr_state */
978 /* Free resources associated with a struct mtrr_state */
979 static void __init finalize_mtrr_state(struct mtrr_state *state)
/* NULL guard kept as-is for this kernel era; kfree(NULL) is a no-op in
   later kernels. */
981 if (state->var_ranges) kfree (state->var_ranges);
982 } /* End Function finalize_mtrr_state */
985 static unsigned long __init set_mtrr_state (struct mtrr_state *state,
986 struct set_mtrr_context *ctxt)
987 /* [SUMMARY] Set the MTRR state for this CPU.
988 <state> The MTRR state information to read.
989 <ctxt> Some relevant CPU context.
990 [NOTE] The CPU must already be in a safe state for MTRR changes.
991 [RETURNS] 0 if no changes made, else a mask indicating what was changed.
995 unsigned long change_mask = 0;
997 for (i = 0; i < state->num_var_ranges; i++)
998 if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) )
999 change_mask |= MTRR_CHANGE_MASK_VARIABLE;
1001 if ( set_fixed_ranges_testing(state->fixed_ranges) )
1002 change_mask |= MTRR_CHANGE_MASK_FIXED;
1003 /* Set_mtrr_restore restores the old value of MTRRdefType,
1004 so to set it we fiddle with the saved value */
1005 if ( (ctxt->deftype_lo & 0xff) != state->def_type
1006 || ( (ctxt->deftype_lo & 0xc00) >> 10 ) != state->enabled)
/* NOTE(review): this OR relies on the corresponding bits of deftype_lo
   being cleared first; the clearing line (original 1007) is missing from
   this listing. */
1008 ctxt->deftype_lo |= (state->def_type | state->enabled << 10);
1009 change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
1013 } /* End Function set_mtrr_state */
/* SMP rendezvous state shared between set_mtrr_smp() (master) and
   ipi_handler() (all other CPUs).  The volatile ints are spin barriers the
   master lowers to release the slaves at each phase. */
1016 static atomic_t undone_count;
1017 static volatile int wait_barrier_mtrr_disable = FALSE;
1018 static volatile int wait_barrier_execute = FALSE;
1019 static volatile int wait_barrier_cache_enable = FALSE;
/* Arguments shipped to ipi_handler (an smp_type field referenced by
   ipi_handler is on a line missing from this listing). */
1021 struct set_mtrr_data
1023 unsigned long smp_base;
1024 unsigned long smp_size;
1025 unsigned int smp_reg;
/* Slave side of the MTRR update rendezvous: each phase decrements
   undone_count to tell the master it is done, then spins on the next
   barrier until the master releases it. */
1029 static void ipi_handler (void *info)
1030 /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
1034 struct set_mtrr_data *data = info;
1035 struct set_mtrr_context ctxt;
1036 set_mtrr_prepare_save (&ctxt);
1037 /* Notify master that I've flushed and disabled my cache */
1038 atomic_dec (&undone_count);
1039 while (wait_barrier_mtrr_disable) { rep_nop(); barrier(); }
1040 set_mtrr_disable (&ctxt);
1041 /* Notify master that I've disabled my MTRRs */
1042 atomic_dec (&undone_count);
1043 while (wait_barrier_execute) { rep_nop(); barrier(); }
1044 /* The master has cleared me to execute */
1045 (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
1046 data->smp_type, FALSE);
1047 /* Notify master CPU that I've executed the function */
1048 atomic_dec (&undone_count);
1049 /* Wait for master to clear me to enable cache and return */
1050 while (wait_barrier_cache_enable) { rep_nop(); barrier(); }
1051 set_mtrr_done (&ctxt);
1052 } /* End Function ipi_handler */
/* Master side of the MTRR update rendezvous: brings every CPU to a safe
   state, has each one write the new range, then releases them.  Phases are
   paired one-for-one with ipi_handler() via undone_count and the three
   wait_barrier_* flags. */
1054 static void set_mtrr_smp (unsigned int reg, unsigned long base,
1055 unsigned long size, mtrr_type type)
1057 struct set_mtrr_data data;
1058 struct set_mtrr_context ctxt;
/* data.smp_reg assignment (original line 1060) is missing from this
   listing. */
1061 data.smp_base = base;
1062 data.smp_size = size;
1063 data.smp_type = type;
/* Raise all barriers before waking the other CPUs. */
1064 wait_barrier_mtrr_disable = TRUE;
1065 wait_barrier_execute = TRUE;
1066 wait_barrier_cache_enable = TRUE;
1067 atomic_set (&undone_count, smp_num_cpus - 1);
1068 /* Start the ball rolling on other CPUs */
1069 if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
1070 panic ("mtrr: timed out waiting for other CPUs\n");
1071 /* Flush and disable the local CPU's cache */
1072 set_mtrr_prepare_save (&ctxt);
1073 /* Wait for all other CPUs to flush and disable their caches */
1074 while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
1075 /* Set up for completion wait and then release other CPUs to change MTRRs*/
1076 atomic_set (&undone_count, smp_num_cpus - 1);
1077 wait_barrier_mtrr_disable = FALSE;
1078 set_mtrr_disable (&ctxt);
1080 /* Wait for all other CPUs to disable their MTRRs */
1081 while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
1082 /* Set up for completion wait and then release other CPUs to change MTRRs*/
1083 atomic_set (&undone_count, smp_num_cpus - 1);
1084 wait_barrier_execute = FALSE;
1085 (*set_mtrr_up) (reg, base, size, type, FALSE);
1086 /* Now wait for other CPUs to complete the function */
1087 while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
1088 /* Now all CPUs should have finished the function. Release the barrier to
1089 allow them to re-enable their caches and return from their interrupt,
1090 then enable the local cache and return */
1091 wait_barrier_cache_enable = FALSE;
1092 set_mtrr_done (&ctxt);
1093 } /* End Function set_mtrr_smp */
1096 /* Some BIOSes are broken and do not program all CPUs' MTRRs identically! */
1097 static void __init mtrr_state_warn(unsigned long mask)
/* Warn once per MTRR class (fixed / variable / default-type) that differed
   between CPUs at boot.  <mask> is a bitmask of MTRR_CHANGE_MASK_* bits
   accumulated by intel_mtrr_init_secondary_cpu().
   NOTE(review): an early "if (!mask) return;" guard appears elided here —
   confirm against the full file. */
1100 if (mask & MTRR_CHANGE_MASK_FIXED)
1101 printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
1102 if (mask & MTRR_CHANGE_MASK_VARIABLE)
1103 printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
1104 if (mask & MTRR_CHANGE_MASK_DEFTYPE)
1105 printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
1106 printk ("mtrr: probably your BIOS does not setup all CPUs\n");
1107 } /* End Function mtrr_state_warn */
1109 #endif /* CONFIG_SMP */
1111 static char *attrib_to_str (int x)
1113 return (x <= 6) ? mtrr_strings[x] : "?";
1114 } /* End Function attrib_to_str */
1116 static void init_table (void)
/* Allocate and initialise the per-register usage-count table and, when the
   userspace interface is built in, the ASCII buffer that backs /proc/mtrr
   reads.  NOTE(review): declarations of i/max and the error-return paths
   after the printk()s appear elided in this chunk. */
1120 max = get_num_var_ranges ();
1121 if ( ( usage_table = kmalloc (max * sizeof *usage_table, GFP_KERNEL) )
1124 printk ("mtrr: could not allocate\n");
/* Every register starts at count 1 so firmware-established regions are not
   torn down by a stray mtrr_del() */
1127 for (i = 0; i < max; i++) usage_table[i] = 1;
1128 #ifdef USERSPACE_INTERFACE
/* One formatted line of LINE_SIZE bytes per variable range */
1129 if ( ( ascii_buffer = kmalloc (max * LINE_SIZE, GFP_KERNEL) ) == NULL )
1131 printk ("mtrr: could not allocate\n");
1134 ascii_buf_bytes = 0;
1137 } /* End Function init_table */
1139 static int generic_get_free_region (unsigned long base, unsigned long size)
1140 /* [SUMMARY] Get a free MTRR.
1141 <base> The starting (base) address of the region.
1142 <size> The size (in bytes) of the region.
1143 [RETURNS] The index of the region on success, else -1 on error.
1148 unsigned long lbase, lsize;
1150 max = get_num_var_ranges ();
1151 for (i = 0; i < max; ++i)
1153 (*get_mtrr) (i, &lbase, &lsize, <ype);
1154 if (lsize == 0) return i;
1157 } /* End Function generic_get_free_region */
1159 static int centaur_get_free_region (unsigned long base, unsigned long size)
1160 /* [SUMMARY] Get a free MTRR.
1161 <base> The starting (base) address of the region.
1162 <size> The size (in bytes) of the region.
1163 [RETURNS] The index of the region on success, else -1 on error.
1168 unsigned long lbase, lsize;
1170 max = get_num_var_ranges ();
1171 for (i = 0; i < max; ++i)
1173 if(centaur_mcr_reserved & (1<<i))
1175 (*get_mtrr) (i, &lbase, &lsize, <ype);
1176 if (lsize == 0) return i;
1179 } /* End Function generic_get_free_region */
1181 static int cyrix_get_free_region (unsigned long base, unsigned long size)
1182 /* [SUMMARY] Get a free ARR.
1183 <base> The starting (base) address of the region.
1184 <size> The size (in bytes) of the region.
1185 [RETURNS] The index of the region on success, else -1 on error.
1190 unsigned long lbase, lsize;
1192 /* If we are to set up a region >32M then look at ARR7 immediately */
1195 cyrix_get_arr (7, &lbase, &lsize, <ype);
1196 if (lsize == 0) return 7;
1197 /* Else try ARR0-ARR6 first */
1201 for (i = 0; i < 7; i++)
1203 cyrix_get_arr (i, &lbase, &lsize, <ype);
1204 if ((i == 3) && arr3_protected) continue;
1205 if (lsize == 0) return i;
1207 /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
1208 cyrix_get_arr (i, &lbase, &lsize, <ype);
1209 if ((lsize == 0) && (size >= 0x40)) return i;
1212 } /* End Function cyrix_get_free_region */
1214 static int (*get_free_region) (unsigned long base,
1215 unsigned long size) = generic_get_free_region;
1218 * mtrr_add_page - Add a memory type region
1219 * @base: Physical base address of region in pages (4 KB)
1220 * @size: Physical size of region in pages (4 KB)
1221 * @type: Type of MTRR desired
1222 * @increment: If this is true do usage counting on the region
1224 * Memory type region registers control the caching on newer Intel and
1225 * non Intel processors. This function allows drivers to request an
1226 * MTRR is added. The details and hardware specifics of each processor's
1227 * implementation are hidden from the caller, but nevertheless the
1228 * caller should expect to need to provide a power of two size on an
1229 * equivalent power of two boundary.
1231 * If the region cannot be added either because all regions are in use
1232 * or the CPU cannot support it a negative value is returned. On success
1233 * the register number for this entry is returned, but should be treated
1236 * On a multiprocessor machine the changes are made to all processors.
1237 * This is required on x86 by the Intel processors.
1239 * The available types are
1241 * %MTRR_TYPE_UNCACHABLE - No caching
1243 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
1245 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
1247 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
1249 * BUGS: Needs a quiet flag for the cases where drivers do not mind
1250 * failures and do not wish system log messages to be sent.
1253 int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, char increment)
1255 /* [SUMMARY] Add an MTRR entry.
1256 <base> The starting (base, in pages) address of the region.
1257 <size> The size of the region. (in pages)
1258 <type> The type of the new region.
1259 <increment> If true and the region already exists, the usage count will be
1261 [RETURNS] The MTRR register on success, else a negative number indicating
1263 [NOTE] This routine uses a spinlock.
1267 unsigned long lbase, lsize, last;
/* NOTE(review): the surrounding switch(mtrr_if) and several error-return
   lines are elided in this chunk; the cases below validate the request per
   hardware family before the common checks run. */
1272 return -ENXIO; /* No MTRRs whatsoever */
1274 case MTRR_IF_AMD_K6:
1275 /* Apply the K6 block alignment and size rules
1277 o Uncached or gathering only
1278 o 128K or bigger block
1280 o base suitably aligned to the power
/* "(size & ~(size-1))-size" is non-zero exactly when size is NOT a power
   of two */
1282 if ( type > MTRR_TYPE_WRCOMB || size < (1 << (17-PAGE_SHIFT)) ||
1283 (size & ~(size-1))-size || ( base & (size-1) ) )
1288 /* For Intel PPro stepping <= 7, must be 4 MiB aligned
1289 and not touch 0x70000000->0x7003FFFF */
1290 if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
1291 boot_cpu_data.x86 == 6 &&
1292 boot_cpu_data.x86_model == 1 &&
1293 boot_cpu_data.x86_mask <= 7 )
1295 if ( base & ((1 << (22-PAGE_SHIFT))-1) )
1297 printk (KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
/* Erratum range is given in BYTES while base/size are in PAGES here —
   NOTE(review): confirm units against the upstream file */
1300 if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
1301 (type == MTRR_TYPE_WRCOMB || type == MTRR_TYPE_WRBACK))
1303 printk (KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n")
1309 case MTRR_IF_CYRIX_ARR:
1310 case MTRR_IF_CENTAUR_MCR:
1311 if ( mtrr_if == MTRR_IF_CENTAUR_MCR )
1314 * FIXME: Winchip2 supports uncached
/* Winchip MCRs only do write-combining (plus uncached on type-1 parts) */
1316 if (type != MTRR_TYPE_WRCOMB && (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE))
1318 printk (KERN_WARNING "mtrr: only write-combining%s supported\n",
1319 centaur_mcr_type?" and uncacheable are":" is");
1323 else if (base + size < 0x100)
1325 printk (KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
1329 /* Check upper bits of base and last are equal and lower bits are 0
1330 for base and 1 for last */
1331 last = base + size - 1;
1332 for (lbase = base; !(lbase & 1) && (last & 1);
1333 lbase = lbase >> 1, last = last >> 1);
1336 printk (KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
1346 if (type >= MTRR_NUM_TYPES)
1348 printk ("mtrr: type: %u illegal\n", type);
1352 /* If the type is WC, check that this processor supports it */
1353 if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb () )
1355 printk (KERN_WARNING "mtrr: your processor doesn't support write-combining\n");
/* Reject base/size bits above the CPU's physical address width */
1359 if ( base & size_or_mask || size & size_or_mask )
1361 printk ("mtrr: base or size exceeds the MTRR width\n");
1365 increment = increment ? 1 : 0;
1366 max = get_num_var_ranges ();
1367 /* Search for existing MTRR */
1369 for (i = 0; i < max; ++i)
/* NOTE(review): "<ype" below looks like an encoding garble of "&ltype" —
   confirm against upstream */
1371 (*get_mtrr) (i, &lbase, &lsize, <ype);
1372 if (base >= lbase + lsize) continue;
1373 if ( (base < lbase) && (base + size <= lbase) ) continue;
1374 /* At this point we know there is some kind of overlap/enclosure */
1375 if ( (base < lbase) || (base + size > lbase + lsize) )
1378 printk (KERN_WARNING "mtrr: 0x%lx000,0x%lx000 overlaps existing"
1379 " 0x%lx000,0x%lx000\n",
1380 base, size, lbase, lsize);
1383 /* New region is enclosed by an existing region */
/* An uncachable request inside any existing region is always honoured */
1386 if (type == MTRR_TYPE_UNCACHABLE) continue;
1388 printk ( "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
1389 base, size, attrib_to_str (ltype), attrib_to_str (type) );
1392 if (increment) ++usage_table[i];
1397 /* Search for an empty MTRR */
1398 i = (*get_free_region) (base, size);
1402 printk ("mtrr: no more MTRRs available\n");
1405 set_mtrr (i, base, size, type);
1410 } /* End Function mtrr_add_page */
1413 * mtrr_add - Add a memory type region
1414 * @base: Physical base address of region
1415 * @size: Physical size of region
1416 * @type: Type of MTRR desired
1417 * @increment: If this is true do usage counting on the region
1419 * Memory type region registers control the caching on newer Intel and
1420 * non Intel processors. This function allows drivers to request an
1421 * MTRR is added. The details and hardware specifics of each processor's
1422 * implementation are hidden from the caller, but nevertheless the
1423 * caller should expect to need to provide a power of two size on an
1424 * equivalent power of two boundary.
1426 * If the region cannot be added either because all regions are in use
1427 * or the CPU cannot support it a negative value is returned. On success
1428 * the register number for this entry is returned, but should be treated
1431 * On a multiprocessor machine the changes are made to all processors.
1432 * This is required on x86 by the Intel processors.
1434 * The available types are
1436 * %MTRR_TYPE_UNCACHABLE - No caching
1438 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
1440 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
1442 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
1444 * BUGS: Needs a quiet flag for the cases where drivers do not mind
1445 * failures and do not wish system log messages to be sent.
1448 int mtrr_add(unsigned long base, unsigned long size, unsigned int type, char increment)
1450 /* [SUMMARY] Add an MTRR entry.
1451 <base> The starting (base) address of the region.
1452 <size> The size (in bytes) of the region.
1453 <type> The type of the new region.
1454 <increment> If true and the region already exists, the usage count will be
1456 [RETURNS] The MTRR register on success, else a negative number indicating
1460 if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
1462 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1463 printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
1466 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, increment);
1467 } /* End Function mtrr_add */
1470 * mtrr_del_page - delete a memory type region
1471 * @reg: Register returned by mtrr_add
1472 * @base: Physical base address
1473 * @size: Size of region
1475 * If register is supplied then base and size are ignored. This is
1476 * how drivers should call it.
1478 * Releases an MTRR region. If the usage count drops to zero the
1479 * register is freed and the region returns to default state.
1480 * On success the register is returned, on failure a negative error
1484 int mtrr_del_page (int reg, unsigned long base, unsigned long size)
1485 /* [SUMMARY] Delete MTRR/decrement usage count.
1486 <reg> The register. If this is less than 0 then <<base>> and <<size>> must
1488 <base> The base address of the region. This is ignored if <<reg>> is >= 0.
1489 <size> The size of the region. This is ignored if <<reg>> is >= 0.
1490 [RETURNS] The register on success, else a negative number indicating
1492 [NOTE] This routine uses a spinlock.
1497 unsigned long lbase, lsize;
1499 if ( mtrr_if == MTRR_IF_NONE ) return -ENXIO;
1501 max = get_num_var_ranges ();
/* reg < 0: look the register up by (base, size) instead.  NOTE(review):
   the enclosing "if (reg < 0)" and the locking lines are elided here. */
1505 /* Search for existing MTRR */
1506 for (i = 0; i < max; ++i)
/* NOTE(review): "<ype" looks like an encoding garble of "&ltype" */
1508 (*get_mtrr) (i, &lbase, &lsize, <ype);
1509 if (lbase == base && lsize == size)
1518 printk ("mtrr: no MTRR for %lx000,%lx000 found\n", base, size);
1525 printk ("mtrr: register: %d too big\n", reg);
/* ARR3 may be claimed by SMM firmware on Cyrix; refuse to touch it */
1528 if ( mtrr_if == MTRR_IF_CYRIX_ARR )
1530 if ( (reg == 3) && arr3_protected )
1533 printk ("mtrr: ARR3 cannot be changed\n");
1537 (*get_mtrr) (reg, &lbase, &lsize, <ype);
1541 printk ("mtrr: MTRR %d not used\n", reg);
1544 if (usage_table[reg] < 1)
1547 printk ("mtrr: reg: %d has count=0\n", reg);
/* Only clear the hardware register when the last user goes away */
1550 if (--usage_table[reg] < 1) set_mtrr (reg, 0, 0, 0);
1554 } /* End Function mtrr_del_page */
1557 * mtrr_del - delete a memory type region
1558 * @reg: Register returned by mtrr_add
1559 * @base: Physical base address
1560 * @size: Size of region
1562 * If register is supplied then base and size are ignored. This is
1563 * how drivers should call it.
1565 * Releases an MTRR region. If the usage count drops to zero the
1566 * register is freed and the region returns to default state.
1567 * On success the register is returned, on failure a negative error
1571 int mtrr_del (int reg, unsigned long base, unsigned long size)
1572 /* [SUMMARY] Delete MTRR/decrement usage count.
1573 <reg> The register. If this is less than 0 then <<base>> and <<size>> must
1575 <base> The base address of the region. This is ignored if <<reg>> is >= 0.
1576 <size> The size of the region. This is ignored if <<reg>> is >= 0.
1577 [RETURNS] The register on success, else a negative number indicating
1581 if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
1583 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1584 printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
1587 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
1590 #ifdef USERSPACE_INTERFACE
/* Add a region on behalf of an open /dev or /proc file.  Per-file usage
   counts are kept in file->private_data so mtrr_close() can release any
   regions the process leaked.  <page> selects page units (non-zero) vs
   byte units for <base>/<size>. */
1592 static int mtrr_file_add (unsigned long base, unsigned long size,
1593 unsigned int type, char increment, struct file *file, int page)
1596 unsigned int *fcount = file->private_data;
1598 max = get_num_var_ranges ();
/* First region added via this file: lazily allocate the per-file counters.
   NOTE(review): the "if (fcount == NULL)" guard appears elided here. */
1601 if ( ( fcount = kmalloc (max * sizeof *fcount, GFP_KERNEL) ) == NULL )
1603 printk ("mtrr: could not allocate\n");
1606 memset (fcount, 0, max * sizeof *fcount);
1607 file->private_data = fcount;
/* Byte-mode callers must pass 4 kiB-aligned values */
1610 if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
1612 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1613 printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
1616 base >>= PAGE_SHIFT;
1617 size >>= PAGE_SHIFT;
1619 reg = mtrr_add_page (base, size, type, 1);
1620 if (reg >= 0) ++fcount[reg];
1622 } /* End Function mtrr_file_add */
/* Delete a region on behalf of an open file, keeping the per-file usage
   count in sync.  <page> selects page units (non-zero) vs byte units. */
1624 static int mtrr_file_del (unsigned long base, unsigned long size,
1625 struct file *file, int page)
1628 unsigned int *fcount = file->private_data;
1631 if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
1633 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1634 printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
1637 base >>= PAGE_SHIFT;
1638 size >>= PAGE_SHIFT;
1640 reg = mtrr_del_page (-1, base, size);
1641 if (reg < 0) return reg;
/* No per-file table yet means this file never added anything via fcount */
1642 if (fcount == NULL) return reg;
/* Refuse to decrement a count this file does not hold */
1643 if (fcount[reg] < 1) return -EINVAL;
1646 } /* End Function mtrr_file_del */
/* read() handler for /proc/mtrr: serve a slice of the pre-formatted ASCII
   table built by compute_ascii().  NOTE(review): the loff_t parameter line
   and the return statements appear elided in this chunk. */
1648 static ssize_t mtrr_read (struct file *file, char *buf, size_t len,
1652 if (pos < 0 || pos >= ascii_buf_bytes)
/* Clamp the read to the end of the buffer */
1654 if (len > ascii_buf_bytes - pos)
1655 len = ascii_buf_bytes - pos;
1656 if (copy_to_user(buf, ascii_buffer + pos, len))
1662 } /* End Function mtrr_read */
/* write() handler for /proc/mtrr: parse one control line and add or delete
   a region.  Root only; not seekable. */
1664 static ssize_t mtrr_write (struct file *file, const char *buf, size_t len,
1666 /* Format of control line:
1667 "base=%Lx size=%Lx type=%s" OR:
1673 unsigned long long base, size;
1675 char line[LINE_SIZE];
1677 if (!len) return -EINVAL;
1678 if ( !suser () ) return -EPERM;
1679 /* Can't seek (pwrite) on this device */
1680 if (ppos != &file->f_pos) return -ESPIPE;
/* line[] is zero-filled so the copied text is always NUL-terminated */
1681 memset (line, 0, LINE_SIZE);
1682 if (len > LINE_SIZE) len = LINE_SIZE;
/* len - 1: keep at least one trailing NUL in line[] */
1683 if ( copy_from_user (line, buf, len - 1) ) return -EFAULT;
/* Strip a single trailing newline */
1684 ptr = line + strlen (line) - 1;
1685 if (*ptr == '\n') *ptr = '\0';
1686 if ( !strncmp (line, "disable=", 8) )
1688 reg = simple_strtoul (line + 8, &ptr, 0);
1689 err = mtrr_del_page (reg, 0, 0);
1690 if (err < 0) return err;
1693 if ( strncmp (line, "base=", 5) )
1695 printk ("mtrr: no \"base=\" in line: \"%s\"\n", line);
1698 base = simple_strtoull (line + 5, &ptr, 0);
1699 for (; isspace (*ptr); ++ptr);
1700 if ( strncmp (ptr, "size=", 5) )
1702 printk ("mtrr: no \"size=\" in line: \"%s\"\n", line);
1705 size = simple_strtoull (ptr + 5, &ptr, 0);
/* Values are in bytes here; 4 kiB alignment is required before the shift
   to pages below */
1706 if ( (base & 0xfff) || (size & 0xfff) )
1708 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1709 printk ("mtrr: size: 0x%Lx base: 0x%Lx\n", size, base);
1712 for (; isspace (*ptr); ++ptr);
1713 if ( strncmp (ptr, "type=", 5) )
1715 printk ("mtrr: no \"type=\" in line: \"%s\"\n", line);
1719 for (; isspace (*ptr); ++ptr);
/* Match the type name against the table; index i doubles as the type code */
1720 for (i = 0; i < MTRR_NUM_TYPES; ++i)
1722 if ( strcmp (ptr, mtrr_strings[i]) ) continue;
1723 base >>= PAGE_SHIFT;
1724 size >>= PAGE_SHIFT;
1725 err = mtrr_add_page ((unsigned long)base, (unsigned long)size, i, 1);
1726 if (err < 0) return err;
1729 printk ("mtrr: illegal type: \"%s\"\n", ptr);
1731 } /* End Function mtrr_write */
/* ioctl() handler for /proc/mtrr.  ADD/DEL variants go through the per-file
   accounting (mtrr_file_*); SET/KILL variants bypass it.  The *_PAGE_*
   variants take page units, the others bytes.  NOTE(review): the switch
   header, break statements and -EFAULT returns appear elided here. */
1733 static int mtrr_ioctl (struct inode *inode, struct file *file,
1734 unsigned int cmd, unsigned long arg)
1738 struct mtrr_sentry sentry;
1739 struct mtrr_gentry gentry;
1744 return -ENOIOCTLCMD;
1745 case MTRRIOC_ADD_ENTRY:
1746 if ( !suser () ) return -EPERM;
1747 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1749 err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file, 0);
1750 if (err < 0) return err;
1752 case MTRRIOC_SET_ENTRY:
1753 if ( !suser () ) return -EPERM;
1754 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1756 err = mtrr_add (sentry.base, sentry.size, sentry.type, 0);
1757 if (err < 0) return err;
1759 case MTRRIOC_DEL_ENTRY:
1760 if ( !suser () ) return -EPERM;
1761 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1763 err = mtrr_file_del (sentry.base, sentry.size, file, 0);
1764 if (err < 0) return err;
1766 case MTRRIOC_KILL_ENTRY:
1767 if ( !suser () ) return -EPERM;
1768 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1770 err = mtrr_del (-1, sentry.base, sentry.size);
1771 if (err < 0) return err;
1773 case MTRRIOC_GET_ENTRY:
1774 if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
1776 if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
1777 (*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
1779 /* Hide entries that go above 4GB */
1780 if (gentry.base + gentry.size > 0x100000 || gentry.size == 0x100000)
1781 gentry.base = gentry.size = gentry.type = 0;
/* Byte-unit interface: convert pages back to bytes for userspace */
1783 gentry.base <<= PAGE_SHIFT;
1784 gentry.size <<= PAGE_SHIFT;
1788 if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
1791 case MTRRIOC_ADD_PAGE_ENTRY:
1792 if ( !suser () ) return -EPERM;
1793 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1795 err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file, 1);
1796 if (err < 0) return err;
1798 case MTRRIOC_SET_PAGE_ENTRY:
1799 if ( !suser () ) return -EPERM;
1800 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1802 err = mtrr_add_page (sentry.base, sentry.size, sentry.type, 0);
1803 if (err < 0) return err;
1805 case MTRRIOC_DEL_PAGE_ENTRY:
1806 if ( !suser () ) return -EPERM;
1807 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1809 err = mtrr_file_del (sentry.base, sentry.size, file, 1);
1810 if (err < 0) return err;
1812 case MTRRIOC_KILL_PAGE_ENTRY:
1813 if ( !suser () ) return -EPERM;
1814 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1816 err = mtrr_del_page (-1, sentry.base, sentry.size);
1817 if (err < 0) return err;
1819 case MTRRIOC_GET_PAGE_ENTRY:
1820 if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
1822 if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
/* Page-unit interface: no shift, report pages as stored */
1823 (*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
1826 if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
1831 } /* End Function mtrr_ioctl */
/* release() handler: drop every region reference this file still holds, so
   a process that exits without cleaning up does not leak MTRRs. */
1833 static int mtrr_close (struct inode *ino, struct file *file)
1836 unsigned int *fcount = file->private_data;
1838 if (fcount == NULL) return 0;
1840 max = get_num_var_ranges ();
1841 for (i = 0; i < max; ++i)
/* Delete once per outstanding reference recorded for this file */
1843 while (fcount[i] > 0)
1845 if (mtrr_del (i, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i);
/* NOTE(review): the kfree(fcount) and --fcount[i] lines appear elided */
1851 file->private_data = NULL;
1853 } /* End Function mtrr_close */
1855 static struct file_operations mtrr_fops =
1861 release: mtrr_close,
1864 # ifdef CONFIG_PROC_FS
1866 static struct proc_dir_entry *proc_root_mtrr;
1868 # endif /* CONFIG_PROC_FS */
1870 static devfs_handle_t devfs_handle;
/* Rebuild the ASCII table served by /proc/mtrr: one line per variable
   range, sized in KB or MB depending on magnitude. */
1872 static void compute_ascii (void)
1877 unsigned long base, size;
1879 ascii_buf_bytes = 0;
1880 max = get_num_var_ranges ();
1881 for (i = 0; i < max; i++)
1883 (*get_mtrr) (i, &base, &size, &type);
/* A zero-sized range is unused: reset its count and print nothing */
1884 if (size == 0) usage_table[i] = 0;
/* Below 1 MiB report in KB, otherwise in MB ("factor" selects the suffix;
   its assignments appear elided in this chunk) */
1887 if (size < (0x100000 >> PAGE_SHIFT))
1891 size <<= PAGE_SHIFT - 10;
1896 size >>= 20 - PAGE_SHIFT;
1899 (ascii_buffer + ascii_buf_bytes,
1900 "reg%02i: base=0x%05lx000 (%4liMB), size=%4li%cB: %s, count=%d\n",
1901 i, base, base >> (20 - PAGE_SHIFT), size, factor,
1902 attrib_to_str (type), usage_table[i]);
1903 ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
/* Keep the advertised file sizes in sync with the new contents */
1906 devfs_set_file_size (devfs_handle, ascii_buf_bytes);
1907 # ifdef CONFIG_PROC_FS
1909 proc_root_mtrr->size = ascii_buf_bytes;
1910 # endif /* CONFIG_PROC_FS */
1911 } /* End Function compute_ascii */
1913 #endif /* USERSPACE_INTERFACE */
1915 EXPORT_SYMBOL(mtrr_add);
1916 EXPORT_SYMBOL(mtrr_del);
1927 arr_state_t arr_state[8] __initdata =
1929 {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL},
1930 {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}
1933 unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
1935 static void __init cyrix_arr_init_secondary(void)
1937 struct set_mtrr_context ctxt;
1940 /* flush cache and enable MAPEN */
1941 set_mtrr_prepare_save (&ctxt);
1942 set_mtrr_disable (&ctxt);
1944 /* the CCRs are not contiguous */
1945 for(i=0; i<4; i++) setCx86(CX86_CCR0 + i, ccr_state[i]);
1946 for( ; i<7; i++) setCx86(CX86_CCR4 + i, ccr_state[i]);
1949 arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE);
1951 set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */
1952 } /* End Function cyrix_arr_init_secondary */
1957 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
1958 * with the SMM (System Management Mode) mode. So we need the following:
1959 * Check whether SMI_LOCK (CCR3 bit 0) is set
1960 * if it is set, write a warning message: ARR3 cannot be changed!
1961 * (it cannot be changed until the next processor reset)
1962 * if it is reset, then we can change it, set all the needed bits:
1963 * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
1964 * - disable access to SMM memory (CCR1 bit 2 reset)
1965 * - disable SMM mode (CCR1 bit 1 reset)
1966 * - disable write protection of ARR3 (CCR6 bit 1 reset)
1967 * - (maybe) disable ARR3
1968 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
/* One-time Cyrix setup on the boot CPU: disable SMM access through ARR3,
   unprotect ARR3, enable ARR usage, and snapshot CCR/ARR state for the
   secondary CPUs.  ccrc[] records which bits we had to change so the
   summary printks at the end report only actual modifications. */
1970 static void __init cyrix_arr_init(void)
1972 struct set_mtrr_context ctxt;
1973 unsigned char ccr[7];
1974 int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
1979 /* flush cache and enable MAPEN */
1980 set_mtrr_prepare_save (&ctxt);
1981 set_mtrr_disable (&ctxt);
1983 /* Save all CCRs locally */
1984 ccr[0] = getCx86 (CX86_CCR0);
1985 ccr[1] = getCx86 (CX86_CCR1);
1986 ccr[2] = getCx86 (CX86_CCR2);
/* NOTE(review): the CCR3 read and the SMI_LOCK/arr3_protected handling
   that uses ccrc[3] appear elided from this chunk */
1988 ccr[4] = getCx86 (CX86_CCR4);
1989 ccr[5] = getCx86 (CX86_CCR5);
1990 ccr[6] = getCx86 (CX86_CCR6);
1999 /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
2000 * access to SMM memory through ARR3 (bit 7).
2002 if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; }
2003 if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; }
2004 if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; }
2006 if (ccr[6] & 0x02) {
2007 ccr[6] &= 0xfd; ccrc[6] = 1; /* Disable write protection of ARR3 */
2008 setCx86 (CX86_CCR6, ccr[6]);
2010 /* Disable ARR3. This is safe now that we disabled SMM. */
2011 /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
2013 /* If we changed CCR1 in memory, change it in the processor, too. */
2014 if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]);
2016 /* Enable ARR usage by the processor */
2017 if (!(ccr[5] & 0x20))
2019 ccr[5] |= 0x20; ccrc[5] = 1;
2020 setCx86 (CX86_CCR5, ccr[5]);
/* Snapshot final CCR and ARR state for cyrix_arr_init_secondary() */
2024 for(i=0; i<7; i++) ccr_state[i] = ccr[i];
2027 &arr_state[i].base, &arr_state[i].size, &arr_state[i].type);
2030 set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */
2032 if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n");
2033 if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n");
2035 if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
2036 if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
2037 if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
2039 if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n");
2040 } /* End Function cyrix_arr_init */
2043 * Initialise the later (saner) Winchip MCR variant. In this version
2044 * the BIOS can pass us the registers it has used (but not their values)
2045 * and the control register is read/write
/* Set up the later (type-1, Winchip2) MCR variant: unlock the control MSR,
   clear MCRs the BIOS did not claim, reserve the ones it did, and turn on
   write combining unless OOSTORE is already configured. */
2048 static void __init centaur_mcr1_init(void)
/* NOTE(review): this comment looks copy-pasted from the mcr0 (read-only)
   variant — on this part the control register is read/write.  Confirm. */
2053 /* Unfortunately, MCR's are read-only, so there is no way to
2054 * find out what the bios might have done.
2057 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2058 if(((lo>>17)&7)==1) /* Type 1 Winchip2 MCR */
2060 lo&= ~0x1C0; /* clear key */
2061 lo|= 0x040; /* set key to 1 */
2062 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */
2065 centaur_mcr_type = 1;
2068 * Clear any unconfigured MCR's.
2071 for (i = 0; i < 8; ++i)
2073 if(centaur_mcr[i]. high == 0 && centaur_mcr[i].low == 0)
/* Control-MSR bits 9..16 flag MCRs the BIOS configured */
2075 if(!(lo & (1<<(9+i))))
2076 wrmsr (MSR_IDT_MCR0 + i , 0, 0);
2079 * If the BIOS set up an MCR we cannot see it
2080 * but we don't wish to obliterate it
2082 centaur_mcr_reserved |= (1<<i);
2086 * Throw the main write-combining switch...
2087 * However if OOSTORE is enabled then people have already done far
2088 * cleverer things and we should behave.
2091 lo |= 15; /* Write combine enables */
2092 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2093 } /* End Function centaur_mcr1_init */
2096 * Initialise the original winchip with read only MCR registers
2097 * no used bitmask for the BIOS to pass on and write only control
/* Set up the original Winchip's MCRs: they are read-only with a write-only
   control register, so all we can do is clear everything we did not set
   ourselves and enable write combining globally. */
2100 static void __init centaur_mcr0_init(void)
2104 /* Unfortunately, MCR's are read-only, so there is no way to
2105 * find out what the bios might have done.
2108 /* Clear any unconfigured MCR's.
2109 * This way we are sure that the centaur_mcr array contains the actual
2110 * values. The disadvantage is that any BIOS tweaks are thus undone.
2113 for (i = 0; i < 8; ++i)
2115 if(centaur_mcr[i]. high == 0 && centaur_mcr[i].low == 0)
2116 wrmsr (MSR_IDT_MCR0 + i , 0, 0);
/* 0x01F0001F: enable write combining on all eight MCRs */
2119 wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */
2120 } /* End Function centaur_mcr0_init */
2123 * Initialise Winchip series MCR registers
2126 static void __init centaur_mcr_init(void)
2128 struct set_mtrr_context ctxt;
2130 set_mtrr_prepare_save (&ctxt);
2131 set_mtrr_disable (&ctxt);
2133 if(boot_cpu_data.x86_model==4)
2134 centaur_mcr0_init();
2135 else if(boot_cpu_data.x86_model==8 || boot_cpu_data.x86_model == 9)
2136 centaur_mcr1_init();
2138 set_mtrr_done (&ctxt);
2139 } /* End Function centaur_mcr_init */
/* Probe which MTRR flavour this CPU provides and bind the get/set function
   pointers plus the physical-address width masks (size_or_mask /
   size_and_mask) accordingly.  Returns non-zero when some MTRR interface
   was found.  NOTE(review): several case labels, break statements and mask
   assignments appear elided in this chunk. */
2141 static int __init mtrr_setup(void)
2143 if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) {
2144 /* Intel (P6) standard MTRRs */
2145 mtrr_if = MTRR_IF_INTEL;
2146 get_mtrr = intel_get_mtrr;
2147 set_mtrr_up = intel_set_mtrr_up;
2148 switch (boot_cpu_data.x86_vendor) {
2150 case X86_VENDOR_AMD:
2151 /* The original Athlon docs said that
2152 total addressable memory is 44 bits wide.
2153 It was not really clear whether its MTRRs
2154 follow this or not. (Read: 44 or 36 bits).
2155 However, "x86-64_overview.pdf" explicitly
2156 states that "previous implementations support
2157 36 bit MTRRs" and also provides a way to
2158 query the width (in bits) of the physical
2159 addressable memory on the Hammer family.
2161 if (boot_cpu_data.x86 == 15 && (cpuid_eax(0x80000000) >= 0x80000008)) {
/* Hammer family: ask CPUID for the real physical address width */
2163 phys_addr = cpuid_eax(0x80000008) & 0xff ;
2164 size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
2165 size_and_mask = ~size_or_mask & 0xfff00000;
2168 size_or_mask = 0xff000000; /* 36 bits */
2169 size_and_mask = 0x00f00000;
2172 case X86_VENDOR_CENTAUR:
2173 /* VIA Cyrix family have Intel style MTRRs, but don't support PAE */
2174 if (boot_cpu_data.x86 == 6) {
2175 size_or_mask = 0xfff00000; /* 32 bits */
/* Default for everything else: assume the 36-bit Intel layout */
2182 size_or_mask = 0xff000000; /* 36 bits */
2183 size_and_mask = 0x00f00000;
2187 } else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) {
2188 /* Pre-Athlon (K6) AMD CPU MTRRs */
2189 mtrr_if = MTRR_IF_AMD_K6;
2190 get_mtrr = amd_get_mtrr;
2191 set_mtrr_up = amd_set_mtrr_up;
2192 size_or_mask = 0xfff00000; /* 32 bits */
2194 } else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) {
2196 mtrr_if = MTRR_IF_CYRIX_ARR;
2197 get_mtrr = cyrix_get_arr;
2198 set_mtrr_up = cyrix_set_arr_up;
/* Cyrix needs its own allocator because ARR7 and protected ARR3 are special */
2199 get_free_region = cyrix_get_free_region;
2201 size_or_mask = 0xfff00000; /* 32 bits */
2203 } else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) {
2205 mtrr_if = MTRR_IF_CENTAUR_MCR;
2206 get_mtrr = centaur_get_mcr;
2207 set_mtrr_up = centaur_set_mcr_up;
/* Centaur must skip BIOS-reserved MCRs when allocating */
2208 get_free_region = centaur_get_free_region;
2210 size_or_mask = 0xfff00000; /* 32 bits */
2213 /* No supported MTRR interface */
2214 mtrr_if = MTRR_IF_NONE;
2217 printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n"
2218 "mtrr: detected mtrr type: %s\n",
2219 MTRR_VERSION, mtrr_if_name[mtrr_if]);
2221 return (mtrr_if != MTRR_IF_NONE);
2222 } /* End Function mtrr_setup */
2226 static volatile unsigned long smp_changes_mask __initdata = 0;
2227 static struct mtrr_state smp_mtrr_state __initdata = {0, 0};
/* SMP boot-CPU hook: detect the MTRR interface, and on Intel snapshot the
   boot CPU's MTRR state so the secondaries can be made consistent with it. */
2229 void __init mtrr_init_boot_cpu(void)
/* NOTE(review): the early "return;" after a failed mtrr_setup() appears
   elided here */
2231 if ( !mtrr_setup () )
2234 if ( mtrr_if == MTRR_IF_INTEL ) {
2235 /* Only for Intel MTRRs */
2236 get_mtrr_state (&smp_mtrr_state);
2238 } /* End Function mtrr_init_boot_cpu */
/* Copy the boot CPU's saved MTRR state onto this secondary CPU and record
   (in smp_changes_mask) which register classes had to be changed, so
   mtrr_state_warn() can report inconsistent BIOS programming later. */
2240 static void __init intel_mtrr_init_secondary_cpu(void)
2242 unsigned long mask, count;
2243 struct set_mtrr_context ctxt;
2245 /* Note that this is not ideal, since the cache is only flushed/disabled
2246 for this CPU while the MTRRs are changed, but changing this requires
2247 more invasive changes to the way the kernel boots */
2248 set_mtrr_prepare_save (&ctxt);
2249 set_mtrr_disable (&ctxt);
/* set_mtrr_state() returns a MTRR_CHANGE_MASK_* bitmask of what differed */
2250 mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
2251 set_mtrr_done (&ctxt);
2252 /* Use the atomic bitops to update the global mask */
2253 for (count = 0; count < sizeof mask * 8; ++count)
/* NOTE(review): the "mask >>= 1;" step of this loop appears elided */
2255 if (mask & 0x01) set_bit (count, &smp_changes_mask);
2258 } /* End Function intel_mtrr_init_secondary_cpu */
/* SMP secondary-CPU hook: bring this CPU's MTRR state in line with the
   boot CPU.  Only the Intel and (theoretical) Cyrix paths are implemented.
   NOTE(review): the MTRR_IF_INTEL case label and default label appear
   elided in this chunk. */
2260 void __init mtrr_init_secondary_cpu(void)
2262 switch ( mtrr_if ) {
2264 /* Intel (P6) standard MTRRs */
2265 intel_mtrr_init_secondary_cpu();
2267 case MTRR_IF_CYRIX_ARR:
2268 /* This is _completely theoretical_!
2269 * I assume here that one day Cyrix will support Intel APIC.
2270 * In reality on non-Intel CPUs we won't even get to this routine.
2271 * Hopefully no one will plug two Cyrix processors in a dual P5 board.
2274 cyrix_arr_init_secondary ();
2279 /* I see no MTRRs I can support in SMP mode... */
2280 printk ("mtrr: SMP support incomplete for this vendor\n");
2282 } /* End Function mtrr_init_secondary_cpu */
2283 #endif /* CONFIG_SMP */
/* Final MTRR initialisation: on SMP finish the consistency pass started on
   the secondaries; on UP run the detection here.  Then create the /proc
   and devfs user interfaces and build the initial ASCII table. */
2285 int __init mtrr_init(void)
2288 /* mtrr_setup() should already have been called from mtrr_init_boot_cpu() */
2290 if ( mtrr_if == MTRR_IF_INTEL ) {
/* Report any registers the secondaries had to change to match the boot CPU */
2291 finalize_mtrr_state (&smp_mtrr_state);
2292 mtrr_state_warn (smp_changes_mask);
2295 if ( !mtrr_setup() )
2296 return 0; /* MTRRs not supported? */
2299 #ifdef CONFIG_PROC_FS
2300 proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
2301 if (proc_root_mtrr) {
2302 proc_root_mtrr->owner = THIS_MODULE;
2303 proc_root_mtrr->proc_fops = &mtrr_fops;
2306 #ifdef USERSPACE_INTERFACE
2307 devfs_handle = devfs_register (NULL, "cpu/mtrr", DEVFS_FL_DEFAULT, 0, 0,
2308 S_IFREG | S_IRUGO | S_IWUSR,
/* NOTE(review): the init_table()/compute_ascii() calls and return value
   appear elided from this chunk */
2313 } /* End Function mtrr_init */
2318 * c-file-style:"k&r"