1 /****************************************************************************
3 * SciTech OS Portability Manager Library
5 * ========================================================================
7 * The contents of this file are subject to the SciTech MGL Public
8 * License Version 1.0 (the "License"); you may not use this file
9 * except in compliance with the License. You may obtain a copy of
10 * the License at http://www.scitechsoft.com/mgl-license.txt
12 * Software distributed under the License is distributed on an
13 * "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
14 * implied. See the License for the specific language governing
15 * rights and limitations under the License.
17 * The Original Code is Copyright (C) 1991-1998 SciTech Software, Inc.
19 * The Initial Developer of the Original Code is SciTech Software, Inc.
20 * All Rights Reserved.
22 * ========================================================================
24 * Heavily based on code copyright (C) Richard Gooch
27 * Environment: 32-bit Ring 0 device driver
29 * Description: Generic Memory Type Range Register (MTRR) functions to
30 * manipulate the MTRR registers on supported CPU's. This code
31 * *must* run at ring 0, so you can't normally include this
32 * code directly in normal applications (the exception is DOS4GW
33 * apps which run at ring 0 under real DOS). Thus this code
34 * will normally be compiled into a ring 0 device driver for
35 * the target operating system.
37 ****************************************************************************/
45 /*--------------------------- Global variables ----------------------------*/
47 /* Intel pre-defined MTRR registers */
/* 11 fixed-range MSRs (1 x 64K, 2 x 16K, 8 x 4K below), 8 ranges each = 88 */
49 #define NUM_FIXED_RANGES 88
50 #define INTEL_cap_MSR 0x0FE
51 #define INTEL_defType_MSR 0x2FF
52 #define INTEL_fix64K_00000_MSR 0x250
53 #define INTEL_fix16K_80000_MSR 0x258
54 #define INTEL_fix16K_A0000_MSR 0x259
55 #define INTEL_fix4K_C0000_MSR 0x268
56 #define INTEL_fix4K_C8000_MSR 0x269
57 #define INTEL_fix4K_D0000_MSR 0x26A
58 #define INTEL_fix4K_D8000_MSR 0x26B
59 #define INTEL_fix4K_E0000_MSR 0x26C
60 #define INTEL_fix4K_E8000_MSR 0x26D
61 #define INTEL_fix4K_F0000_MSR 0x26E
62 #define INTEL_fix4K_F8000_MSR 0x26F
64 /* Macros to find the address of a particular MSR register */
/* Variable-range MTRR 'reg' lives at the MSR pair 0x200+2*reg (physBase)
 * and 0x200+2*reg+1 (physMask).
 */
66 #define INTEL_physBase_MSR(reg) (0x200 + 2 * (reg))
67 #define INTEL_physMask_MSR(reg) (0x200 + 2 * (reg) + 1)
69 /* Cyrix CPU configuration register indexes */
70 #define CX86_CCR0 0xC0
71 #define CX86_CCR1 0xC1
72 #define CX86_CCR2 0xC2
73 #define CX86_CCR3 0xC3
74 #define CX86_CCR4 0xE8
75 #define CX86_CCR5 0xE9
76 #define CX86_CCR6 0xEA
77 #define CX86_DIR0 0xFE
78 #define CX86_DIR1 0xFF
/* ARR registers are 3 bytes each starting at 0xC4; RCRs start at 0xDC */
79 #define CX86_ARR_BASE 0xC4
80 #define CX86_RCR_BASE 0xDC
82 /* Structure to maintain machine state while updating MTRR registers */
/* -1 indicates the module has not yet been initialised (see the init code,
 * which assigns the real count per CPU vendor).
 */
92 static int numMTRR = -1;
93 static int cpuFamily,cpuType,cpuStepping;
/* Vendor-specific accessors; bound during init to the INTEL_*, AMD_* or
 * CYRIX_* implementations below.
 */
94 static void (*getMTRR)(uint reg,ulong *base,ulong *size,int *type) = NULL;
95 static void (*setMTRR)(uint reg,ulong base,ulong size,int type) = NULL;
96 static int (*getFreeRegion)(ulong base,ulong size) = NULL;
98 /*----------------------------- Implementation ----------------------------*/
100 /****************************************************************************
102 Returns non-zero if we have the write-combining memory type
103 ****************************************************************************/
104 static int MTRR_haveWriteCombine(void)
/* NOTE(review): this listing has gaps (the per-vendor dispatch surrounding
 * these branches is missing); code is kept byte-identical below.
 */
110 if (cpuType < CPU_AMDAthlon) {
111 /* AMD K6-2 stepping 8 and later support the MTRR registers.
112 * The earlier K6-2 steppings (300Mhz models) do not
115 if ((cpuType < CPU_AMDK6_2) || (cpuType == CPU_AMDK6_2 && cpuStepping < 8))
119 /* Fall through for AMD Athlon which uses P6 style MTRR's */
/* Bit 10 of the MTRR capability MSR flags write-combining support */
121 _MTRR_readMSR(INTEL_cap_MSR,&config,&dummy);
122 return (config & (1 << 10));
124 /* Cyrix 6x86 and later support the MTRR registers */
125 if (cpuType < CPU_Cyrix6x86)
132 /****************************************************************************
134 base - The starting physical base address of the region
135 size - The size in bytes of the region
138 The index of the region on success, else -1 on error.
141 Generic function to find the location of a free MTRR register to be used
142 for creating a new mapping.
143 ****************************************************************************/
144 static int GENERIC_getFreeRegion(
/* Scan all variable-range MTRRs for one that is currently unused */
151 for (i = 0; i < numMTRR; i++) {
/* NOTE(review): '<ype' is corrupted text in this listing; the original
 * argument was almost certainly '&ltype'.
 */
152 getMTRR(i,&lbase,&lsize,<ype);
161 /****************************************************************************
163 base - The starting physical base address of the region
164 size - The size in bytes of the region
167 The index of the region on success, else -1 on error.
170 AMD K6 specific function to find the location of a free MTRR register to
171 be used for creating a new mapping.
172 ****************************************************************************/
173 static int AMDK6_getFreeRegion(
/* The K6 family exposes only two MTRRs (numMTRR == 2 on this path) */
180 for (i = 0; i < numMTRR; i++) {
/* NOTE(review): '<ype' is corrupted text in this listing; the original
 * argument was almost certainly '&ltype'.
 */
181 getMTRR(i,&lbase,&lsize,<ype);
190 /****************************************************************************
192 base - The starting physical base address of the region
193 size - The size in bytes of the region
196 The index of the region on success, else -1 on error.
199 Cyrix specific function to find the location of a free MTRR register to be
200 used for creating a new mapping.
201 ****************************************************************************/
202 static int CYRIX_getFreeRegion(
/* NOTE(review): '<ype' in the calls below is corrupted text; the original
 * argument was almost certainly '&ltype'.
 */
209 if (size > 0x2000000UL) {
210 /* If we are to set up a region >32M then look at ARR7 immediately */
211 getMTRR(7,&lbase,&lsize,<ype);
216 /* Check ARR0-6 registers */
217 for (i = 0; i < 7; i++) {
218 getMTRR(i,&lbase,&lsize,<ype);
222 /* Try ARR7 but its size must be at least 256K */
223 getMTRR(7,&lbase,&lsize,<ype);
/* lsize < 1 means ARR7 is free; 0x40000 is the 256K minimum for ARR7 */
224 if ((lsize < 1) && (size >= 0x40000))
231 /****************************************************************************
233 c - Place to store the machine context across the call
236 Puts the processor into a state where MTRRs can be safely updated
237 ****************************************************************************/
238 static void MTRR_beginUpdate(
241 c->flags = _MTRR_disableInt();
/* NOTE(review): the second 'cpuFamily == CPU_AMD' conjunct is redundant;
 * the condition reduces to (cpuFamily != CPU_AMD || cpuType >= CPU_AMDAthlon),
 * i.e. every CPU except pre-Athlon AMD takes the P6-style path.
 */
242 if (cpuFamily != CPU_AMD || (cpuFamily == CPU_AMD && cpuType >= CPU_AMDAthlon)) {
246 /* Disable MTRRs, and set the default type to uncached */
247 c->cr4Val = _MTRR_saveCR4();
248 _MTRR_readMSR(INTEL_defType_MSR,&c->defTypeLo,&c->defTypeHi);
249 _MTRR_writeMSR(INTEL_defType_MSR,c->defTypeLo & 0xF300UL,c->defTypeHi);
/* Cyrix path: save CCR3 and set bit 4 — presumably the MAPEN bit that
 * unlocks access to the configuration registers; confirm against the
 * Cyrix 6x86 datasheet.
 */
252 c->ccr3 = _MTRR_getCx86(CX86_CCR3);
253 _MTRR_setCx86(CX86_CCR3, (uchar)((c->ccr3 & 0x0F) | 0x10));
259 /****************************************************************************
261 c - Place to restore the machine context from
264 Restores the processor after updating any of the registers
265 ****************************************************************************/
266 static void MTRR_endUpdate(
/* Mirror of MTRR_beginUpdate: restore the default type MSR and CR4 on the
 * P6-style path, or the saved CCR3 on the Cyrix path. NOTE(review): the
 * second 'cpuFamily == CPU_AMD' conjunct is redundant.
 */
269 if (cpuFamily != CPU_AMD || (cpuFamily == CPU_AMD && cpuType >= CPU_AMDAthlon)) {
274 _MTRR_writeMSR(INTEL_defType_MSR,c->defTypeLo,c->defTypeHi);
275 _MTRR_restoreCR4(c->cr4Val);
278 _MTRR_setCx86(CX86_CCR3,(uchar)c->ccr3);
283 /* Re-enable interrupts (if enabled previously) */
284 _MTRR_restoreInt(c->flags);
287 /****************************************************************************
289 reg - MTRR register to read
290 base - Place to store the starting physical base address of the region
291 size - Place to store the size in bytes of the region
292 type - Place to store the type of the MTRR register
295 Intel specific function to read the value of a specific MTRR register.
296 ****************************************************************************/
297 static void INTEL_getMTRR(
303 ulong hi,maskLo,baseLo;
305 _MTRR_readMSR(INTEL_physMask_MSR(reg),&maskLo,&hi);
/* Bit 11 (0x800) of the mask MSR is the valid/enable bit */
306 if ((maskLo & 0x800) == 0) {
307 /* MTRR is disabled, so it is free */
313 _MTRR_readMSR(INTEL_physBase_MSR(reg),&baseLo,&hi);
314 maskLo = (maskLo & 0xFFFFF000UL);
/* The mask was written as ~(size-1) (see INTEL_setMTRR), so ~(mask-1)
 * recovers the region size for power-of-two regions.
 */
315 *size = ~(maskLo - 1);
316 *base = (baseLo & 0xFFFFF000UL);
317 *type = (baseLo & 0xFF);
320 /****************************************************************************
322 reg - MTRR register to set
323 base - The starting physical base address of the region
324 size - The size in bytes of the region
325 type - Type to place into the MTRR register
328 Intel specific function to set the value of a specific MTRR register to
329 the passed in base, size and type.
330 ****************************************************************************/
331 static void INTEL_setMTRR(
339 MTRR_beginUpdate(&c);
341 /* The invalid bit is kept in the mask, so we simply clear the
342 * relevant mask register to disable a range.
344 _MTRR_writeMSR(INTEL_physMask_MSR(reg),0,0);
/* Program base|type, then the mask with the valid bit (0x800) set */
347 _MTRR_writeMSR(INTEL_physBase_MSR(reg),base | type,0);
348 _MTRR_writeMSR(INTEL_physMask_MSR(reg),~(size - 1) | 0x800,0);
353 /****************************************************************************
355 Disables banked write combining for Intel processors. We always disable
356 this because it invariably causes problems with older hardware.
357 ****************************************************************************/
358 static void INTEL_disableBankedWriteCombine(void)
362 MTRR_beginUpdate(&c);
/* Clear the fixed-range MTRR covering the A0000 (VGA frame buffer) bank */
363 _MTRR_writeMSR(INTEL_fix16K_A0000_MSR,0,0);
367 /****************************************************************************
369 reg - MTRR register to read
370 base - Place to store the starting physical base address of the region
371 size - Place to store the size in bytes of the region
372 type - Place to store the type of the MTRR register
375 AMD specific function to read the value of a specific MTRR register and
376 return the base, size and type.
377 ****************************************************************************/
378 static void AMD_getMTRR(
/* MSR 0xC0000085 holds both K6 MTRRs — presumably the K6 UWCCR register;
 * confirm against the AMD K6-2 BIOS writer's guide.
 */
386 /* Upper dword is region 1, lower is region 0 */
387 _MTRR_readMSR(0xC0000085, &low, &high);
391 /* Find the base and type for the region */
392 *base = low & 0xFFFE0000;
395 *type = PM_MTRR_UNCACHABLE;
397 *type = PM_MTRR_WRCOMB;
/* Low two bits zero means the region is disabled */
398 if ((low & 3) == 0) {
403 /* This needs a little explaining. The size is stored as an
404 * inverted mask of bits of 128K granularity 15 bits long offset
407 * So to get a size we do invert the mask and add 1 to the lowest
408 * mask bit (4 as its 2 bits in). This gives us a size we then shift
409 * to turn into 128K blocks
411 * eg 111 1111 1111 1100 is 512K
413 * invert 000 0000 0000 0011
414 * +1 000 0000 0000 0100
417 low = (~low) & 0x0FFFC;
418 *size = (low + 4) << 15;
421 /****************************************************************************
423 reg - MTRR register to set
424 base - The starting physical base address of the region
425 size - The size in bytes of the region
426 type - Type to place into the MTRR register
429 AMD specific function to set the value of a specific MTRR register to
430 the passed in base, size and type.
431 ****************************************************************************/
432 static void AMD_setMTRR(
438 ulong low,high,newVal;
441 MTRR_beginUpdate(&c);
/* Read the combined K6 MTRR MSR (upper dword = region 1, lower = region 0,
 * per AMD_getMTRR above).
 */
442 _MTRR_readMSR(0xC0000085, &low, &high);
444 /* Clear register to disable */
451 /* Set the register to the base (already shifted for us), the
452 * type (off by one) and an inverted bitmask of the size
453 * The size is the only odd bit. We are fed say 512K
454 * We invert this and we get 111 1111 1111 1011 but
455 * if you subtract one and invert you get the desired
456 * 111 1111 1111 1100 mask
458 newVal = (((~(size-1)) >> 15) & 0x0001FFFC) | base | (type+1);
465 /* The writeback rule is quite specific. See the manual. Its
466 * disable local interrupts, write back the cache, set the MTRR
469 _MTRR_writeMSR(0xC0000085, low, high);
473 /****************************************************************************
475 reg - ARR register to read
476 base - Place to store the starting physical base address of the region
477 size - Place to store the size in bytes of the region
478 type - Place to store the type of the region
481 Cyrix specific function to read the value of a specific ARR register and
482 return the base, size and type.
483 ****************************************************************************/
484 static void CYRIX_getMTRR(
/* Each ARR is 3 bytes wide starting at CX86_ARR_BASE */
491 uchar arr = CX86_ARR_BASE + reg*3;
494 /* Save flags and disable interrupts */
495 MTRR_beginUpdate(&c);
/* Assemble the 32-bit base byte-wise from three config register reads;
 * the low nibble of the third byte is the size shift, extracted below.
 */
496 ((uchar*)base)[3] = _MTRR_getCx86(arr);
497 ((uchar*)base)[2] = _MTRR_getCx86((uchar)(arr+1));
498 ((uchar*)base)[1] = _MTRR_getCx86((uchar)(arr+2));
499 rcr = _MTRR_getCx86((uchar)(CX86_RCR_BASE + reg));
502 /* Enable interrupts if it was enabled previously */
503 shift = ((uchar*)base)[1] & 0x0f;
504 *base &= 0xFFFFF000UL;
506 /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
507 * Note: shift==0xF means 4G, this is unsupported.
510 *size = (reg < 7 ? 0x800UL : 0x20000UL) << shift;
/* The RCR type encoding differs between ARR7 and ARR0-ARR6 (see below) */
514 /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
517 case 1: *type = PM_MTRR_UNCACHABLE; break;
518 case 8: *type = PM_MTRR_WRBACK; break;
519 case 9: *type = PM_MTRR_WRCOMB; break;
521 default: *type = PM_MTRR_WRTHROUGH; break;
526 case 0: *type = PM_MTRR_UNCACHABLE; break;
527 case 8: *type = PM_MTRR_WRCOMB; break;
528 case 9: *type = PM_MTRR_WRBACK; break;
530 default: *type = PM_MTRR_WRTHROUGH; break;
535 /****************************************************************************
537 reg - ARR register to set
538 base - The starting physical base address of the region
539 size - The size in bytes of the region
540 type - Type to place into the ARR register
543 Cyrix specific function to set the value of a specific ARR register to
544 the passed in base, size and type.
545 ****************************************************************************/
546 static void CYRIX_setMTRR(
/* Each ARR is 3 bytes wide starting at CX86_ARR_BASE */
553 uchar arr = CX86_ARR_BASE + reg*3;
554 uchar arr_type,arr_size;
556 /* Count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
557 size >>= (reg < 7 ? 12 : 18);
558 size &= 0x7FFF; /* Make sure arr_size <= 14 */
/* Convert the size to a log2-style shift count for the ARR size nibble */
559 for (arr_size = 0; size; arr_size++, size >>= 1)
/* RCR type encodings differ between ARR0-ARR6 and ARR7 (two switches) */
563 case PM_MTRR_UNCACHABLE: arr_type = 1; break;
564 case PM_MTRR_WRCOMB: arr_type = 9; break;
565 case PM_MTRR_WRTHROUGH: arr_type = 24; break;
566 default: arr_type = 8; break;
571 case PM_MTRR_UNCACHABLE: arr_type = 0; break;
572 case PM_MTRR_WRCOMB: arr_type = 8; break;
573 case PM_MTRR_WRTHROUGH: arr_type = 25; break;
574 default: arr_type = 9; break;
577 MTRR_beginUpdate(&c);
/* Write the base byte-wise, with the size shift packed into the low
 * nibble of the third byte, then the type into the matching RCR.
 */
578 _MTRR_setCx86((uchar)arr, ((uchar*)&base)[3]);
579 _MTRR_setCx86((uchar)(arr+1), ((uchar*)&base)[2]);
580 _MTRR_setCx86((uchar)(arr+2), (uchar)((((uchar*)&base)[1]) | arr_size));
581 _MTRR_setCx86((uchar)(CX86_RCR_BASE + reg), (uchar)arr_type);
585 /****************************************************************************
587 On Cyrix 6x86(MX) and MII the ARR3 is special: it has connection
588 with the SMM (System Management Mode) mode. So we need the following:
589 Check whether SMI_LOCK (CCR3 bit 0) is set
590 if it is set, ARR3 cannot be changed (it cannot be changed until the
591 next processor reset)
592 if it is reset, then we can change it, set all the needed bits:
593 - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
594 - disable access to SMM memory (CCR1 bit 2 reset)
595 - disable SMM mode (CCR1 bit 1 reset)
596 - disable write protection of ARR3 (CCR6 bit 1 reset)
597 - (maybe) disable ARR3
598 Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
599 ****************************************************************************/
600 static void CYRIX_initARR(void)
/* ccrc[i] records which CCRs were modified; NOTE(review): lines are
 * missing from this listing around the CCR1/ccrc updates.
 */
604 int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
607 MTRR_beginUpdate(&c);
609 /* Save all CCRs locally */
610 ccr[0] = _MTRR_getCx86(CX86_CCR0);
611 ccr[1] = _MTRR_getCx86(CX86_CCR1);
612 ccr[2] = _MTRR_getCx86(CX86_CCR2);
/* CCR3 was already read (and modified) by MTRR_beginUpdate, so take the
 * saved copy rather than re-reading the live register.
 */
613 ccr[3] = (uchar)c.ccr3;
614 ccr[4] = _MTRR_getCx86(CX86_CCR4);
615 ccr[5] = _MTRR_getCx86(CX86_CCR5);
616 ccr[6] = _MTRR_getCx86(CX86_CCR6);
620 /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
621 * access to SMM memory through ARR3 (bit 7).
625 ccrc[6] = 1; /* Disable write protection of ARR3. */
626 _MTRR_setCx86(CX86_CCR6,ccr[6]);
630 /* If we changed CCR1 in memory, change it in the processor, too. */
632 _MTRR_setCx86(CX86_CCR1,ccr[1]);
634 /* Enable ARR usage by the processor */
635 if (!(ccr[5] & 0x20)) {
638 _MTRR_setCx86(CX86_CCR5,ccr[5]);
641 /* We are finished updating */
645 /****************************************************************************
647 Initialise the MTRR module, by detecting the processor type and determining
648 if the processor supports the MTRR functionality.
649 ****************************************************************************/
/* NOTE(review): the function signature (presumably MTRR_init) is missing
 * from this listing; the locals and body below belong to it.
 */
653 ulong eax,edx,lbase,lsize;
655 /* Check that we have a compatible CPU */
658 if (!_MTRR_isRing0())
660 cpu = CPU_getProcessorType();
661 cpuFamily = cpu & CPU_familyMask;
662 cpuType = cpu & CPU_mask;
663 cpuStepping = (cpu & CPU_steppingMask) >> CPU_steppingShift;
/* Intel path: variable-range MTRR count is the low byte of the capability
 * MSR; bind the P6-style accessors.
 */
666 /* Intel Pentium Pro and later support the MTRR registers */
667 if (cpuType < CPU_PentiumPro)
669 _MTRR_readMSR(INTEL_cap_MSR,&eax,&edx);
670 numMTRR = eax & 0xFF;
671 getMTRR = INTEL_getMTRR;
672 setMTRR = INTEL_setMTRR;
673 getFreeRegion = GENERIC_getFreeRegion;
674 INTEL_disableBankedWriteCombine();
677 /* AMD K6-2 and later support the MTRR registers */
678 if ((cpuType < CPU_AMDK6_2) || (cpuType == CPU_AMDK6_2 && cpuStepping < 8))
680 if (cpuType < CPU_AMDAthlon) {
681 numMTRR = 2; /* AMD CPU's have 2 MTRR's */
682 getMTRR = AMD_getMTRR;
683 setMTRR = AMD_setMTRR;
684 getFreeRegion = AMDK6_getFreeRegion;
686 /* For some reason some IBM systems with K6-2 processors
687 * have write combined enabled for the system BIOS
688 * region from 0xE0000 to 0xFFFFFF. We need *both* MTRR's
689 * for our own graphics drivers, so if we detect any
690 * regions below the 1Meg boundary, we remove them
691 * so we can use this MTRR register ourselves.
693 for (i = 0; i < numMTRR; i++) {
/* NOTE(review): '<ype' is corrupted text; the original argument was
 * almost certainly '&ltype'.
 */
694 getMTRR(i,&lbase,&lsize,<ype);
695 if (lbase < 0x100000)
700 /* AMD Athlon uses P6 style MTRR's */
701 _MTRR_readMSR(INTEL_cap_MSR,&eax,&edx);
702 numMTRR = eax & 0xFF;
703 getMTRR = INTEL_getMTRR;
704 setMTRR = INTEL_setMTRR;
705 getFreeRegion = GENERIC_getFreeRegion;
706 INTEL_disableBankedWriteCombine();
710 /* Cyrix 6x86 and later support the MTRR registers */
711 if (cpuType < CPU_Cyrix6x86 || cpuType >= CPU_CyrixMediaGX)
713 numMTRR = 8; /* Cyrix CPU's have 8 ARR's */
714 getMTRR = CYRIX_getMTRR;
715 setMTRR = CYRIX_setMTRR;
716 getFreeRegion = CYRIX_getFreeRegion;
725 /****************************************************************************
727 base - The starting physical base address of the region
728 size - The size in bytes of the region
729 type - Type to place into the MTRR register
732 Error code describing the result.
735 Function to enable write combining for the specified region of memory.
736 ****************************************************************************/
737 int MTRR_enableWriteCombine(
744 ulong lbase,lsize,last;
746 /* Check that we have a CPU that supports MTRR's and type is valid */
748 if (!_MTRR_isRing0())
749 return PM_MTRR_ERR_NO_OS_SUPPORT;
/* NOTE(review): a guard (likely 'if (numMTRR <= 0)' or similar) appears to
 * be missing from this listing before the next return.
 */
750 return PM_MTRR_NOT_SUPPORTED;
752 if (type >= PM_MTRR_MAX)
753 return PM_MTRR_ERR_PARAMS;
755 /* If the type is WC, check that this processor supports it */
756 if (!MTRR_haveWriteCombine())
757 return PM_MTRR_ERR_NOWRCOMB;
759 /* Adjust the boundaries depending on the CPU type */
762 if (cpuType < CPU_AMDAthlon) {
763 /* Apply the K6 block alignment and size rules. In order:
764 * o Uncached or gathering only
765 * o 128K or bigger block
767 * o base suitably aligned to the power
/* '(size & ~(size-1)) - size' is non-zero exactly when size is not a
 * power of two; 1 << 17 is the 128K minimum block size.
 */
769 if (type > PM_MTRR_WRCOMB && (size < (1 << 17) || (size & ~(size-1))-size || (base & (size-1))))
770 return PM_MTRR_ERR_NOT_ALIGNED;
773 /* Fall through for AMD Athlon which uses P6 style MTRR's */
776 if ((base & 0xFFF) || (size & 0xFFF)) {
777 /* Base and size must be multiples of 4Kb */
778 return PM_MTRR_ERR_NOT_4KB_ALIGNED;
780 if (base < 0x100000) {
781 /* Base must be >= 1Mb */
782 return PM_MTRR_ERR_BELOW_1MB;
785 /* Check upper bits of base and last are equal and lower bits
786 * are 0 for base and 1 for last
788 last = base + size - 1;
789 for (lbase = base; !(lbase & 1) && (last & 1); lbase = lbase >> 1, last = last >> 1)
792 /* Base is not aligned on the correct boundary */
793 return PM_MTRR_ERR_NOT_ALIGNED;
797 return PM_MTRR_NOT_SUPPORTED;
800 /* Search for existing MTRR */
801 for (i = 0; i < numMTRR; ++i) {
/* NOTE(review): '<ype' is corrupted text; the original argument was
 * almost certainly '&ltype'.
 */
802 getMTRR(i,&lbase,&lsize,<ype);
803 if (lbase == 0 && lsize == 0)
805 if (base > lbase + (lsize-1))
807 if ((base < lbase) && (base+size-1 < lbase))
810 /* Check that we don't overlap an existing region */
811 if (type != PM_MTRR_UNCACHABLE) {
812 if ((base < lbase) || (base+size-1 > lbase+lsize-1))
813 return PM_MTRR_ERR_OVERLAP;
815 else if (base == lbase && size == lsize) {
816 /* The region already exists so leave it alone */
817 return PM_MTRR_ERR_OK;
820 /* New region is enclosed by an existing region, so only allow
821 * a new type to be created if we are setting a region to be
822 * uncacheable (such as MMIO registers within a framebuffer).
824 if (ltype != (int)type) {
825 if (type == PM_MTRR_UNCACHABLE)
827 return PM_MTRR_ERR_TYPE_MISMATCH;
829 return PM_MTRR_ERR_OK;
832 /* Search for an empty MTRR */
833 if ((i = getFreeRegion(base,size)) < 0)
834 return PM_MTRR_ERR_NONE_FREE;
835 setMTRR(i,base,size,type);
836 return PM_MTRR_ERR_OK;
839 /****************************************************************************
841 callback - Function to callback with write combine information
844 Function to enumerate all write combine regions currently enabled for the
846 ****************************************************************************/
847 int PMAPI PM_enumWriteCombine(
848 PM_enumWriteCombine_t callback)
853 /* Check that we have a CPU that supports MTRR's and type is valid */
855 if (!_MTRR_isRing0())
856 return PM_MTRR_ERR_NO_OS_SUPPORT;
/* NOTE(review): a guard (likely 'if (numMTRR <= 0)' or similar) appears to
 * be missing from this listing before the next return.
 */
857 return PM_MTRR_NOT_SUPPORTED;
860 /* Enumerate all existing MTRR's */
861 for (i = 0; i < numMTRR; ++i) {
/* NOTE(review): '<ype' is corrupted text; the original argument was
 * almost certainly '&ltype'.
 */
862 getMTRR(i,&lbase,&lsize,<ype);
863 callback(lbase,lsize,ltype);
865 return PM_MTRR_ERR_OK;