/*
 * Copyright (c) 2014 Google, Inc
 *
 * From Coreboot file of the same name
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef _ASM_MTRR_H
#define _ASM_MTRR_H

/* These are the region types */
#define MTRR_TYPE_UNCACHEABLE	0
#define MTRR_TYPE_WRCOMB	1
/* 2 and 3 are reserved */
#define MTRR_TYPE_WRTHROUGH	4
#define MTRR_TYPE_WRPROT	5
#define MTRR_TYPE_WRBACK	6
#define MTRR_NUM_TYPES		7

#define MTRRcap_MSR		0x0fe
#define MTRRdefType_MSR		0x2ff

#define MTRRdefTypeEn		(1 << 11)
#define MTRRdefTypeFixEn	(1 << 10)

#define SMRRphysBase_MSR	0x1f2
#define SMRRphysMask_MSR	0x1f3

#define MTRRphysBase_MSR(reg)	(0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg)	(0x200 + 2 * (reg) + 1)

#define MTRRphysMaskValid	(1 << 11)
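
/*
 * Illustrative example (not part of this header): the variable-range MTRRs
 * are base/mask MSR pairs starting at 0x200, so MTRRphysBase_MSR(0) is
 * 0x200, MTRRphysMask_MSR(0) is 0x201, MTRRphysBase_MSR(1) is 0x202, and
 * so on. A sketch of marking a 256MiB region at 0 as write-back, assuming
 * a Linux-style wrmsrl() helper and a CPU with 36 physical address bits:
 *
 *	wrmsrl(MTRRphysBase_MSR(0), 0x0 | MTRR_TYPE_WRBACK);
 *	wrmsrl(MTRRphysMask_MSR(0),
 *	       (~(0x10000000ULL - 1) & ((1ULL << 36) - 1)) | MTRRphysMaskValid);
 */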

#define NUM_FIXED_RANGES	88
#define RANGES_PER_FIXED_MTRR	8
#define MTRRfix64K_00000_MSR	0x250
#define MTRRfix16K_80000_MSR	0x258
#define MTRRfix16K_A0000_MSR	0x259
#define MTRRfix4K_C0000_MSR	0x268
#define MTRRfix4K_C8000_MSR	0x269
#define MTRRfix4K_D0000_MSR	0x26a
#define MTRRfix4K_D8000_MSR	0x26b
#define MTRRfix4K_E0000_MSR	0x26c
#define MTRRfix4K_E8000_MSR	0x26d
#define MTRRfix4K_F0000_MSR	0x26e
#define MTRRfix4K_F8000_MSR	0x26f
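
/*
 * The 88 fixed ranges come from eleven fixed-range MSRs of eight ranges
 * each: one 64KiB MSR covering 0x00000-0x7ffff, two 16KiB MSRs covering
 * 0x80000-0xbffff and eight 4KiB MSRs covering 0xc0000-0xfffff.
 */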

#if !defined(__ASSEMBLER__)

/*
 * The MTRR code has some side effects that callers should be aware of.
 * 1. The call sequence matters. x86_setup_mtrrs() calls
 *    x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrr() (together
 *    the equivalent of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs().
 *    If callers want to invoke the components of x86_setup_mtrrs()
 *    separately because of other requirements, this ordering should still
 *    be preserved (see the sketch after the declarations below).
 * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
 *    of the nature of the global MTRR enable flag. Therefore, all direct
 *    or indirect callers of enable_fixed_mtrr() should ensure that the
 *    variable MTRR MSRs do not contain bad ranges.
 * 3. If CONFIG_CACHE_ROM is selected, an MTRR is allocated for caching
 *    the ROM. However, it is set to uncacheable (UC). It is the
 *    responsibility of the caller to enable it by calling
 *    x86_mtrr_enable_rom_caching().
 */
void x86_setup_mtrrs(void);
/*
 * x86_setup_var_mtrrs() parameters:
 * address_bits - number of physical address bits supported by the CPU
 * above4gb - 2 means dynamically detect the number of variable MTRRs
 *            available. Non-zero means handle memory ranges above 4GiB.
 *            0 means ignore memory ranges above 4GiB.
 */
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
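/*
 * For example, x86_setup_var_mtrrs(36, 2) on a CPU reporting 36 physical
 * address bits covers memory above 4GiB and detects the number of variable
 * MTRRs at run time; the value 36 here is purely illustrative.
 */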
void enable_fixed_mtrr(void);
void x86_setup_fixed_mtrrs(void);
/* Set up fixed MTRRs but do not enable them. */
void x86_setup_fixed_mtrrs_no_enable(void);
int x86_mtrr_check(void);
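
/*
 * Illustrative sketch only (hypothetical helper, not part of the U-Boot
 * API): a caller that cannot use x86_setup_mtrrs() directly might keep the
 * ordering described above like this. The address-bit count of 36 is just
 * an example value.
 */
static inline void example_setup_mtrrs_split(void)
{
	x86_setup_fixed_mtrrs_no_enable();	/* program fixed-range MSRs */
	enable_fixed_mtrr();		/* also sets the global MTRR enable */
	x86_setup_var_mtrrs(36, 2);	/* 2 = detect variable MTRR count */
}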
/*
 * ROM caching can be used after variable MTRRs are set up. Beware that
 * enabling CONFIG_CACHE_ROM will eat through quite a few MTRRs depending
 * on one's IO hole size and WRCOMB resources, so be sure to check the
 * console log when enabling CONFIG_CACHE_ROM or adding WRCOMB resources.
 * Also beware that on CPUs with core-scoped MTRR registers, such as
 * hyperthreaded CPUs, ROM caching will be disabled if all threads run the
 * MTRR code. Therefore, x86_mtrr_enable_rom_caching() needs to be called
 * after all threads of the same core have run the MTRR code (see the
 * sketch below).
 */
#if CONFIG_CACHE_ROM
void x86_mtrr_enable_rom_caching(void);
void x86_mtrr_disable_rom_caching(void);
/* Return the variable range MTRR index of the ROM cache. */
long x86_mtrr_rom_cache_var_index(void);
#else
static inline void x86_mtrr_enable_rom_caching(void) {}
static inline void x86_mtrr_disable_rom_caching(void) {}
static inline long x86_mtrr_rom_cache_var_index(void) { return -1; }
#endif /* CONFIG_CACHE_ROM */
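
/*
 * Illustrative sketch only (hypothetical helper, not part of the U-Boot
 * API): turning on ROM caching once every thread of the core has finished
 * its MTRR setup. The stub above returns -1 when CONFIG_CACHE_ROM is
 * disabled, so a caller can guard on a non-negative ROM-cache MTRR index.
 */
static inline void example_enable_rom_cache_when_ready(void)
{
	if (x86_mtrr_rom_cache_var_index() >= 0)
		x86_mtrr_enable_rom_caching();
}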

#endif /* !defined(__ASSEMBLER__) */

#if !defined(CONFIG_RAMTOP)
# error "CONFIG_RAMTOP not defined"
#endif

#if ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE - 1)) != 0)
# error "CONFIG_XIP_ROM_SIZE is not a power of 2"
#endif

#if ((CONFIG_CACHE_ROM_SIZE & (CONFIG_CACHE_ROM_SIZE - 1)) != 0)
# error "CONFIG_CACHE_ROM_SIZE is not a power of 2"
#endif

#define CACHE_ROM_BASE	(((1 << 20) - (CONFIG_CACHE_ROM_SIZE >> 12)) << 12)
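/*
 * CACHE_ROM_BASE places the ROM cache window so that it ends exactly at
 * 4GiB: (1 << 20) is the number of 4KiB pages below 4GiB, from which the
 * ROM's page count is subtracted. For example, a CONFIG_CACHE_ROM_SIZE of
 * 0x400000 (4MiB) gives ((0x100000 - 0x400) << 12) = 0xffc00000.
 */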

#if (CONFIG_RAMTOP & (CONFIG_RAMTOP - 1)) != 0
# error "CONFIG_RAMTOP must be a power of 2"
#endif

#endif /* _ASM_MTRR_H */