// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 Google, Inc
 *
 * Memory Type Range Registers - these are used to tell the CPU whether
 * memory is cacheable and if so the cache write mode to use.
 *
 * These can speed up booting. See the mtrr command.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3:
 * System Programming
 */
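
/*
 * Typical use (a sketch; the region and size below are only examples):
 * queue the ranges to mark, then program them all in one pass. Region
 * sizes must be powers of two, since the mask is derived as ~(size - 1).
 *
 *	mtrr_add_request(MTRR_TYPE_WRBACK, 0, 256 << 20);
 *	mtrr_commit(true);
 */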

/*
 * Note that any console output (e.g. debug()) in this file will likely fail
 * since the MTRR registers are sometimes in flux.
 */

#include <common.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/mtrr.h>

DECLARE_GLOBAL_DATA_PTR;

/* Prepare to adjust MTRRs */
void mtrr_open(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	if (do_caches) {
		state->enable_cache = dcache_status();

		if (state->enable_cache)
			disable_caches();
	}
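	/* Save the default type, then disable all MTRRs while they are adjusted */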
	state->deftype = native_read_msr(MTRR_DEF_TYPE_MSR);
	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype & ~MTRR_DEF_TYPE_EN);
}

/* Clean up after adjusting MTRRs, and enable them */
void mtrr_close(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

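	/* Restore the saved default type, with MTRRs enabled again */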
	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype | MTRR_DEF_TYPE_EN);
	if (do_caches && state->enable_cache)
		enable_caches();
}

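/* Apply all queued requests to the MTRRs and clear any unused registers */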
int mtrr_commit(bool do_caches)
{
	struct mtrr_request *req = gd->arch.mtrr_req;
	struct mtrr_state state;
	uint64_t mask;
	int i;

	debug("%s: enabled=%d, count=%d\n", __func__, gd->arch.has_mtrr,
	      gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	debug("open\n");
	mtrr_open(&state, do_caches);
	debug("open done\n");
	for (i = 0; i < gd->arch.mtrr_req_count; i++, req++) {
		mask = ~(req->size - 1);
		mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
		wrmsrl(MTRR_PHYS_BASE_MSR(i), req->start | req->type);
		wrmsrl(MTRR_PHYS_MASK_MSR(i), mask | MTRR_PHYS_MASK_VALID);
	}

	/* Clear the ones that are unused */
	debug("clear\n");
	for (; i < MTRR_COUNT; i++)
		wrmsrl(MTRR_PHYS_MASK_MSR(i), 0);
	debug("close\n");
	mtrr_close(&state, do_caches);
	debug("mtrr done\n");

	return 0;
}

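/* Queue a memory-range request to be programmed by the next mtrr_commit() */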
int mtrr_add_request(int type, uint64_t start, uint64_t size)
{
	struct mtrr_request *req;
	uint64_t mask;

	debug("%s: count=%d\n", __func__, gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
		return -ENOSPC;
	req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
	req->type = type;
	req->start = start;
	req->size = size;
	debug("%d: type=%d, %08llx %08llx\n", gd->arch.mtrr_req_count - 1,
	      req->type, req->start, req->size);
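	/*
	 * The mask is computed here only for the debug trace below;
	 * mtrr_commit() recalculates it when the MSRs are actually written
	 */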
	mask = ~(req->size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	mask |= MTRR_PHYS_MASK_VALID;
	debug(" %016llx %016llx\n", req->start | req->type, mask);

	return 0;
}