// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 */

#define pr_fmt(fmt) "memtrace: " fmt

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;
	u64 start;
	u64 size;
	u32 nid;
	struct dentry *dir;
	char name[16];
};

static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;

static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;

static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct memtrace_entry *ent = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}

static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct memtrace_entry *ent = filp->private_data;
	unsigned long ent_nrpages = ent->size >> PAGE_SHIFT;
	unsigned long vma_nrpages = vma_pages(vma);

	/* The requested page offset should be within the object's page count */
	if (vma->vm_pgoff >= ent_nrpages)
		return -EINVAL;

	/* The requested mapping range should remain within the bounds */
	if (vma_nrpages > ent_nrpages - vma->vm_pgoff)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

static const struct file_operations memtrace_fops = {
	.llseek = default_llseek,
	.read	= memtrace_read,
	.open	= simple_open,
	.mmap	= memtrace_mmap,
};
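
/*
 * Userspace reads the per-node "trace" file with read()/lseek(), or mmap()s
 * it for direct (cache-inhibited) access. A minimal sketch, assuming the
 * usual debugfs mount point and node 0:
 *
 *	int fd = open("/sys/kernel/debug/powerpc/memtrace/00000000/trace",
 *		      O_RDONLY);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 */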

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

static void memtrace_clear_range(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	unsigned long pfn;

	/* As HIGHMEM does not apply, use clear_page() directly. */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
			cond_resched();
		clear_page(__va(PFN_PHYS(pfn)));
	}
	/*
	 * Before we go ahead and use this range as a cache-inhibited range,
	 * flush the cache.
	 */
	flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
				   (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
				   FLUSH_CHUNK_SIZE);
}

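/*
 * Allocate @size bytes of physically contiguous trace memory on @nid:
 * clear it while the linear mapping still exists, mark the pages
 * PageOffline() and drop the range from the linear mapping so it can
 * later be mapped cache-inhibited.
 *
 * Returns the physical address of the allocation, or 0 on failure.
 */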
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
	const unsigned long nr_pages = PHYS_PFN(size);
	unsigned long pfn, start_pfn;
	struct page *page;

	/*
	 * Trace memory needs to be aligned to the size, which is guaranteed
	 * by alloc_contig_pages().
	 */
	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
				  __GFP_NOWARN, nid, NULL);
	if (!page)
		return 0;
	start_pfn = page_to_pfn(page);

	/*
	 * Clear the range while we still have a linear mapping.
	 *
	 * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
	 */
	memtrace_clear_range(start_pfn, nr_pages);

	/*
	 * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
	 * dumping, ...) should be touching these pages.
	 */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__SetPageOffline(pfn_to_page(pfn));

	arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);

	return PFN_PHYS(start_pfn);
}

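/*
 * Carve out one chunk of trace memory per online node. A node without
 * enough (or any) local memory only gets a warning, so the result may
 * cover just a subset of the online nodes.
 */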
static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				 sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -EINVAL;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

		memtrace_array[memtrace_array_nr].start = m;
		memtrace_array[memtrace_array_nr].size = size;
		memtrace_array[memtrace_array_nr].nid = nid;
		memtrace_array_nr++;
	}

	return 0;
}

static struct dentry *memtrace_debugfs_dir;

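/*
 * The linear mapping of each chunk was removed at allocation time, so map
 * it with ioremap() and expose it under a per-node debugfs directory
 * (named "%08x" of the node id) as "trace", "start" and "size".
 */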
static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
			       ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, 16, "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);

		ent->dir = dir;
		debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops);
		debugfs_create_x64("start", 0400, dir, &ent->start);
		debugfs_create_x64("size", 0400, dir, &ent->size);
	}

	return ret;
}

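/*
 * Undo memtrace_alloc_node(): recreate the linear mapping, clear the
 * PageOffline() marker and return the range to the page allocator.
 */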
static int memtrace_free(int nid, u64 start, u64 size)
{
	struct mhp_params params = { .pgprot = PAGE_KERNEL };
	const unsigned long nr_pages = PHYS_PFN(size);
	const unsigned long start_pfn = PHYS_PFN(start);
	unsigned long pfn;
	int ret;

	ret = arch_create_linear_mapping(nid, start, size, &params);
	if (ret)
		return ret;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__ClearPageOffline(pfn_to_page(pfn));

	free_contig_range(start_pfn, nr_pages);
	return 0;
}

/*
 * Iterate through the chunks of memory we allocated and attempt to expose
 * them back to the kernel.
 */
static int memtrace_free_regions(void)
{
	int i, ret = 0;
	struct memtrace_entry *ent;

	for (i = memtrace_array_nr - 1; i >= 0; i--) {
		ent = &memtrace_array[i];

		/* We have freed this chunk previously */
		if (ent->nid == NUMA_NO_NODE)
			continue;

		/* Remove from io mappings */
		if (ent->mem) {
			iounmap(ent->mem);
			ent->mem = 0;
		}

		if (memtrace_free(ent->nid, ent->start, ent->size)) {
			pr_err("Failed to free trace memory on node %d\n",
			       ent->nid);
			ret += 1;
			continue;
		}

		/*
		 * Memory was freed successfully, so clean up references to it;
		 * on reentry we can then tell that this chunk was freed.
		 */
		debugfs_remove_recursive(ent->dir);
		pr_info("Freed trace memory back on node %d\n", ent->nid);
		ent->size = ent->start = ent->nid = NUMA_NO_NODE;
	}
	if (ret)
		return ret;

	/* If all chunks of memory were freed successfully, reset globals */
	kfree(memtrace_array);
	memtrace_array = NULL;
	memtrace_size = 0;
	memtrace_array_nr = 0;
	return 0;
}

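/*
 * Writing a size to the "enable" debugfs file (re)allocates trace memory;
 * writing 0 just frees it. For example, to request one 1 GiB chunk per
 * online node (a sketch, assuming the usual debugfs mount point):
 *
 *	echo 0x40000000 > /sys/kernel/debug/powerpc/memtrace/enable
 */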
static int memtrace_enable_set(void *data, u64 val)
{
	int rc = -EAGAIN;
	u64 bytes;

	/*
	 * The requested size must be aligned to the memory block size. A
	 * value of zero (trivially aligned) frees any previous allocation.
	 */
	bytes = memory_block_size_bytes();
	if (val & (bytes - 1)) {
		pr_err("Value must be aligned to 0x%llx\n", bytes);
		return -EINVAL;
	}

	mutex_lock(&memtrace_mutex);

	/* Free all previously allocated memory. */
	if (memtrace_size && memtrace_free_regions())
		goto out_unlock;

	if (!val) {
		rc = 0;
		goto out_unlock;
	}

	/* Allocate memory. */
	if (memtrace_init_regions_runtime(val))
		goto out_unlock;

	if (memtrace_init_debugfs())
		goto out_unlock;

	memtrace_size = val;
	rc = 0;
out_unlock:
	mutex_unlock(&memtrace_mutex);
	return rc;
}

static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
			memtrace_enable_set, "0x%016llx\n");

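/*
 * Create the "memtrace" debugfs directory and its "enable" control file;
 * runs as a device initcall on powernv machines.
 */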
static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  arch_debugfs_dir);

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);

	return 0;
}
machine_device_initcall(powernv, memtrace_init);