// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 */
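
/*
 * Allocate contiguous chunks of memory per online NUMA node, strip them from
 * the kernel's linear mapping and expose them through debugfs, so that the
 * range can be handed to a consumer (such as the hardware trace macros)
 * without the kernel ever touching it.
 *
 * Example usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# allocate one 1 GiB chunk on each online node
 *	echo 0x40000000 > /sys/kernel/debug/powerpc/memtrace/enable
 *	# a directory per node appears, named after the node id
 *	hexdump -C /sys/kernel/debug/powerpc/memtrace/00000000/trace
 *	# free all chunks again
 *	echo 0 > /sys/kernel/debug/powerpc/memtrace/enable
 */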

#define pr_fmt(fmt) "memtrace: " fmt

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;
	u64 start;
	u64 size;
	u32 nid;
	struct dentry *dir;
	char name[16];
};

static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;

static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;

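/* Read the trace buffer through the ioremap()ed kernel mapping. */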
static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct memtrace_entry *ent = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}

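/*
 * Map the trace buffer into userspace. The requested window must fit inside
 * the buffer, and the mapping is made non-cacheable to match the kernel's
 * own ioremap() view of the range.
 */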
static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct memtrace_entry *ent = filp->private_data;

	if (ent->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

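/* File operations for the per-node "trace" debugfs file. */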
static const struct file_operations memtrace_fops = {
	.llseek = default_llseek,
	.read	= memtrace_read,
	.open	= simple_open,
	.mmap	= memtrace_mmap,
};

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to @chunk bytes.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

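/* Zero a chunk while it is still covered by the kernel linear mapping. */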
static void memtrace_clear_range(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	unsigned long pfn;

	/* As HIGHMEM does not apply, use clear_page() directly. */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
			cond_resched();
		clear_page(__va(PFN_PHYS(pfn)));
	}
	/*
	 * Before we go ahead and use this range as a cache-inhibited range,
	 * flush the cache.
	 */
	flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
				   (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
				   FLUSH_CHUNK_SIZE);
}

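/*
 * Allocate one contiguous, size-aligned chunk on @nid, mark its pages
 * PageOffline() and strip it from the linear mapping. Returns the physical
 * address of the chunk, or 0 on failure.
 */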
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
	const unsigned long nr_pages = PHYS_PFN(size);
	unsigned long pfn, start_pfn;
	struct page *page;

	/*
	 * Trace memory needs to be aligned to the size, which is guaranteed
	 * by alloc_contig_pages().
	 */
	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
				  __GFP_NOWARN, nid, NULL);
	if (!page)
		return 0;
	start_pfn = page_to_pfn(page);

	/*
	 * Clear the range while we still have a linear mapping.
	 *
	 * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
	 */
	memtrace_clear_range(start_pfn, nr_pages);

	/*
	 * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
	 * dumping, ...) should be touching these pages.
	 */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__SetPageOffline(pfn_to_page(pfn));

	arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);

	return PFN_PHYS(start_pfn);
}

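/*
 * Try to allocate one chunk of @size bytes on every online node. Nodes where
 * the allocation fails (e.g. not enough local memory) are skipped.
 */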
static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -EINVAL;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

		memtrace_array[memtrace_array_nr].start = m;
		memtrace_array[memtrace_array_nr].size = size;
		memtrace_array[memtrace_array_nr].nid = nid;
		memtrace_array_nr++;
	}

	return 0;
}

static struct dentry *memtrace_debugfs_dir;

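/*
 * Create one debugfs directory per allocated chunk, exposing the mapped
 * buffer ("trace") along with its physical start address and size.
 */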
static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
			       ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, sizeof(ent->name), "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);

		ent->dir = dir;
		debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops);
		debugfs_create_x64("start", 0400, dir, &ent->start);
		debugfs_create_x64("size", 0400, dir, &ent->size);
	}

	return ret;
}

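/*
 * Undo memtrace_alloc_node(): recreate the linear mapping, clear
 * PageOffline() and hand the pages back to the buddy allocator.
 */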
static int memtrace_free(int nid, u64 start, u64 size)
{
	struct mhp_params params = { .pgprot = PAGE_KERNEL };
	const unsigned long nr_pages = PHYS_PFN(size);
	const unsigned long start_pfn = PHYS_PFN(start);
	unsigned long pfn;
	int ret;

	ret = arch_create_linear_mapping(nid, start, size, &params);
	if (ret)
		return ret;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__ClearPageOffline(pfn_to_page(pfn));

	free_contig_range(start_pfn, nr_pages);
	return 0;
}

/*
 * Iterate through the chunks of memory we allocated and attempt to expose
 * them back to the kernel.
 */
static int memtrace_free_regions(void)
{
	int i, ret = 0;
	struct memtrace_entry *ent;

	for (i = memtrace_array_nr - 1; i >= 0; i--) {
		ent = &memtrace_array[i];

		/* We have freed this chunk previously */
		if (ent->nid == NUMA_NO_NODE)
			continue;

		/* Remove from io mappings */
		if (ent->mem) {
			iounmap(ent->mem);
			ent->mem = NULL;
		}

		if (memtrace_free(ent->nid, ent->start, ent->size)) {
			pr_err("Failed to free trace memory on node %d\n",
			       ent->nid);
			ret += 1;
			continue;
		}

		/*
		 * Memory was freed successfully, so clean up references to it;
		 * on reentry we can then tell that this chunk was freed.
		 */
		debugfs_remove_recursive(ent->dir);
		pr_info("Freed trace memory back on node %d\n", ent->nid);
		ent->size = ent->start = ent->nid = NUMA_NO_NODE;
	}
	if (ret)
		return ret;

	/* If all chunks of memory were freed successfully, reset globals */
	kfree(memtrace_array);
	memtrace_array = NULL;
	memtrace_size = 0;
	memtrace_array_nr = 0;
	return 0;
}

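/*
 * Write handler for the "enable" debugfs file: @val is the per-node chunk
 * size in bytes and must be a multiple of the memory block size. Writing 0
 * just frees any previously allocated chunks.
 */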
static int memtrace_enable_set(void *data, u64 val)
{
	int rc = -EAGAIN;
	u64 bytes;

	/*
	 * The size must be aligned to the memory block size. Zero is
	 * trivially aligned and simply means "free everything".
	 */
	bytes = memory_block_size_bytes();
	if (val & (bytes - 1)) {
		pr_err("Value must be aligned with 0x%llx\n", bytes);
		return -EINVAL;
	}

	mutex_lock(&memtrace_mutex);

	/* Free all previously allocated memory. */
	if (memtrace_size && memtrace_free_regions())
		goto out_unlock;

	if (!val) {
		rc = 0;
		goto out_unlock;
	}

	/* Allocate memory. */
	if (memtrace_init_regions_runtime(val))
		goto out_unlock;

	if (memtrace_init_debugfs())
		goto out_unlock;

	memtrace_size = val;
	rc = 0;
out_unlock:
	mutex_unlock(&memtrace_mutex);
	return rc;
}

static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
			memtrace_enable_set, "0x%016llx\n");

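/* Set up the "memtrace" debugfs directory with its "enable" control file. */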
static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  arch_debugfs_dir);

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);

	return 0;
}
machine_device_initcall(powernv, memtrace_init);