// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 */
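/*
 * Debugfs interface (typically /sys/kernel/debug/powerpc/memtrace/) for
 * carving trace memory out of the running kernel: writing a size to the
 * "enable" file offlines and removes that much memory from each online node,
 * and the removed ranges are then exposed through per-node "trace", "start"
 * and "size" files.
 */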

#define pr_fmt(fmt) "memtrace: " fmt

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/debugfs.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;
	u64 start;
	u64 size;
	u32 nid;
	struct dentry *dir;
	char name[16];
};

static u64 memtrace_size;

static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;

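/* debugfs read handler: copy out the contents of a removed trace region. */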
static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct memtrace_entry *ent = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}

static const struct file_operations memtrace_fops = {
	.llseek = default_llseek,
	.read	= memtrace_read,
	.open	= simple_open,
};

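/* walk_memory_range() callbacks used while offlining a candidate range. */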
static int check_memblock_online(struct memory_block *mem, void *arg)
{
	if (mem->state != MEM_ONLINE)
		return -1;

	return 0;
}

static int change_memblock_state(struct memory_block *mem, void *arg)
{
	unsigned long state = (unsigned long)arg;

	mem->state = state;

	return 0;
}

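/*
 * Mark the range as going offline, attempt to offline it, and either mark it
 * fully offline on success or roll the memory blocks back to online on
 * failure.
 */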
/* called with device_hotplug_lock held */
static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
{
	u64 end_pfn = start_pfn + nr_pages - 1;

	if (walk_memory_range(start_pfn, end_pfn, NULL,
	    check_memblock_online))
		return false;

	walk_memory_range(start_pfn, end_pfn, (void *)MEM_GOING_OFFLINE,
			  change_memblock_state);

	if (offline_pages(start_pfn, nr_pages)) {
		walk_memory_range(start_pfn, end_pfn, (void *)MEM_ONLINE,
				  change_memblock_state);
		return false;
	}

	walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
			  change_memblock_state);

	return true;
}

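/*
 * Scan the node from its highest address downwards for a naturally aligned
 * chunk of the requested size that can be offlined, then remove it from the
 * kernel. Returns the physical base address of the chunk, or 0 on failure.
 */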
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
	u64 start_pfn, end_pfn, nr_pages, pfn;
	u64 base_pfn;
	u64 bytes = memory_block_size_bytes();

	if (!node_spanned_pages(nid))
		return 0;

	start_pfn = node_start_pfn(nid);
	end_pfn = node_end_pfn(nid);
	nr_pages = size >> PAGE_SHIFT;

	/* Trace memory needs to be aligned to the size */
	end_pfn = round_down(end_pfn - nr_pages, nr_pages);

	lock_device_hotplug();
	for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
		if (memtrace_offline_pages(nid, base_pfn, nr_pages)) {
			/*
			 * Remove memory in memory block size chunks so that
			 * iomem resources are always split to the same size
			 * and we never try to remove memory that spans two
			 * iomem resources.
			 */
			end_pfn = base_pfn + nr_pages;
			for (pfn = base_pfn; pfn < end_pfn; pfn += bytes >> PAGE_SHIFT)
				__remove_memory(nid, pfn << PAGE_SHIFT, bytes);
			unlock_device_hotplug();
			return base_pfn << PAGE_SHIFT;
		}
	}
	unlock_device_hotplug();

	return 0;
}

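/*
 * Carve a trace region of the given size out of every online node and record
 * each removed range in memtrace_array.
 */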
static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -EINVAL;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

		memtrace_array[memtrace_array_nr].start = m;
		memtrace_array[memtrace_array_nr].size = size;
		memtrace_array[memtrace_array_nr].nid = nid;
		memtrace_array_nr++;
	}

	return 0;
}

static struct dentry *memtrace_debugfs_dir;

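/*
 * Map each removed range and expose it through per-node debugfs files:
 * "trace" (the raw contents), "start" and "size".
 */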
static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
				 ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, 16, "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
		if (!dir) {
			pr_err("Failed to create debugfs directory for node %d\n",
				ent->nid);
			return -1;
		}

		ent->dir = dir;
		debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
		debugfs_create_x64("start", 0400, dir, &ent->start);
		debugfs_create_x64("size", 0400, dir, &ent->size);
	}

	return ret;
}

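/* walk_memory_range() callback: bring a re-added memory block back online. */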
static int online_mem_block(struct memory_block *mem, void *arg)
{
	return device_online(&mem->dev);
}

/*
 * Iterate through the chunks of memory we have removed from the kernel
 * and attempt to add them back to the kernel.
 */
static int memtrace_online(void)
{
	int i, ret = 0;
	struct memtrace_entry *ent;

	for (i = memtrace_array_nr - 1; i >= 0; i--) {
		ent = &memtrace_array[i];

		/* We have onlined this chunk previously */
		if (ent->nid == NUMA_NO_NODE)
			continue;

		/* Remove from io mappings */
		if (ent->mem) {
			iounmap(ent->mem);
			ent->mem = NULL;
		}

		if (add_memory(ent->nid, ent->start, ent->size)) {
			pr_err("Failed to add trace memory to node %d\n",
				ent->nid);
			ret += 1;
			continue;
		}

		/*
		 * If the kernel isn't compiled with the auto online option
		 * we need to online the memory ourselves.
		 */
		if (!memhp_auto_online) {
			lock_device_hotplug();
			walk_memory_range(PFN_DOWN(ent->start),
					  PFN_UP(ent->start + ent->size - 1),
					  NULL, online_mem_block);
			unlock_device_hotplug();
		}

		/*
		 * Memory was added successfully so clean up references to it
		 * so on reentry we can tell that this chunk was added.
		 */
		debugfs_remove_recursive(ent->dir);
		pr_info("Added trace memory back to node %d\n", ent->nid);
		ent->size = ent->start = ent->nid = NUMA_NO_NODE;
	}
	if (ret)
		return ret;

	/* If all chunks of memory were added successfully, reset globals */
	kfree(memtrace_array);
	memtrace_array = NULL;
	memtrace_size = 0;
	memtrace_array_nr = 0;
	return 0;
}

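/*
 * debugfs "enable" write handler: writing a new size first gives back any
 * previously removed memory, then (for a non-zero value) offlines and
 * removes the requested amount from each online node.
 */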
static int memtrace_enable_set(void *data, u64 val)
{
	u64 bytes;

	/*
	 * Don't attempt to do anything if the size isn't aligned to the
	 * memory block size. A value of zero is allowed and simply releases
	 * any previously removed memory.
	 */
	bytes = memory_block_size_bytes();
	if (val & (bytes - 1)) {
		pr_err("Value must be aligned to 0x%llx\n", bytes);
		return -EINVAL;
	}

	/* Re-add/online previously removed/offlined memory */
	if (memtrace_size) {
		if (memtrace_online())
			return -EAGAIN;
	}

	if (!val)
		return 0;

	/* Offline and remove memory */
	if (memtrace_init_regions_runtime(val))
		return -EINVAL;

	if (memtrace_init_debugfs())
		return -EINVAL;

	memtrace_size = val;

	return 0;
}

static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
					memtrace_enable_set, "0x%016llx\n");

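/* Create the debugfs "memtrace" directory and its "enable" control file. */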
static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  powerpc_debugfs_root);
	if (!memtrace_debugfs_dir)
		return -1;

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);

	return 0;
}
machine_device_initcall(powernv, memtrace_init);