// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 */

#define pr_fmt(fmt) "memtrace: " fmt

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/debugfs.h>
#include <asm/cacheflush.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;
	u64 start;
	u64 size;
	u32 nid;
	struct dentry *dir;
	char name[16];
};

static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;

static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;

static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct memtrace_entry *ent = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}

static const struct file_operations memtrace_fops = {
	.llseek = default_llseek,
	.read	= memtrace_read,
	.open	= simple_open,
};

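/*
 * FLUSH_CHUNK_SIZE bounds each individual flush_dcache_range() call so
 * that cond_resched() can run between chunks; flushing many gigabytes
 * in a single call could otherwise keep the CPU busy long enough to
 * trigger soft-lockup warnings.
 */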
#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

static void memtrace_clear_range(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	unsigned long pfn;

	/* As HIGHMEM does not apply, use clear_page() directly. */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
			cond_resched();
		clear_page(__va(PFN_PHYS(pfn)));
	}
	/*
	 * Before we go ahead and use this range as a cache-inhibited range,
	 * flush the cache.
	 */
	flush_dcache_range_chunked(PFN_PHYS(start_pfn),
				   PFN_PHYS(start_pfn + nr_pages),
				   FLUSH_CHUNK_SIZE);
}

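/*
 * Carve @size bytes of node-local memory out of the kernel: grab a
 * physically contiguous, naturally aligned chunk, zero it and flush it
 * to memory, mark the pages PageOffline() and remove the linear mapping
 * so the range can safely be used as cache-inhibited memory. Returns
 * the physical address of the chunk, or 0 on failure.
 */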
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
	const unsigned long nr_pages = PHYS_PFN(size);
	unsigned long pfn, start_pfn;
	struct page *page;

	/*
	 * Trace memory needs to be aligned to the size, which is guaranteed
	 * by alloc_contig_pages().
	 */
	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
				  __GFP_NOWARN, nid, NULL);
	if (!page)
		return 0;
	start_pfn = page_to_pfn(page);

	/*
	 * Clear the range while we still have a linear mapping.
	 *
	 * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
	 */
	memtrace_clear_range(start_pfn, nr_pages);

	/*
	 * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
	 * dumping, ...) should be touching these pages.
	 */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__SetPageOffline(pfn_to_page(pfn));

	arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);

	return PFN_PHYS(start_pfn);
}

static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				 sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -ENOMEM;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

		memtrace_array[memtrace_array_nr].start = m;
		memtrace_array[memtrace_array_nr].size = size;
		memtrace_array[memtrace_array_nr].nid = nid;
		memtrace_array_nr++;
	}

	return 0;
}

static struct dentry *memtrace_debugfs_dir;

static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
			       ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, 16, "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);

		ent->dir = dir;
		debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
		debugfs_create_x64("start", 0400, dir, &ent->start);
		debugfs_create_x64("size", 0400, dir, &ent->size);
	}

	return ret;
}

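/*
 * Undo memtrace_alloc_node() for one chunk: recreate the linear
 * mapping, clear PageOffline() on each page and hand the range back
 * to the page allocator.
 */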
static int memtrace_free(int nid, u64 start, u64 size)
{
	struct mhp_params params = { .pgprot = PAGE_KERNEL };
	const unsigned long nr_pages = PHYS_PFN(size);
	const unsigned long start_pfn = PHYS_PFN(start);
	unsigned long pfn;
	int ret;

	ret = arch_create_linear_mapping(nid, start, size, &params);
	if (ret)
		return ret;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__ClearPageOffline(pfn_to_page(pfn));

	free_contig_range(start_pfn, nr_pages);
	return 0;
}

/*
 * Iterate through the chunks of memory we allocated and attempt to expose
 * them back to the kernel.
 */
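/*
 * Chunks that were already freed on an earlier, partially failed pass
 * are marked with nid == NUMA_NO_NODE and skipped, so the free can be
 * retried until it fully succeeds.
 */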
static int memtrace_free_regions(void)
{
	int i, ret = 0;
	struct memtrace_entry *ent;

	for (i = memtrace_array_nr - 1; i >= 0; i--) {
		ent = &memtrace_array[i];

		/* We have freed this chunk previously */
		if (ent->nid == NUMA_NO_NODE)
			continue;

		/* Remove from io mappings */
		if (ent->mem) {
			iounmap(ent->mem);
			ent->mem = NULL;
		}

		if (memtrace_free(ent->nid, ent->start, ent->size)) {
			pr_err("Failed to free trace memory on node %d\n",
			       ent->nid);
			ret += 1;
			continue;
		}

		/*
		 * Memory was freed successfully so clean up references to it
		 * so on reentry we can tell that this chunk was freed.
		 */
		debugfs_remove_recursive(ent->dir);
		pr_info("Freed trace memory back on node %d\n", ent->nid);
		ent->size = ent->start = ent->nid = NUMA_NO_NODE;
	}
	if (ret)
		return ret;

	/* If all chunks of memory were freed successfully, reset globals */
	kfree(memtrace_array);
	memtrace_array = NULL;
	memtrace_size = 0;
	memtrace_array_nr = 0;
	return 0;
}

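/*
 * Handler for writes to the debugfs "enable" file: any previously
 * allocated regions are freed first, then a region of @val bytes is
 * allocated on each online node. Writing 0 simply frees everything;
 * @val must be a multiple of the memory block size.
 */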
static int memtrace_enable_set(void *data, u64 val)
{
	int rc = -EAGAIN;
	u64 bytes;

	/*
	 * Don't attempt to do anything if size isn't aligned to a memory
	 * block or equal to zero.
	 */
	bytes = memory_block_size_bytes();
	if (val & (bytes - 1)) {
		pr_err("Value must be aligned to 0x%llx\n", bytes);
		return -EINVAL;
	}

	mutex_lock(&memtrace_mutex);

	/* Free all previously allocated memory. */
	if (memtrace_size && memtrace_free_regions())
		goto out_unlock;

	if (!val) {
		rc = 0;
		goto out_unlock;
	}

	/* Allocate memory. */
	if (memtrace_init_regions_runtime(val))
		goto out_unlock;

	if (memtrace_init_debugfs())
		goto out_unlock;

	memtrace_size = val;
	rc = 0;
out_unlock:
	mutex_unlock(&memtrace_mutex);
	return rc;
}

static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
			memtrace_enable_set, "0x%016llx\n");

static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  powerpc_debugfs_root);

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);

	return 0;
}
machine_device_initcall(powernv, memtrace_init);
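
/*
 * Example usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug and the memory block size is no larger than 1 GiB:
 *
 *   # allocate a 1 GiB trace buffer on every online node
 *   echo 0x40000000 > /sys/kernel/debug/powerpc/memtrace/enable
 *   # dump the node 0 buffer
 *   cat /sys/kernel/debug/powerpc/memtrace/00000000/trace > trace.bin
 *   # free the buffers again
 *   echo 0 > /sys/kernel/debug/powerpc/memtrace/enable
 */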