// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 */

#define pr_fmt(fmt) "memtrace: " fmt

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/debugfs.h>
#include <asm/cacheflush.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
        void *mem;
        u64 start;
        u64 size;
        u32 nid;
        struct dentry *dir;
        char name[16];
};

static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;

static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;

static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
                             size_t count, loff_t *ppos)
{
        struct memtrace_entry *ent = filp->private_data;

        return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}

static const struct file_operations memtrace_fops = {
        .llseek = default_llseek,
        .read   = memtrace_read,
        .open   = simple_open,
};

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
                                       unsigned long chunk)
{
        unsigned long i;

        for (i = start; i < stop; i += chunk) {
                flush_dcache_range(i, min(stop, i + chunk));
                cond_resched();
        }
}

static void memtrace_clear_range(unsigned long start_pfn,
                                 unsigned long nr_pages)
{
        unsigned long pfn;

        /* As HIGHMEM does not apply, use clear_page() directly. */
        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
                        cond_resched();
                clear_page(__va(PFN_PHYS(pfn)));
        }
        /*
         * Before we go ahead and use this range as cache inhibited range
         * flush the cache.
         */
        flush_dcache_range_chunked(PFN_PHYS(start_pfn),
                                   PFN_PHYS(start_pfn + nr_pages),
                                   FLUSH_CHUNK_SIZE);
}

static u64 memtrace_alloc_node(u32 nid, u64 size)
{
        const unsigned long nr_pages = PHYS_PFN(size);
        unsigned long pfn, start_pfn;
        struct page *page;

        /*
         * Trace memory needs to be aligned to the size, which is guaranteed
         * by alloc_contig_pages().
         */
        page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
                                  __GFP_NOWARN, nid, NULL);
        if (!page)
                return 0;
        start_pfn = page_to_pfn(page);

        /*
         * Clear the range while we still have a linear mapping.
         *
         * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
         */
        memtrace_clear_range(start_pfn, nr_pages);

        /*
         * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
         * dumping, ...) should be touching these pages.
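         *
         * Note: this relies on the generic PageOffline() contract; dump
         * tools such as makedumpfile are expected to skip such pages, but
         * that is an assumption about the tooling, not something this
         * driver enforces.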
         */
        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
                __SetPageOffline(pfn_to_page(pfn));

        arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);

        return PFN_PHYS(start_pfn);
}

static int memtrace_init_regions_runtime(u64 size)
{
        u32 nid;
        u64 m;

        memtrace_array = kcalloc(num_online_nodes(),
                                 sizeof(struct memtrace_entry), GFP_KERNEL);
        if (!memtrace_array) {
                pr_err("Failed to allocate memtrace_array\n");
                return -EINVAL;
        }

        for_each_online_node(nid) {
                m = memtrace_alloc_node(nid, size);

                /*
                 * A node might not have any local memory, so warn but
                 * continue on.
                 */
                if (!m) {
                        pr_err("Failed to allocate trace memory on node %d\n", nid);
                        continue;
                }

                pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

                memtrace_array[memtrace_array_nr].start = m;
                memtrace_array[memtrace_array_nr].size = size;
                memtrace_array[memtrace_array_nr].nid = nid;
                memtrace_array_nr++;
        }

        return 0;
}

static struct dentry *memtrace_debugfs_dir;

static int memtrace_init_debugfs(void)
{
        int ret = 0;
        int i;

        for (i = 0; i < memtrace_array_nr; i++) {
                struct dentry *dir;
                struct memtrace_entry *ent = &memtrace_array[i];

                ent->mem = ioremap(ent->start, ent->size);
                /* Warn but continue on */
                if (!ent->mem) {
                        pr_err("Failed to map trace memory at 0x%llx\n",
                               ent->start);
                        ret = -1;
                        continue;
                }

                snprintf(ent->name, 16, "%08x", ent->nid);
                dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);

                ent->dir = dir;
                debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
                debugfs_create_x64("start", 0400, dir, &ent->start);
                debugfs_create_x64("size", 0400, dir, &ent->size);
        }

        return ret;
}

static int memtrace_free(int nid, u64 start, u64 size)
{
        struct mhp_params params = { .pgprot = PAGE_KERNEL };
        const unsigned long nr_pages = PHYS_PFN(size);
        const unsigned long start_pfn = PHYS_PFN(start);
        unsigned long pfn;
        int ret;

        ret = arch_create_linear_mapping(nid, start, size, &params);
        if (ret)
                return ret;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
                __ClearPageOffline(pfn_to_page(pfn));

        free_contig_range(start_pfn, nr_pages);
        return 0;
}

/*
 * Iterate through the chunks of memory we allocated and attempt to expose
 * them back to the kernel.
 */
static int memtrace_free_regions(void)
{
        int i, ret = 0;
        struct memtrace_entry *ent;

        for (i = memtrace_array_nr - 1; i >= 0; i--) {
                ent = &memtrace_array[i];

                /* We have freed this chunk previously */
                if (ent->nid == NUMA_NO_NODE)
                        continue;

                /* Remove from io mappings */
                if (ent->mem) {
                        iounmap(ent->mem);
                        ent->mem = NULL;
                }

                if (memtrace_free(ent->nid, ent->start, ent->size)) {
                        pr_err("Failed to free trace memory on node %d\n",
                               ent->nid);
                        ret += 1;
                        continue;
                }

                /*
                 * Memory was freed successfully so clean up references to it
                 * so on reentry we can tell that this chunk was freed.
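                 * (ent->nid == NUMA_NO_NODE serves as the "already freed"
                 * marker tested at the top of this loop.)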
                 */
                debugfs_remove_recursive(ent->dir);
                pr_info("Freed trace memory back on node %d\n", ent->nid);
                ent->size = ent->start = ent->nid = NUMA_NO_NODE;
        }
        if (ret)
                return ret;

        /* If all chunks of memory were freed successfully, reset globals */
        kfree(memtrace_array);
        memtrace_array = NULL;
        memtrace_size = 0;
        memtrace_array_nr = 0;
        return 0;
}

static int memtrace_enable_set(void *data, u64 val)
{
        int rc = -EAGAIN;
        u64 bytes;

        /*
         * Don't attempt to do anything if size isn't aligned to a memory
         * block or equal to zero.
         */
        bytes = memory_block_size_bytes();
        if (val & (bytes - 1)) {
                pr_err("Value must be aligned with 0x%llx\n", bytes);
                return -EINVAL;
        }

        mutex_lock(&memtrace_mutex);

        /* Free all previously allocated memory. */
        if (memtrace_size && memtrace_free_regions())
                goto out_unlock;

        if (!val) {
                rc = 0;
                goto out_unlock;
        }

        /* Allocate memory. */
        if (memtrace_init_regions_runtime(val))
                goto out_unlock;

        if (memtrace_init_debugfs())
                goto out_unlock;

        memtrace_size = val;
        rc = 0;
out_unlock:
        mutex_unlock(&memtrace_mutex);
        return rc;
}

static int memtrace_enable_get(void *data, u64 *val)
{
        *val = memtrace_size;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
                        memtrace_enable_set, "0x%016llx\n");

static int memtrace_init(void)
{
        memtrace_debugfs_dir = debugfs_create_dir("memtrace",
                                                  powerpc_debugfs_root);

        debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
                            NULL, &memtrace_init_fops);

        return 0;
}
machine_device_initcall(powernv, memtrace_init);
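
/*
 * Example usage from userspace (a sketch: it assumes debugfs is mounted
 * at the usual /sys/kernel/debug and that 1GiB is a multiple of this
 * system's memory block size, which can be checked via
 * /sys/devices/system/memory/block_size_bytes):
 *
 *   # Allocate 1GiB of trace memory on each online node:
 *   echo 0x40000000 > /sys/kernel/debug/powerpc/memtrace/enable
 *
 *   # Read back the buffer carved out of node 0 (per-node directories
 *   # are named "%08x" after the node id):
 *   dd if=/sys/kernel/debug/powerpc/memtrace/00000000/trace of=node0.bin
 *
 *   # Free all trace memory back to the kernel:
 *   echo 0 > /sys/kernel/debug/powerpc/memtrace/enable
 */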