1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2015 Intel Corporation. All rights reserved. */ 3 #include <linux/device.h> 4 #include <linux/io.h> 5 #include <linux/kasan.h> 6 #include <linux/memory_hotplug.h> 7 #include <linux/mm.h> 8 #include <linux/pfn_t.h> 9 #include <linux/swap.h> 10 #include <linux/mmzone.h> 11 #include <linux/swapops.h> 12 #include <linux/types.h> 13 #include <linux/wait_bit.h> 14 #include <linux/xarray.h> 15 16 static DEFINE_XARRAY(pgmap_array); 17 18 /* 19 * The memremap() and memremap_pages() interfaces are alternately used 20 * to map persistent memory namespaces. These interfaces place different 21 * constraints on the alignment and size of the mapping (namespace). 22 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can 23 * only map subsections (2MB), and at least one architecture (PowerPC) 24 * the minimum mapping granularity of memremap_pages() is 16MB. 25 * 26 * The role of memremap_compat_align() is to communicate the minimum 27 * arch supported alignment of a namespace such that it can freely 28 * switch modes without violating the arch constraint. Namely, do not 29 * allow a namespace to be PAGE_SIZE aligned since that namespace may be 30 * reconfigured into a mode that requires SUBSECTION_SIZE alignment. 31 */ 32 #ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN 33 unsigned long memremap_compat_align(void) 34 { 35 return SUBSECTION_SIZE; 36 } 37 EXPORT_SYMBOL_GPL(memremap_compat_align); 38 #endif 39 40 #ifdef CONFIG_DEV_PAGEMAP_OPS 41 DEFINE_STATIC_KEY_FALSE(devmap_managed_key); 42 EXPORT_SYMBOL(devmap_managed_key); 43 44 static void devmap_managed_enable_put(struct dev_pagemap *pgmap) 45 { 46 if (pgmap->type == MEMORY_DEVICE_PRIVATE || 47 pgmap->type == MEMORY_DEVICE_FS_DAX) 48 static_branch_dec(&devmap_managed_key); 49 } 50 51 static void devmap_managed_enable_get(struct dev_pagemap *pgmap) 52 { 53 if (pgmap->type == MEMORY_DEVICE_PRIVATE || 54 pgmap->type == MEMORY_DEVICE_FS_DAX) 55 static_branch_inc(&devmap_managed_key); 56 } 57 #else 58 static void devmap_managed_enable_get(struct dev_pagemap *pgmap) 59 { 60 } 61 static void devmap_managed_enable_put(struct dev_pagemap *pgmap) 62 { 63 } 64 #endif /* CONFIG_DEV_PAGEMAP_OPS */ 65 66 static void pgmap_array_delete(struct range *range) 67 { 68 xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), 69 NULL, GFP_KERNEL); 70 synchronize_rcu(); 71 } 72 73 static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id) 74 { 75 struct range *range = &pgmap->ranges[range_id]; 76 unsigned long pfn = PHYS_PFN(range->start); 77 78 if (range_id) 79 return pfn; 80 return pfn + vmem_altmap_offset(pgmap_altmap(pgmap)); 81 } 82 83 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) 84 { 85 int i; 86 87 for (i = 0; i < pgmap->nr_range; i++) { 88 struct range *range = &pgmap->ranges[i]; 89 90 if (pfn >= PHYS_PFN(range->start) && 91 pfn <= PHYS_PFN(range->end)) 92 return pfn >= pfn_first(pgmap, i); 93 } 94 95 return false; 96 } 97 98 static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id) 99 { 100 const struct range *range = &pgmap->ranges[range_id]; 101 102 return (range->start + range_len(range)) >> PAGE_SHIFT; 103 } 104 105 static unsigned long pfn_next(struct dev_pagemap *pgmap, unsigned long pfn) 106 { 107 if (pfn % (1024 << pgmap->vmemmap_shift)) 108 cond_resched(); 109 return pfn + pgmap_vmemmap_nr(pgmap); 110 } 111 112 static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id) 113 { 114 return (pfn_end(pgmap, range_id) - 115 pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift; 116 } 117 118 #define for_each_device_pfn(pfn, map, i) \ 119 for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); \ 120 pfn = pfn_next(map, pfn)) 121 122 static void pageunmap_range(struct dev_pagemap *pgmap, int range_id) 123 { 124 struct range *range = &pgmap->ranges[range_id]; 125 struct page *first_page; 126 127 /* make sure to access a memmap that was actually initialized */ 128 first_page = pfn_to_page(pfn_first(pgmap, range_id)); 129 130 /* pages are dead and unused, undo the arch mapping */ 131 mem_hotplug_begin(); 132 remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start), 133 PHYS_PFN(range_len(range))); 134 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { 135 __remove_pages(PHYS_PFN(range->start), 136 PHYS_PFN(range_len(range)), NULL); 137 } else { 138 arch_remove_memory(range->start, range_len(range), 139 pgmap_altmap(pgmap)); 140 kasan_remove_zero_shadow(__va(range->start), range_len(range)); 141 } 142 mem_hotplug_done(); 143 144 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range)); 145 pgmap_array_delete(range); 146 } 147 148 void memunmap_pages(struct dev_pagemap *pgmap) 149 { 150 unsigned long pfn; 151 int i; 152 153 percpu_ref_kill(&pgmap->ref); 154 for (i = 0; i < pgmap->nr_range; i++) 155 for_each_device_pfn(pfn, pgmap, i) 156 put_page(pfn_to_page(pfn)); 157 wait_for_completion(&pgmap->done); 158 percpu_ref_exit(&pgmap->ref); 159 160 for (i = 0; i < pgmap->nr_range; i++) 161 pageunmap_range(pgmap, i); 162 163 WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); 164 devmap_managed_enable_put(pgmap); 165 } 166 EXPORT_SYMBOL_GPL(memunmap_pages); 167 168 static void devm_memremap_pages_release(void *data) 169 { 170 memunmap_pages(data); 171 } 172 173 static void dev_pagemap_percpu_release(struct percpu_ref *ref) 174 { 175 struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref); 176 177 complete(&pgmap->done); 178 } 179 180 static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params, 181 int range_id, int nid) 182 { 183 const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE; 184 struct range *range = &pgmap->ranges[range_id]; 185 struct dev_pagemap *conflict_pgmap; 186 int error, is_ram; 187 188 if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0, 189 "altmap not supported for multiple ranges\n")) 190 return -EINVAL; 191 192 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL); 193 if (conflict_pgmap) { 194 WARN(1, "Conflicting mapping in same section\n"); 195 put_dev_pagemap(conflict_pgmap); 196 return -ENOMEM; 197 } 198 199 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL); 200 if (conflict_pgmap) { 201 WARN(1, "Conflicting mapping in same section\n"); 202 put_dev_pagemap(conflict_pgmap); 203 return -ENOMEM; 204 } 205 206 is_ram = region_intersects(range->start, range_len(range), 207 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE); 208 209 if (is_ram != REGION_DISJOINT) { 210 WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n", 211 is_ram == REGION_MIXED ? "mixed" : "ram", 212 range->start, range->end); 213 return -ENXIO; 214 } 215 216 error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start), 217 PHYS_PFN(range->end), pgmap, GFP_KERNEL)); 218 if (error) 219 return error; 220 221 if (nid < 0) 222 nid = numa_mem_id(); 223 224 error = track_pfn_remap(NULL, ¶ms->pgprot, PHYS_PFN(range->start), 0, 225 range_len(range)); 226 if (error) 227 goto err_pfn_remap; 228 229 if (!mhp_range_allowed(range->start, range_len(range), !is_private)) { 230 error = -EINVAL; 231 goto err_pfn_remap; 232 } 233 234 mem_hotplug_begin(); 235 236 /* 237 * For device private memory we call add_pages() as we only need to 238 * allocate and initialize struct page for the device memory. More- 239 * over the device memory is un-accessible thus we do not want to 240 * create a linear mapping for the memory like arch_add_memory() 241 * would do. 242 * 243 * For all other device memory types, which are accessible by 244 * the CPU, we do want the linear mapping and thus use 245 * arch_add_memory(). 246 */ 247 if (is_private) { 248 error = add_pages(nid, PHYS_PFN(range->start), 249 PHYS_PFN(range_len(range)), params); 250 } else { 251 error = kasan_add_zero_shadow(__va(range->start), range_len(range)); 252 if (error) { 253 mem_hotplug_done(); 254 goto err_kasan; 255 } 256 257 error = arch_add_memory(nid, range->start, range_len(range), 258 params); 259 } 260 261 if (!error) { 262 struct zone *zone; 263 264 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; 265 move_pfn_range_to_zone(zone, PHYS_PFN(range->start), 266 PHYS_PFN(range_len(range)), params->altmap, 267 MIGRATE_MOVABLE); 268 } 269 270 mem_hotplug_done(); 271 if (error) 272 goto err_add_memory; 273 274 /* 275 * Initialization of the pages has been deferred until now in order 276 * to allow us to do the work while not holding the hotplug lock. 277 */ 278 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], 279 PHYS_PFN(range->start), 280 PHYS_PFN(range_len(range)), pgmap); 281 percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id)); 282 return 0; 283 284 err_add_memory: 285 if (!is_private) 286 kasan_remove_zero_shadow(__va(range->start), range_len(range)); 287 err_kasan: 288 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range)); 289 err_pfn_remap: 290 pgmap_array_delete(range); 291 return error; 292 } 293 294 295 /* 296 * Not device managed version of dev_memremap_pages, undone by 297 * memunmap_pages(). Please use dev_memremap_pages if you have a struct 298 * device available. 299 */ 300 void *memremap_pages(struct dev_pagemap *pgmap, int nid) 301 { 302 struct mhp_params params = { 303 .altmap = pgmap_altmap(pgmap), 304 .pgprot = PAGE_KERNEL, 305 }; 306 const int nr_range = pgmap->nr_range; 307 int error, i; 308 309 if (WARN_ONCE(!nr_range, "nr_range must be specified\n")) 310 return ERR_PTR(-EINVAL); 311 312 switch (pgmap->type) { 313 case MEMORY_DEVICE_PRIVATE: 314 if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) { 315 WARN(1, "Device private memory not supported\n"); 316 return ERR_PTR(-EINVAL); 317 } 318 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) { 319 WARN(1, "Missing migrate_to_ram method\n"); 320 return ERR_PTR(-EINVAL); 321 } 322 if (!pgmap->ops->page_free) { 323 WARN(1, "Missing page_free method\n"); 324 return ERR_PTR(-EINVAL); 325 } 326 if (!pgmap->owner) { 327 WARN(1, "Missing owner\n"); 328 return ERR_PTR(-EINVAL); 329 } 330 break; 331 case MEMORY_DEVICE_FS_DAX: 332 if (!IS_ENABLED(CONFIG_ZONE_DEVICE) || 333 IS_ENABLED(CONFIG_FS_DAX_LIMITED)) { 334 WARN(1, "File system DAX not supported\n"); 335 return ERR_PTR(-EINVAL); 336 } 337 break; 338 case MEMORY_DEVICE_GENERIC: 339 break; 340 case MEMORY_DEVICE_PCI_P2PDMA: 341 params.pgprot = pgprot_noncached(params.pgprot); 342 break; 343 default: 344 WARN(1, "Invalid pgmap type %d\n", pgmap->type); 345 break; 346 } 347 348 init_completion(&pgmap->done); 349 error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0, 350 GFP_KERNEL); 351 if (error) 352 return ERR_PTR(error); 353 354 devmap_managed_enable_get(pgmap); 355 356 /* 357 * Clear the pgmap nr_range as it will be incremented for each 358 * successfully processed range. This communicates how many 359 * regions to unwind in the abort case. 360 */ 361 pgmap->nr_range = 0; 362 error = 0; 363 for (i = 0; i < nr_range; i++) { 364 error = pagemap_range(pgmap, ¶ms, i, nid); 365 if (error) 366 break; 367 pgmap->nr_range++; 368 } 369 370 if (i < nr_range) { 371 memunmap_pages(pgmap); 372 pgmap->nr_range = nr_range; 373 return ERR_PTR(error); 374 } 375 376 return __va(pgmap->ranges[0].start); 377 } 378 EXPORT_SYMBOL_GPL(memremap_pages); 379 380 /** 381 * devm_memremap_pages - remap and provide memmap backing for the given resource 382 * @dev: hosting device for @res 383 * @pgmap: pointer to a struct dev_pagemap 384 * 385 * Notes: 386 * 1/ At a minimum the res and type members of @pgmap must be initialized 387 * by the caller before passing it to this function 388 * 389 * 2/ The altmap field may optionally be initialized, in which case 390 * PGMAP_ALTMAP_VALID must be set in pgmap->flags. 391 * 392 * 3/ The ref field may optionally be provided, in which pgmap->ref must be 393 * 'live' on entry and will be killed and reaped at 394 * devm_memremap_pages_release() time, or if this routine fails. 395 * 396 * 4/ range is expected to be a host memory range that could feasibly be 397 * treated as a "System RAM" range, i.e. not a device mmio range, but 398 * this is not enforced. 399 */ 400 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) 401 { 402 int error; 403 void *ret; 404 405 ret = memremap_pages(pgmap, dev_to_node(dev)); 406 if (IS_ERR(ret)) 407 return ret; 408 409 error = devm_add_action_or_reset(dev, devm_memremap_pages_release, 410 pgmap); 411 if (error) 412 return ERR_PTR(error); 413 return ret; 414 } 415 EXPORT_SYMBOL_GPL(devm_memremap_pages); 416 417 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap) 418 { 419 devm_release_action(dev, devm_memremap_pages_release, pgmap); 420 } 421 EXPORT_SYMBOL_GPL(devm_memunmap_pages); 422 423 unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) 424 { 425 /* number of pfns from base where pfn_to_page() is valid */ 426 if (altmap) 427 return altmap->reserve + altmap->free; 428 return 0; 429 } 430 431 void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns) 432 { 433 altmap->alloc -= nr_pfns; 434 } 435 436 /** 437 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn 438 * @pfn: page frame number to lookup page_map 439 * @pgmap: optional known pgmap that already has a reference 440 * 441 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap 442 * is non-NULL but does not cover @pfn the reference to it will be released. 443 */ 444 struct dev_pagemap *get_dev_pagemap(unsigned long pfn, 445 struct dev_pagemap *pgmap) 446 { 447 resource_size_t phys = PFN_PHYS(pfn); 448 449 /* 450 * In the cached case we're already holding a live reference. 451 */ 452 if (pgmap) { 453 if (phys >= pgmap->range.start && phys <= pgmap->range.end) 454 return pgmap; 455 put_dev_pagemap(pgmap); 456 } 457 458 /* fall back to slow path lookup */ 459 rcu_read_lock(); 460 pgmap = xa_load(&pgmap_array, PHYS_PFN(phys)); 461 if (pgmap && !percpu_ref_tryget_live(&pgmap->ref)) 462 pgmap = NULL; 463 rcu_read_unlock(); 464 465 return pgmap; 466 } 467 EXPORT_SYMBOL_GPL(get_dev_pagemap); 468 469 #ifdef CONFIG_DEV_PAGEMAP_OPS 470 void free_devmap_managed_page(struct page *page) 471 { 472 /* notify page idle for dax */ 473 if (!is_device_private_page(page)) { 474 wake_up_var(&page->_refcount); 475 return; 476 } 477 478 __ClearPageWaiters(page); 479 480 mem_cgroup_uncharge(page_folio(page)); 481 482 /* 483 * When a device_private page is freed, the page->mapping field 484 * may still contain a (stale) mapping value. For example, the 485 * lower bits of page->mapping may still identify the page as an 486 * anonymous page. Ultimately, this entire field is just stale 487 * and wrong, and it will cause errors if not cleared. One 488 * example is: 489 * 490 * migrate_vma_pages() 491 * migrate_vma_insert_page() 492 * page_add_new_anon_rmap() 493 * __page_set_anon_rmap() 494 * ...checks page->mapping, via PageAnon(page) call, 495 * and incorrectly concludes that the page is an 496 * anonymous page. Therefore, it incorrectly, 497 * silently fails to set up the new anon rmap. 498 * 499 * For other types of ZONE_DEVICE pages, migration is either 500 * handled differently or not done at all, so there is no need 501 * to clear page->mapping. 502 */ 503 page->mapping = NULL; 504 page->pgmap->ops->page_free(page); 505 } 506 #endif /* CONFIG_DEV_PAGEMAP_OPS */ 507