// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>

#include "of_private.h"

#define MAX_RESERVED_REGIONS	64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	kmemleak_ignore_phys(base);

	return err;
}

/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				       phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
}
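
/*
 * For illustration only: a statically placed region is described in the
 * device tree with a 'reg' property under /reserved-memory (node names and
 * addresses below are hypothetical):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		framebuffer@78000000 {
 *			reg = <0x78000000 0x00800000>;
 *			no-map;
 *		};
 *	};
 *
 * The early FDT scanning code reserves such regions directly and calls
 * fdt_reserved_mem_save_node() above so that they can be initialized in the
 * second pass by fdt_init_reserved_mem().
 */
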
/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
				uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					uname, &base,
					(unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}
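
/*
 * For illustration only: a dynamically allocated region is described by
 * 'size' (and optionally 'alignment' and 'alloc-ranges') instead of 'reg'.
 * The node name and values below are hypothetical:
 *
 *	linux,cma {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x4000000>;
 *		alignment = <0x400000>;
 *		alloc-ranges = <0x40000000 0x10000000>;
 *	};
 *
 * __reserved_mem_alloc_size() above parses exactly these properties and
 * tries each alloc-ranges window in turn until an allocation succeeds.
 */
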
static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
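
/*
 * For illustration only: region-specific init code registers itself in
 * __reservedmem_of_table with RESERVEDMEM_OF_DECLARE() from
 * <linux/of_reserved_mem.h>. The name, compatible string and setup logic
 * below are hypothetical:
 *
 *	static int __init my_pool_setup(struct reserved_mem *rmem)
 *	{
 *		pr_debug("my-pool at %pa, size %pa\n", &rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_setup);
 *
 * __reserved_mem_init_node() above walks this table and invokes the init
 * functions whose compatible matches, stopping at the first that succeeds.
 */
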
static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;
		bool nomap;

		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len/4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						&rmem->base, &rmem->size);
		if (err == 0) {
			err = __reserved_mem_init_node(rmem);
			if (err != 0 && err != -ENOENT) {
				pr_info("node %s compatible matching fail\n",
					rmem->name);
				if (nomap)
					memblock_clear_nomap(rmem->base, rmem->size);
				else
					memblock_phys_free(rmem->base,
							   rmem->size);
			} else {
				phys_addr_t end = rmem->base + rmem->size - 1;
				bool reusable =
					(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

				pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
					&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
					nomap ? "nomap" : "map",
					reusable ? "reusable" : "non-reusable",
					rmem->name ? rmem->name : "unknown");
			}
		}
	}
}

static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *	given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on reserved
 * memory region specified by 'memory-region' property in @np node to the @dev
 * device. When a driver needs to use more than one reserved memory region, it
 * should allocate child devices and initialize the regions by name, one for
 * each child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = __find_rmem(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *	to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
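
/*
 * For illustration only: a typical consumer looks up its region from the
 * 'memory-region' / 'memory-region-names' properties in its probe path.
 * The region name "dma-pool" below is hypothetical:
 *
 *	static int my_drv_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init_by_name(&pdev->dev,
 *							  pdev->dev.of_node,
 *							  "dma-pool");
 *		if (ret)
 *			return ret;
 *		// For a 'shared-dma-pool' region, subsequent DMA allocations
 *		// for &pdev->dev are now served from that region.
 *		return 0;
 *	}
 *
 * of_reserved_mem_device_release(&pdev->dev), defined below, undoes the
 * assignment and is typically called from the driver's remove path.
 */
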
/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
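
/*
 * For illustration only: a driver that needs the physical extents of its
 * region (rather than DMA-mapping setup) can resolve the phandle itself and
 * use of_reserved_mem_lookup(). The property index used below is hypothetical:
 *
 *	struct device_node *np;
 *	struct reserved_mem *rmem;
 *
 *	np = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	if (!np)
 *		return -ENODEV;
 *	rmem = of_reserved_mem_lookup(np);
 *	of_node_put(np);
 *	if (!rmem)
 *		return -ENODEV;
 *	// rmem->base and rmem->size describe the reserved range.
 */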