Lines Matching +full:reserved-memory
1 // SPDX-License-Identifier: GPL-2.0+
3 * Device tree based initialization code for reserved memory.
5 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
12 #define pr_fmt(fmt) "OF: reserved mem: " fmt
44 return -ENOMEM; in early_init_dt_alloc_reserved_memory_arch()
59 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
71 rmem->fdt_node = node; in fdt_reserved_mem_save_node()
72 rmem->name = uname; in fdt_reserved_mem_save_node()
73 rmem->base = base; in fdt_reserved_mem_save_node()
74 rmem->size = size; in fdt_reserved_mem_save_node()
81 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
82 * 'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
83 * reserved regions to keep the reserved memory contiguous if possible.
96 /* Skip regions that were not reserved yet */ in __reserved_mem_alloc_in_range()
97 if (rmem->size == 0) in __reserved_mem_alloc_in_range()
101 * If range starts next to an existing reservation, use bottom-up: in __reserved_mem_alloc_in_range()
103 * --RRRR------ in __reserved_mem_alloc_in_range()
105 if (start >= rmem->base && start <= (rmem->base + rmem->size)) in __reserved_mem_alloc_in_range()
109 * If range ends next to an existing reservation, use top-down: in __reserved_mem_alloc_in_range()
111 * -------RRRR----- in __reserved_mem_alloc_in_range()
113 if (end >= rmem->base && end <= (rmem->base + rmem->size)) in __reserved_mem_alloc_in_range()
117 /* Change setting only if either bottom-up or top-down was selected */ in __reserved_mem_alloc_in_range()
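Below is a minimal sketch of how the direction chosen above could be applied around a memblock allocation. alloc_packed() is a hypothetical helper for illustration only, not the kernel function itself, and assumes <linux/memblock.h>.

#include <linux/memblock.h>

/* Hypothetical helper: allocate 'size' within [start, end), packing the new
 * region against a neighbouring reservation by choosing the growth direction. */
static phys_addr_t __init alloc_packed(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					bool neighbour_at_start)
{
	bool prev = memblock_bottom_up();
	phys_addr_t base;

	/* grow bottom-up when the neighbour sits at the low end of the range */
	memblock_set_bottom_up(neighbour_at_start);
	base = memblock_phys_alloc_range(size, align, start, end);
	memblock_set_bottom_up(prev);	/* restore the global setting */

	return base;
}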
132 * __reserved_mem_alloc_size() - allocate reserved memory described by
133 * 'size', 'alignment' and 'alloc-ranges' properties.
148 return -EINVAL; in __reserved_mem_alloc_size()
152 return -EINVAL; in __reserved_mem_alloc_size()
161 return -EINVAL; in __reserved_mem_alloc_size()
166 nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; in __reserved_mem_alloc_size()
170 && of_flat_dt_is_compatible(node, "shared-dma-pool") in __reserved_mem_alloc_size()
175 prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); in __reserved_mem_alloc_size()
179 pr_err("invalid alloc-ranges property in '%s', skipping node.\n", in __reserved_mem_alloc_size()
181 return -EINVAL; in __reserved_mem_alloc_size()
194 pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n", in __reserved_mem_alloc_size()
199 len -= t_len; in __reserved_mem_alloc_size()
206 pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n", in __reserved_mem_alloc_size()
211 pr_err("failed to allocate memory for node '%s': size %lu MiB\n", in __reserved_mem_alloc_size()
213 return -ENOMEM; in __reserved_mem_alloc_size()
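For context, a condensed sketch of the 'alloc-ranges' walk performed above, using the same FDT helpers this file already relies on; alloc_in_window() is a hypothetical stand-in for the actual allocation step, and error handling is trimmed.

	const __be32 *prop;
	int len, t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base = 0;

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	while (prop && len >= t_len) {
		phys_addr_t start = dt_mem_next_cell(dt_root_addr_cells, &prop);
		phys_addr_t range = dt_mem_next_cell(dt_root_size_cells, &prop);

		/* try an aligned allocation of 'size' inside this window */
		base = alloc_in_window(size, align, start, start + range);
		if (base)
			break;
		len -= t_len;
	}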
226 * __reserved_mem_init_node() - call region specific reserved memory init code
232 int ret = -ENOENT; in __reserved_mem_init_node()
235 reservedmem_of_init_fn initfn = i->data; in __reserved_mem_init_node()
236 const char *compat = i->compatible; in __reserved_mem_init_node()
238 if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) in __reserved_mem_init_node()
244 rmem->name, compat); in __reserved_mem_init_node()
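Handlers land in the table walked here via RESERVEDMEM_OF_DECLARE(); a minimal registration sketch for a custom compatible (the names my_carveout and "vendor,my-carveout" are hypothetical):

#include <linux/of_reserved_mem.h>

static int __init my_carveout_setup(struct reserved_mem *rmem)
{
	pr_info("carveout at %pa, size %pa\n", &rmem->base, &rmem->size);
	rmem->priv = NULL;	/* per-region state can be hung off ->priv */
	return 0;
}
RESERVEDMEM_OF_DECLARE(my_carveout, "vendor,my-carveout", my_carveout_setup);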
255 if (ra->base < rb->base) in __rmem_cmp()
256 return -1; in __rmem_cmp()
258 if (ra->base > rb->base) in __rmem_cmp()
266 if (ra->size < rb->size) in __rmem_cmp()
267 return -1; in __rmem_cmp()
268 if (ra->size > rb->size) in __rmem_cmp()
271 if (ra->fdt_node < rb->fdt_node) in __rmem_cmp()
272 return -1; in __rmem_cmp()
273 if (ra->fdt_node > rb->fdt_node) in __rmem_cmp()
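The comparator orders regions by base, then size, then fdt_node; the overlap check below applies it with the kernel's sort() over the saved array. A one-line sketch, assuming the reserved_mem[] array and count used throughout this file:

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);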
288 for (i = 0; i < reserved_mem_count - 1; i++) { in __rmem_check_for_overlap()
294 if (this->base + this->size > next->base) { in __rmem_check_for_overlap()
297 this_end = this->base + this->size; in __rmem_check_for_overlap()
298 next_end = next->base + next->size; in __rmem_check_for_overlap()
299 pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n", in __rmem_check_for_overlap()
300 this->name, &this->base, &this_end, in __rmem_check_for_overlap()
301 next->name, &next->base, &next_end); in __rmem_check_for_overlap()
307 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
313 /* check for overlapping reserved regions */ in fdt_init_reserved_mem()
318 unsigned long node = rmem->fdt_node; in fdt_init_reserved_mem()
324 nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; in fdt_init_reserved_mem()
329 rmem->phandle = of_read_number(prop, len/4); in fdt_init_reserved_mem()
331 if (rmem->size == 0) in fdt_init_reserved_mem()
332 err = __reserved_mem_alloc_size(node, rmem->name, in fdt_init_reserved_mem()
333 &rmem->base, &rmem->size); in fdt_init_reserved_mem()
336 if (err != 0 && err != -ENOENT) { in fdt_init_reserved_mem()
338 rmem->name); in fdt_init_reserved_mem()
340 memblock_clear_nomap(rmem->base, rmem->size); in fdt_init_reserved_mem()
342 memblock_phys_free(rmem->base, in fdt_init_reserved_mem()
343 rmem->size); in fdt_init_reserved_mem()
345 phys_addr_t end = rmem->base + rmem->size - 1; in fdt_init_reserved_mem()
350 &rmem->base, &end, (unsigned long)(rmem->size / SZ_1K), in fdt_init_reserved_mem()
352 reusable ? "reusable" : "non-reusable", in fdt_init_reserved_mem()
353 rmem->name ? rmem->name : "unknown"); in fdt_init_reserved_mem()
363 if (!node->phandle) in __find_rmem()
367 if (reserved_mem[i].phandle == node->phandle) in __find_rmem()
382 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
385 * @np: Pointer to the device_node with 'reserved-memory' property
388 * This function assigns respective DMA-mapping operations based on reserved
389 * memory region specified by 'memory-region' property in @np node to the @dev
390 * device. When driver needs to use more than one reserved memory region, it
405 return -EINVAL; in of_reserved_mem_device_init_by_idx()
407 target = of_parse_phandle(np, "memory-region", idx); in of_reserved_mem_device_init_by_idx()
409 return -ENODEV; in of_reserved_mem_device_init_by_idx()
419 if (!rmem || !rmem->ops || !rmem->ops->device_init) in of_reserved_mem_device_init_by_idx()
420 return -EINVAL; in of_reserved_mem_device_init_by_idx()
424 return -ENOMEM; in of_reserved_mem_device_init_by_idx()
426 ret = rmem->ops->device_init(rmem, dev); in of_reserved_mem_device_init_by_idx()
428 rd->dev = dev; in of_reserved_mem_device_init_by_idx()
429 rd->rmem = rmem; in of_reserved_mem_device_init_by_idx()
432 list_add(&rd->list, &of_rmem_assigned_device_list); in of_reserved_mem_device_init_by_idx()
435 dev_info(dev, "assigned reserved memory node %s\n", rmem->name); in of_reserved_mem_device_init_by_idx()
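A minimal driver-side sketch of the call above; foo_probe() is hypothetical and assumes the device's node carries a 'memory-region' phandle.

#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* attach the first "memory-region" entry to this device's DMA setup */
	ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
						 pdev->dev.of_node, 0);
	if (ret && ret != -ENODEV)
		return ret;	/* region present but could not be assigned */

	/* ... rest of probe; coherent allocations may now come from the region */
	return 0;
}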
445 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
448 * @np: pointer to the device node with 'memory-region' property
449 * @name: name of the selected memory region
451 * Returns: 0 on success or a negative error-code on failure.
457 int idx = of_property_match_string(np, "memory-region-names", name); in of_reserved_mem_device_init_by_name()
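The named variant is a thin wrapper around the index lookup; a usage sketch with a hypothetical region name, assuming matching 'memory-region' and 'memory-region-names' entries in the node:

	ret = of_reserved_mem_device_init_by_name(&pdev->dev, pdev->dev.of_node,
						  "framebuffer");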
464 * of_reserved_mem_device_release() - release reserved memory device structures
467 * This function releases structures allocated for memory region handling for
477 if (rd->dev == dev) in of_reserved_mem_device_release()
478 list_move_tail(&rd->list, &release_list); in of_reserved_mem_device_release()
483 if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release) in of_reserved_mem_device_release()
484 rd->rmem->ops->device_release(rd->rmem, dev); in of_reserved_mem_device_release()
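Drivers that assigned a region during probe release it on teardown; a matching sketch (foo_remove() is hypothetical):

static int foo_remove(struct platform_device *pdev)
{
	/* ... driver-specific teardown ... */
	of_reserved_mem_device_release(&pdev->dev);
	return 0;
}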
492 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
493 * @np: node pointer of the desired reserved-memory region
505 if (!np->full_name) in of_reserved_mem_lookup()
508 name = kbasename(np->full_name); in of_reserved_mem_lookup()
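A common consumer pattern built on this lookup, resolving a 'memory-region' phandle into the region's base and size. foo_get_carveout() is a hypothetical helper and assumes the usual OF headers (<linux/of.h>, <linux/of_reserved_mem.h>).

static int foo_get_carveout(struct device *dev, phys_addr_t *base,
			    phys_addr_t *size)
{
	struct device_node *np;
	struct reserved_mem *rmem;

	np = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!np)
		return -ENODEV;

	rmem = of_reserved_mem_lookup(np);
	of_node_put(np);
	if (!rmem)
		return -EINVAL;

	*base = rmem->base;	/* physical start of the carveout */
	*size = rmem->size;
	return 0;
}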