1 /* 2 * Device tree based initialization code for reserved memory. 3 * 4 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved. 5 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com 7 * Author: Marek Szyprowski <m.szyprowski@samsung.com> 8 * Author: Josh Cartwright <joshc@codeaurora.org> 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License as 12 * published by the Free Software Foundation; either version 2 of the 13 * License or (at your optional) any later version of the license. 14 */ 15 16 #include <linux/err.h> 17 #include <linux/of.h> 18 #include <linux/of_fdt.h> 19 #include <linux/of_platform.h> 20 #include <linux/mm.h> 21 #include <linux/sizes.h> 22 #include <linux/of_reserved_mem.h> 23 #include <linux/sort.h> 24 25 #define MAX_RESERVED_REGIONS 16 26 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; 27 static int reserved_mem_count; 28 29 #if defined(CONFIG_HAVE_MEMBLOCK) 30 #include <linux/memblock.h> 31 int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, 32 phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap, 33 phys_addr_t *res_base) 34 { 35 /* 36 * We use __memblock_alloc_base() because memblock_alloc_base() 37 * panic()s on allocation failure. 38 */ 39 phys_addr_t base = __memblock_alloc_base(size, align, end); 40 if (!base) 41 return -ENOMEM; 42 43 /* 44 * Check if the allocated region fits in to start..end window 45 */ 46 if (base < start) { 47 memblock_free(base, size); 48 return -ENOMEM; 49 } 50 51 *res_base = base; 52 if (nomap) 53 return memblock_remove(base, size); 54 return 0; 55 } 56 #else 57 int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, 58 phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap, 59 phys_addr_t *res_base) 60 { 61 pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n", 62 size, nomap ? " (nomap)" : ""); 63 return -ENOSYS; 64 } 65 #endif 66 67 /** 68 * res_mem_save_node() - save fdt node for second pass initialization 69 */ 70 void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname, 71 phys_addr_t base, phys_addr_t size) 72 { 73 struct reserved_mem *rmem = &reserved_mem[reserved_mem_count]; 74 75 if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) { 76 pr_err("Reserved memory: not enough space all defined regions.\n"); 77 return; 78 } 79 80 rmem->fdt_node = node; 81 rmem->name = uname; 82 rmem->base = base; 83 rmem->size = size; 84 85 reserved_mem_count++; 86 return; 87 } 88 89 /** 90 * res_mem_alloc_size() - allocate reserved memory described by 'size', 'align' 91 * and 'alloc-ranges' properties 92 */ 93 static int __init __reserved_mem_alloc_size(unsigned long node, 94 const char *uname, phys_addr_t *res_base, phys_addr_t *res_size) 95 { 96 int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32); 97 phys_addr_t start = 0, end = 0; 98 phys_addr_t base = 0, align = 0, size; 99 int len; 100 const __be32 *prop; 101 int nomap; 102 int ret; 103 104 prop = of_get_flat_dt_prop(node, "size", &len); 105 if (!prop) 106 return -EINVAL; 107 108 if (len != dt_root_size_cells * sizeof(__be32)) { 109 pr_err("Reserved memory: invalid size property in '%s' node.\n", 110 uname); 111 return -EINVAL; 112 } 113 size = dt_mem_next_cell(dt_root_size_cells, &prop); 114 115 nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; 116 117 prop = of_get_flat_dt_prop(node, "alignment", &len); 118 if (prop) { 119 if (len != dt_root_addr_cells * sizeof(__be32)) { 120 pr_err("Reserved memory: invalid alignment property in '%s' node.\n", 121 uname); 122 return -EINVAL; 123 } 124 align = dt_mem_next_cell(dt_root_addr_cells, &prop); 125 } 126 127 /* Need adjust the alignment to satisfy the CMA requirement */ 128 if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) 129 align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); 130 131 prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); 132 if (prop) { 133 134 if (len % t_len != 0) { 135 pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n", 136 uname); 137 return -EINVAL; 138 } 139 140 base = 0; 141 142 while (len > 0) { 143 start = dt_mem_next_cell(dt_root_addr_cells, &prop); 144 end = start + dt_mem_next_cell(dt_root_size_cells, 145 &prop); 146 147 ret = early_init_dt_alloc_reserved_memory_arch(size, 148 align, start, end, nomap, &base); 149 if (ret == 0) { 150 pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", 151 uname, &base, 152 (unsigned long)size / SZ_1M); 153 break; 154 } 155 len -= t_len; 156 } 157 158 } else { 159 ret = early_init_dt_alloc_reserved_memory_arch(size, align, 160 0, 0, nomap, &base); 161 if (ret == 0) 162 pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", 163 uname, &base, (unsigned long)size / SZ_1M); 164 } 165 166 if (base == 0) { 167 pr_info("Reserved memory: failed to allocate memory for node '%s'\n", 168 uname); 169 return -ENOMEM; 170 } 171 172 *res_base = base; 173 *res_size = size; 174 175 return 0; 176 } 177 178 static const struct of_device_id __rmem_of_table_sentinel 179 __used __section(__reservedmem_of_table_end); 180 181 /** 182 * res_mem_init_node() - call region specific reserved memory init code 183 */ 184 static int __init __reserved_mem_init_node(struct reserved_mem *rmem) 185 { 186 extern const struct of_device_id __reservedmem_of_table[]; 187 const struct of_device_id *i; 188 189 for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { 190 reservedmem_of_init_fn initfn = i->data; 191 const char *compat = i->compatible; 192 193 if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) 194 continue; 195 196 if (initfn(rmem) == 0) { 197 pr_info("Reserved memory: initialized node %s, compatible id %s\n", 198 rmem->name, compat); 199 return 0; 200 } 201 } 202 return -ENOENT; 203 } 204 205 static int __init __rmem_cmp(const void *a, const void *b) 206 { 207 const struct reserved_mem *ra = a, *rb = b; 208 209 return ra->base - rb->base; 210 } 211 212 static void __init __rmem_check_for_overlap(void) 213 { 214 int i; 215 216 if (reserved_mem_count < 2) 217 return; 218 219 sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]), 220 __rmem_cmp, NULL); 221 for (i = 0; i < reserved_mem_count - 1; i++) { 222 struct reserved_mem *this, *next; 223 224 this = &reserved_mem[i]; 225 next = &reserved_mem[i + 1]; 226 if (!(this->base && next->base)) 227 continue; 228 if (this->base + this->size > next->base) { 229 phys_addr_t this_end, next_end; 230 231 this_end = this->base + this->size; 232 next_end = next->base + next->size; 233 pr_err("Reserved memory: OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n", 234 this->name, &this->base, &this_end, 235 next->name, &next->base, &next_end); 236 } 237 } 238 } 239 240 /** 241 * fdt_init_reserved_mem - allocate and init all saved reserved memory regions 242 */ 243 void __init fdt_init_reserved_mem(void) 244 { 245 int i; 246 247 /* check for overlapping reserved regions */ 248 __rmem_check_for_overlap(); 249 250 for (i = 0; i < reserved_mem_count; i++) { 251 struct reserved_mem *rmem = &reserved_mem[i]; 252 unsigned long node = rmem->fdt_node; 253 int len; 254 const __be32 *prop; 255 int err = 0; 256 257 prop = of_get_flat_dt_prop(node, "phandle", &len); 258 if (!prop) 259 prop = of_get_flat_dt_prop(node, "linux,phandle", &len); 260 if (prop) 261 rmem->phandle = of_read_number(prop, len/4); 262 263 if (rmem->size == 0) 264 err = __reserved_mem_alloc_size(node, rmem->name, 265 &rmem->base, &rmem->size); 266 if (err == 0) 267 __reserved_mem_init_node(rmem); 268 } 269 } 270 271 static inline struct reserved_mem *__find_rmem(struct device_node *node) 272 { 273 unsigned int i; 274 275 if (!node->phandle) 276 return NULL; 277 278 for (i = 0; i < reserved_mem_count; i++) 279 if (reserved_mem[i].phandle == node->phandle) 280 return &reserved_mem[i]; 281 return NULL; 282 } 283 284 /** 285 * of_reserved_mem_device_init() - assign reserved memory region to given device 286 * 287 * This function assign memory region pointed by "memory-region" device tree 288 * property to the given device. 289 */ 290 int of_reserved_mem_device_init(struct device *dev) 291 { 292 struct reserved_mem *rmem; 293 struct device_node *np; 294 int ret; 295 296 np = of_parse_phandle(dev->of_node, "memory-region", 0); 297 if (!np) 298 return -ENODEV; 299 300 rmem = __find_rmem(np); 301 of_node_put(np); 302 303 if (!rmem || !rmem->ops || !rmem->ops->device_init) 304 return -EINVAL; 305 306 ret = rmem->ops->device_init(rmem, dev); 307 if (ret == 0) 308 dev_info(dev, "assigned reserved memory node %s\n", rmem->name); 309 310 return ret; 311 } 312 EXPORT_SYMBOL_GPL(of_reserved_mem_device_init); 313 314 /** 315 * of_reserved_mem_device_release() - release reserved memory device structures 316 * 317 * This function releases structures allocated for memory region handling for 318 * the given device. 319 */ 320 void of_reserved_mem_device_release(struct device *dev) 321 { 322 struct reserved_mem *rmem; 323 struct device_node *np; 324 325 np = of_parse_phandle(dev->of_node, "memory-region", 0); 326 if (!np) 327 return; 328 329 rmem = __find_rmem(np); 330 of_node_put(np); 331 332 if (!rmem || !rmem->ops || !rmem->ops->device_release) 333 return; 334 335 rmem->ops->device_release(rmem, dev); 336 } 337 EXPORT_SYMBOL_GPL(of_reserved_mem_device_release); 338