/*
 * Dynamic reconfiguration memory support
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "drmem: " fmt

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/drmem.h>

static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;

u64 drmem_lmb_memory_max(void)
{
	struct drmem_lmb *last_lmb;

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	return last_lmb->base_addr + drmem_lmb_size();
}

static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
{
	/*
	 * Return the value of the lmb flags field minus the reserved
	 * bit used internally for hotplug processing.
	 */
	return lmb->flags & ~DRMEM_LMB_RESERVED;
}

static struct property *clone_property(struct property *prop, u32 prop_sz)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
		return NULL;
	}

	new_prop->length = prop_sz;
#if defined(CONFIG_OF_DYNAMIC)
	of_property_set_flag(new_prop, OF_DYNAMIC);
#endif
	return new_prop;
}

static int drmem_update_dt_v1(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v1 *dr_cell;
	struct drmem_lmb *lmb;
	u32 *p;

	new_prop = clone_property(prop, prop->length);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(drmem_info->n_lmbs);

	dr_cell = (struct of_drconf_cell_v1 *)p;

	for_each_drmem_lmb(lmb) {
		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));

		dr_cell++;
	}

	of_update_property(memory, new_prop);
	return 0;
}

static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				struct drmem_lmb *lmb)
{
	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}
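
/*
 * Rebuild the ibm,dynamic-memory-v2 property from the in-kernel LMB
 * array.  The v2 encoding stores a count of LMB sets followed by one
 * of_drconf_cell_v2 per set; a run of consecutive LMBs sharing the same
 * aa_index and flags is folded into a single set, recording only the
 * first LMB's base_addr and drc_index along with seq_lmbs.
 */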
static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 1;
		} else {
			seq_lmbs++;
		}

		prev_lmb = lmb;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}

int drmem_update_dt(void)
{
	struct device_node *memory;
	struct property *prop;
	int rc = -1;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
	if (prop) {
		rc = drmem_update_dt_v1(memory, prop);
	} else {
		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			rc = drmem_update_dt_v2(memory, prop);
	}

	of_node_put(memory);
	return rc;
}

static void __init read_drconf_v1_cell(struct drmem_lmb *lmb,
				       const __be32 **prop)
{
	const __be32 *p = *prop;

	lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
	lmb->drc_index = of_read_number(p++, 1);

	p++; /* skip reserved field */

	lmb->aa_index = of_read_number(p++, 1);
	lmb->flags = of_read_number(p++, 1);

	*prop = p;
}

static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
			void (*func)(struct drmem_lmb *, const __be32 **))
{
	struct drmem_lmb lmb;
	u32 i, n_lmbs;

	n_lmbs = of_read_number(prop++, 1);
	if (n_lmbs == 0)
		return;

	for (i = 0; i < n_lmbs; i++) {
		read_drconf_v1_cell(&lmb, &prop);
		func(&lmb, &usm);
	}
}

static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				       const __be32 **prop)
{
	const __be32 *p = *prop;

	dr_cell->seq_lmbs = of_read_number(p++, 1);
	dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
	dr_cell->drc_index = of_read_number(p++, 1);
	dr_cell->aa_index = of_read_number(p++, 1);
	dr_cell->flags = of_read_number(p++, 1);

	*prop = p;
}
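
/*
 * Walk an ibm,dynamic-memory-v2 property, expanding each compressed
 * LMB set back into individual LMBs for the caller's callback.  Within
 * a set, every LMB sits drmem_lmb_size() above the previous one and
 * takes the next sequential drc_index, while aa_index and flags are
 * shared by the whole set.
 */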
static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
			void (*func)(struct drmem_lmb *, const __be32 **))
{
	struct of_drconf_cell_v2 dr_cell;
	struct drmem_lmb lmb;
	u32 i, j, lmb_sets;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &prop);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb.base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_lmb_size();

			lmb.drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb.aa_index = dr_cell.aa_index;
			lmb.flags = dr_cell.flags;

			func(&lmb, &usm);
		}
	}
}

#ifdef CONFIG_PPC_PSERIES
void __init walk_drmem_lmbs_early(unsigned long node,
			void (*func)(struct drmem_lmb *, const __be32 **))
{
	const __be32 *prop, *usm;
	int len;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		__walk_drmem_v1_lmbs(prop, usm, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			__walk_drmem_v2_lmbs(prop, usm, func);
	}

	memblock_dump_all();
}
#endif

static int __init init_drmem_lmb_size(struct device_node *dn)
{
	const __be32 *prop;
	int len;

	if (drmem_info->lmb_size)
		return 0;

	prop = of_get_property(dn, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32)) {
		pr_info("Could not determine LMB size\n");
		return -1;
	}

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
	return 0;
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *dn)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;

	return prop;
}

void __init walk_drmem_lmbs(struct device_node *dn,
			    void (*func)(struct drmem_lmb *, const __be32 **))
{
	const __be32 *prop, *usm;

	if (init_drmem_lmb_size(dn))
		return;

	usm = of_get_usable_memory(dn);

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		__walk_drmem_v1_lmbs(prop, usm, func);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			__walk_drmem_v2_lmbs(prop, usm, func);
	}
}

static void __init init_drmem_v1_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;

	drmem_info->n_lmbs = of_read_number(prop++, 1);
	if (drmem_info->n_lmbs == 0)
		return;

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	for_each_drmem_lmb(lmb) {
		read_drconf_v1_cell(lmb, &prop);
		lmb_set_nid(lmb);
	}
}
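
/*
 * Populate the global LMB array from an ibm,dynamic-memory-v2 property.
 * A first pass over the LMB sets totals seq_lmbs so the array can be
 * allocated in a single kcalloc() call; a second pass expands each set
 * into individual drmem_lmb entries.
 */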
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;

			lmb_set_nid(lmb);
		}
	}
}

static int __init drmem_init(void)
{
	struct device_node *dn;
	const __be32 *prop;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		pr_info("No dynamic reconfiguration memory found\n");
		return 0;
	}

	if (init_drmem_lmb_size(dn)) {
		of_node_put(dn);
		return 0;
	}

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		init_drmem_v1_lmbs(prop);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			init_drmem_v2_lmbs(prop);
	}

	of_node_put(dn);
	return 0;
}
late_initcall(drmem_init);