/*
 * inventory.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>

/*
** Debug options
**	DEBUG_PAT	Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT

int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;

/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __read_mostly;
unsigned long parisc_cell_loc __read_mostly;


void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		pr_cont("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_PAT;
		pr_cont("64 bit PAT.\n");
		parisc_cell_num = cell_info.cell_num;
		parisc_cell_loc = cell_info.cell_loc;
		pr_info("PAT: Running on cell %lu and location %lu.\n",
			parisc_cell_num, parisc_cell_loc);
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test.
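	 * The 5-bit bus ID sits in bits 11..15 of the hversion word that
	 * pdc_model_info() returns (hence the ">> (4 + 7)" and "& 0x1f"
	 * extraction below); the recognised values correspond to the
	 * 32-bit workstations noted next to each case label.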
	 */

	status = pdc_model_info(&model);

	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:		/* 720, 730, 750, 735, 755 */
	case 0x6:		/* 705, 710 */
	case 0x7:		/* 715, 725 */
	case 0x8:		/* 745, 747, 742 */
	case 0xA:		/* 712 and similar */
	case 0xC:		/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		pr_cont("Snake.\n");
		return;

	default:		/* Everything else */

		pr_cont("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
		|| ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 GB, which report that memory using 64 bit only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges, 0UL, npages);
	npmem_ranges = 1;
}

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
** The module object is filled via PDC_PAT_CELL[Return Cell Module].
** If a module is found, register module will get the IODC bytes via
** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
** only for SBAs and LBAs.  This view will cause an invalid
** argument error for all other cell module types.
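**
** Each call describes a single module: pat_inventory() just bumps
** mod_index and calls back in until a non-PDC_OK status indicates
** there are no more modules in the cell.  Note we still return PDC_OK
** when alloc_pa_dev() declines the module, so the walk keeps going.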
**
*/

static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

	pa_pdc_cell = kmalloc(sizeof(*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		kfree(pa_pdc_cell);
		return status;
	}

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell->mod_location;
	dev->mod0 = pa_pdc_cell->mod[0];

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

 print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
				pa_pdc_cell->mod[3 + i * 3],	/* start */
				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}


/* pat pdc can return information about a variety of different
 * types of memory (e.g. firmware, i/o, etc) but we only care about
 * the usable physical ram right now. Since the firmware specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
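 *
 * Hence PAT_MAX_RANGES below is four times MAX_PHYSMEM_RANGES: the
 * oversized table gives firmware room for descriptors we later skip
 * (non-memory entry types and non-general usage) when copying the
 * survivors into the global pmem_ranges[] array.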
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note: Prelude (and its successors: Lclass, A400/500) only
	** implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr, mem_table,
				   (unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B, C, J class). Other non PAT PDC machines
		 * do support more than 3.75 GB of memory, but we don't
		 * support them yet.
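		 *
		 * Falling back to pagezero_memconfig() below means we only
		 * describe the single range reported in PAGE0->imm_max_mem,
		 * which is why such machines are currently capped there.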
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else   /* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;

	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif  /* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
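 *
 * The PDC address indices are 1-based, so the loop below asks firmware
 * for addresses 1 through @num_addrs and appends each successful result
 * to dev->addr[].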
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
	if (!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

	for (i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if (PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices firmware
 * knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
						  &module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}

	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();
}