// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/alpha/kernel/core_wildfire.c
 *
 * Wildfire support.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_wildfire.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/smp.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_CONFIG 0
#define DEBUG_DUMP_REGS 0
#define DEBUG_DUMP_CONFIG 1

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

#if DEBUG_DUMP_REGS
static void wildfire_dump_pci_regs(int qbbno, int hoseno);
static void wildfire_dump_pca_regs(int qbbno, int pcano);
static void wildfire_dump_qsa_regs(int qbbno);
static void wildfire_dump_qsd_regs(int qbbno);
static void wildfire_dump_iop_regs(int qbbno);
static void wildfire_dump_gp_regs(int qbbno);
#endif
#if DEBUG_DUMP_CONFIG
static void wildfire_dump_hardware_config(void);
#endif

unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY	0xff

unsigned long wildfire_hard_qbb_mask;
unsigned long wildfire_soft_qbb_mask;
unsigned long wildfire_gp_mask;
unsigned long wildfire_hs_mask;
unsigned long wildfire_iop_mask;
unsigned long wildfire_ior_mask;
unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;

void __init
wildfire_init_hose(int qbbno, int hoseno)
{
	struct pci_controller *hose;
	wildfire_pci *pci;

	hose = alloc_pci_controller();
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/* This is for userland consumption. */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
	hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);

	hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
	hose->index = (qbbno << 3) + hoseno;

	hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
	hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[hoseno];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[hoseno];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
		       qbbno, hoseno);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
		       qbbno, hoseno);

#if DEBUG_DUMP_REGS
	wildfire_dump_pci_regs(qbbno, hoseno);
#endif

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 is scatter-gather only
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 1GB
	 * Window 2 is direct access 1GB at 2GB
	 * Window 3 is scatter-gather 128MB at 3GB
	 * ??? We ought to scale window 3 memory.
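	 *
	 * (Informal note, inferred from the register writes below: the low
	 *  bits of each wbase CSR appear to act as control bits; "| 3" seems
	 *  to enable a scatter-gather window translated through the tbase
	 *  PTE table, while "| 1" seems to enable a direct-mapped window.)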
	 *
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
				       SMP_CACHE_BYTES);
	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000,
				       SMP_CACHE_BYTES);

	pci = WILDFIRE_pci(qbbno, hoseno);

	pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
	pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
	pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);

	pci->pci_window[1].wbase.csr = 0x40000000 | 1;
	pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
	pci->pci_window[1].tbase.csr = 0;

	pci->pci_window[2].wbase.csr = 0x80000000 | 1;
	pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
	pci->pci_window[2].tbase.csr = 0x40000000;

	pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
	pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
	pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);

	wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}

void __init
wildfire_init_pca(int qbbno, int pcano)
{
	/* Test for PCA existence first. */
	if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
		return;

#if DEBUG_DUMP_REGS
	wildfire_dump_pca_regs(qbbno, pcano);
#endif

	/* Do both hoses of the PCA. */
	wildfire_init_hose(qbbno, (pcano << 1) + 0);
	wildfire_init_hose(qbbno, (pcano << 1) + 1);
}

void __init
wildfire_init_qbb(int qbbno)
{
	int pcano;

	/* Test for QBB existence first. */
	if (!WILDFIRE_QBB_EXISTS(qbbno))
		return;

#if DEBUG_DUMP_REGS
	wildfire_dump_qsa_regs(qbbno);
	wildfire_dump_qsd_regs(qbbno);
	wildfire_dump_iop_regs(qbbno);
	wildfire_dump_gp_regs(qbbno);
#endif

	/* Init all PCAs here. */
	for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
		wildfire_init_pca(qbbno, pcano);
	}
}

void __init
wildfire_hardware_probe(void)
{
	unsigned long temp;
	unsigned int hard_qbb, soft_qbb;
	wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
	wildfire_qsd *qsd;
	wildfire_qsa *qsa;
	wildfire_iop *iop;
	wildfire_gp *gp;
	wildfire_ne *ne;
	wildfire_fe *fe;
	int i;

	temp = fast->qsd_whami.csr;
#if 0
	printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
#endif

	hard_qbb = (temp >> 8) & 7;
	soft_qbb = (temp >> 4) & 7;

	/* Init the HW configuration variables. */
	wildfire_hard_qbb_mask = (1 << hard_qbb);
	wildfire_soft_qbb_mask = (1 << soft_qbb);

	wildfire_gp_mask = 0;
	wildfire_hs_mask = 0;
	wildfire_iop_mask = 0;
	wildfire_ior_mask = 0;
	wildfire_pca_mask = 0;

	wildfire_cpu_mask = 0;
	wildfire_mem_mask = 0;

	memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
	memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);

	/* First, determine which QBBs are present. */
	qsa = WILDFIRE_qsa(soft_qbb);

	temp = qsa->qsa_qbb_id.csr;
#if 0
	printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
#endif

	if (temp & 0x40) /* Is there an HS? */
		wildfire_hs_mask = 1;

	if (temp & 0x20) { /* Is there a GP? */
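		/*
		 * (Inferred from the decode loop below: each 4-bit nibble of
		 * the assembled GPA_QBB_MAP value appears to describe one
		 * hard QBB; bit 3 set means that QBB is present and bits 2:0
		 * give its soft QBB number.)
		 */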
		gp = WILDFIRE_gp(soft_qbb);
		temp = 0;
		for (i = 0; i < 4; i++) {
			temp |= gp->gpa_qbb_map[i].csr << (i * 8);
#if 0
			printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
			       i, gp, temp);
#endif
		}

		for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
			if (temp & 8) { /* Is there a QBB? */
				soft_qbb = temp & 7;
				wildfire_hard_qbb_mask |= (1 << hard_qbb);
				wildfire_soft_qbb_mask |= (1 << soft_qbb);
			}
			temp >>= 4;
		}
		wildfire_gp_mask = wildfire_soft_qbb_mask;
	}

	/* Next determine each QBB's resources. */
	for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
		if (WILDFIRE_QBB_EXISTS(soft_qbb)) {
			qsd = WILDFIRE_qsd(soft_qbb);
			temp = qsd->qsd_whami.csr;
#if 0
			printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
#endif
			hard_qbb = (temp >> 8) & 7;
			wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
			wildfire_soft_qbb_map[soft_qbb] = hard_qbb;

			qsa = WILDFIRE_qsa(soft_qbb);
			temp = qsa->qsa_qbb_pop[0].csr;
#if 0
			printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
#endif
			wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
			wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

			temp = qsa->qsa_qbb_pop[1].csr;
#if 0
			printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
#endif
			wildfire_iop_mask |= (1 << soft_qbb);
			wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

			temp = qsa->qsa_qbb_id.csr;
#if 0
			printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
#endif
			if (temp & 0x20)
				wildfire_gp_mask |= (1 << soft_qbb);

			/* Probe for PCA existence here. */
			for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
				iop = WILDFIRE_iop(soft_qbb);
				ne = WILDFIRE_ne(soft_qbb, i);
				fe = WILDFIRE_fe(soft_qbb, i);

				if ((iop->iop_hose[i].init.csr & 1) == 1 &&
				    ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
				    ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL))
				{
					wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
				}
			}

		}
	}
#if DEBUG_DUMP_CONFIG
	wildfire_dump_hardware_config();
#endif
}

void __init
wildfire_init_arch(void)
{
	int qbbno;

	/* With multiple PCI buses, we play with I/O as physical addrs. */
	ioport_resource.end = ~0UL;

	/* Probe the hardware for info about configuration. */
	wildfire_hardware_probe();

	/* Now init all the found QBBs. */
	for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
		wildfire_init_qbb(qbbno);
	}

	/* Normal direct PCI DMA mapping. */
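	/*
	 * (Note: this 2GB direct map at bus address 0x40000000 corresponds
	 * to DMA windows 1 and 2 programmed in wildfire_init_hose() above.)
	 */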
	__direct_map_base = 0x40000000UL;
	__direct_map_size = 0x80000000UL;
}

void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr)
{
	mb();
	mb();  /* magic */
	draina();
	/* FIXME: clear pci errors */
	wrmces(0x7);
	mb();

	process_mcheck_info(vector, la_ptr, "WILDFIRE",
			    mcheck_expected(smp_processor_id()));
}

void
wildfire_kill_arch(int mode)
{
}

void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	int qbbno = hose->index >> 3;
	int hoseno = hose->index & 7;
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);

	mb();
	pci->pci_flush_tlb.csr; /* reading does the trick */
}

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	struct pci_controller *hose = pbus->sysdata;
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	if (!pbus->parent) /* No parent means peer PCI bus. */
		bus = 0;
	*type1 = (bus != 0);

	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= hose->config_space_base;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops wildfire_pci_ops =
{
	.read =		wildfire_read_config,
	.write =	wildfire_write_config,
};

#if DEBUG_DUMP_REGS

static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
	int i;

	printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);

	printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
	       pci->pci_io_addr_ext.csr);
	printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
	printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
	printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
	printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
	printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
	printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);

	printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
		       pci->pci_window[i].wbase.csr,
		       pci->pci_window[i].wmask.csr,
		       pci->pci_window[i].tbase.csr);
	}
	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
	wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
	int i;

	printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
	       qbbno, pcano, pca);

	printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
	printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
	printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
	printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
	printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
	       pca->pca_stdio_edge_level.csr);

	printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
	       qbbno, pcano, pca);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
		       pca->pca_int[i].target.csr,
		       pca->pca_int[i].enable.csr);
	}

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsa_regs(int qbbno)
{
	wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
	int i;

	printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);

	printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
	printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
	printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);

	for (i = 0; i < 5; i++)
		printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
		       i, qsa->qsa_config[i].csr);

	for (i = 0; i < 2; i++)
		printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
		       i, qsa->qsa_qbb_pop[i].csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsd_regs(int qbbno)
{
	wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);

	printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);

	printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
	printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
	printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
	       qsd->qsd_port_present.csr);
	printk(KERN_ERR " QSD_PORT_ACTIVE: 0x%16lx\n",
	       qsd->qsd_port_active.csr);
	printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
	       qsd->qsd_fault_ena.csr);
	printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
	       qsd->qsd_cpu_int_ena.csr);
	printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
	       qsd->qsd_mem_config.csr);
	printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
	       qsd->qsd_err_sum.csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_iop_regs(int qbbno)
{
	wildfire_iop *iop = WILDFIRE_iop(qbbno);
	int i;

	printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);

	printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
	printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
	printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
	       iop->iop_switch_credits.csr);
	printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
	       iop->iop_hose_credits.csr);

	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
		       i, iop->iop_hose[i].init.csr);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
		       i, iop->iop_dev_int[i].target.csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_gp_regs(int qbbno)
{
	wildfire_gp *gp = WILDFIRE_gp(qbbno);
	int i;

	printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
		       i, gp->gpa_qbb_map[i].csr);

	printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
	       gp->gpa_mem_pop_map.csr);
	printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
	printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
	printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
	printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
	printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);

	printk(KERN_ERR "\n");
}
#endif /* DUMP_REGS */

#if DEBUG_DUMP_CONFIG
static void __init
wildfire_dump_hardware_config(void)
{
	int i;

	printk(KERN_ERR "Probed Hardware Configuration\n");

	printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
	printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);

	printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
	printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
	printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
	printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
	printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);

	printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
	printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);

	printk(" hard_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
		if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
			printk("--- ");
		else
			printk("%3d ", wildfire_hard_qbb_map[i]);
	printk("\n");

	printk(" soft_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
		if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
			printk("--- ");
		else
			printk("%3d ", wildfire_soft_qbb_map[i]);
	printk("\n");
}
#endif /* DUMP_CONFIG */