// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>

#include <defs.h>
#include <soc.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <chipcommon.h>
#include "debug.h"
#include "chip.h"

/* SOC Interconnect types (aka chip types) */
#define SOCI_SB		0
#define SOCI_AI		1

/* PL-368 DMP definitions */
#define DMP_DESC_TYPE_MSK	0x0000000F
#define DMP_DESC_EMPTY		0x00000000
#define DMP_DESC_VALID		0x00000001
#define DMP_DESC_COMPONENT	0x00000001
#define DMP_DESC_MASTER_PORT	0x00000003
#define DMP_DESC_ADDRESS	0x00000005
#define DMP_DESC_ADDRSIZE_GT32	0x00000008
#define DMP_DESC_EOT		0x0000000F

#define DMP_COMP_DESIGNER	0xFFF00000
#define DMP_COMP_DESIGNER_S	20
#define DMP_COMP_PARTNUM	0x000FFF00
#define DMP_COMP_PARTNUM_S	8
#define DMP_COMP_CLASS		0x000000F0
#define DMP_COMP_CLASS_S	4
#define DMP_COMP_REVISION	0xFF000000
#define DMP_COMP_REVISION_S	24
#define DMP_COMP_NUM_SWRAP	0x00F80000
#define DMP_COMP_NUM_SWRAP_S	19
#define DMP_COMP_NUM_MWRAP	0x0007C000
#define DMP_COMP_NUM_MWRAP_S	14
#define DMP_COMP_NUM_SPORT	0x00003E00
#define DMP_COMP_NUM_SPORT_S	9
#define DMP_COMP_NUM_MPORT	0x000001F0
#define DMP_COMP_NUM_MPORT_S	4

#define DMP_MASTER_PORT_UID	0x0000FF00
#define DMP_MASTER_PORT_UID_S	8
#define DMP_MASTER_PORT_NUM	0x000000F0
#define DMP_MASTER_PORT_NUM_S	4

#define DMP_SLAVE_ADDR_BASE	0xFFFFF000
#define DMP_SLAVE_ADDR_BASE_S	12
#define DMP_SLAVE_PORT_NUM	0x00000F00
#define DMP_SLAVE_PORT_NUM_S	8
#define DMP_SLAVE_TYPE		0x000000C0
#define DMP_SLAVE_TYPE_S	6
#define DMP_SLAVE_TYPE_SLAVE	0
#define DMP_SLAVE_TYPE_BRIDGE	1
#define DMP_SLAVE_TYPE_SWRAP	2
#define DMP_SLAVE_TYPE_MWRAP	3
#define DMP_SLAVE_SIZE_TYPE	0x00000030
#define DMP_SLAVE_SIZE_TYPE_S	4
#define DMP_SLAVE_SIZE_4K	0
#define DMP_SLAVE_SIZE_8K	1
#define DMP_SLAVE_SIZE_16K	2
#define DMP_SLAVE_SIZE_DESC	3

/* EROM CompIdentB */
#define CIB_REV_MASK		0xff000000
#define CIB_REV_SHIFT		24

/* ARM CR4 core specific control flag bits */
#define ARMCR4_BCMA_IOCTL_CPUHALT	0x0020

/* D11 core specific control flag bits */
#define D11_BCMA_IOCTL_PHYCLOCKEN	0x0004
#define D11_BCMA_IOCTL_PHYRESET		0x0008

/* chip core base & ramsize */
/* bcm4329 */
/* SDIO device core, ID 0x829 */
#define BCM4329_CORE_BUS_BASE		0x18011000
/* internal memory core, ID 0x80e */
#define BCM4329_CORE_SOCRAM_BASE	0x18003000
/* ARM Cortex M3 core, ID 0x82a */
#define BCM4329_CORE_ARM_BASE		0x18002000

/* Max possibly supported memory size (limited by IO mapped memory) */
#define BRCMF_CHIP_MAX_MEMSIZE		(4 * 1024 * 1024)

#define CORE_SB(base, field) \
		(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
#define SBCOREREV(sbidh) \
	((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
	 ((sbidh) & SSB_IDHIGH_RCLO))

struct sbconfig {
	u32 PAD[2];
	u32 sbipsflag;		/* initiator port ocp slave flag */
	u32 PAD[3];
	u32 sbtpsflag;		/* target port ocp slave flag */
	u32 PAD[11];
	u32 sbtmerrloga;	/* (sonics >= 2.3) */
	u32 PAD;
	u32 sbtmerrlog;		/* (sonics >= 2.3) */
	u32 PAD[3];
	u32 sbadmatch3;		/* address match3 */
	u32 PAD;
	u32 sbadmatch2;		/* address match2 */
	u32 PAD;
	u32 sbadmatch1;		/* address match1 */
	u32 PAD[7];
	u32 sbimstate;		/* initiator agent state */
	u32 sbintvec;		/* interrupt mask */
	u32 sbtmstatelow;	/* target state */
	u32 sbtmstatehigh;	/* target state */
	u32 sbbwa0;		/* bandwidth allocation table0 */
	u32 PAD;
	u32 sbimconfiglow;	/* initiator configuration */
	u32 sbimconfighigh;	/* initiator configuration */
	u32 sbadmatch0;		/* address match0 */
	u32 PAD;
	u32 sbtmconfiglow;	/* target configuration */
	u32 sbtmconfighigh;	/* target configuration */
	u32 sbbconfig;		/* broadcast configuration */
	u32 PAD;
	u32 sbbstate;		/* broadcast state */
	u32 PAD[3];
	u32 sbactcnfg;		/* activate configuration */
	u32 PAD[3];
	u32 sbflagst;		/* current sbflags */
	u32 PAD[3];
	u32 sbidlow;		/* identification */
	u32 sbidhigh;		/* identification */
};

/* bankidx and bankinfo reg defines corerev >= 8 */
#define SOCRAM_BANKINFO_RETNTRAM_MASK	0x00010000
#define SOCRAM_BANKINFO_SZMASK		0x0000007f
#define SOCRAM_BANKIDX_ROM_MASK		0x00000100

#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
/* socram bankinfo memtype */
#define SOCRAM_MEMTYPE_RAM		0
#define SOCRAM_MEMTYPE_R0M		1
#define SOCRAM_MEMTYPE_DEVRAM		2

#define SOCRAM_BANKINFO_SZBASE		8192
#define SRCI_LSS_MASK			0x00f00000
#define SRCI_LSS_SHIFT			20
#define SRCI_SRNB_MASK			0xf0
#define SRCI_SRNB_MASK_EXT		0x100
#define SRCI_SRNB_SHIFT			4
#define SRCI_SRBSZ_MASK			0xf
#define SRCI_SRBSZ_SHIFT		0
#define SR_BSZ_BASE			14

struct sbsocramregs {
	u32 coreinfo;
	u32 bwalloc;
	u32 extracoreinfo;
	u32 biststat;
	u32 bankidx;
	u32 standbyctrl;

	u32 errlogstatus;	/* rev 6 */
	u32 errlogaddr;		/* rev 6 */
	/* used for patching rev 3 & 5 */
	u32 cambankidx;
	u32 cambankstandbyctrl;
	u32 cambankpatchctrl;
	u32 cambankpatchtblbaseaddr;
	u32 cambankcmdreg;
	u32 cambankdatareg;
	u32 cambankmaskreg;
	u32 PAD[1];
	u32 bankinfo;		/* corerev 8 */
	u32 bankpda;
	u32 PAD[14];
	u32 extmemconfig;
	u32 extmemparitycsr;
	u32 extmemparityerrdata;
	u32 extmemparityerrcnt;
	u32 extmemwrctrlandsize;
	u32 PAD[84];
	u32 workaround;
	u32 pwrctl;		/* corerev >= 2 */
	u32 PAD[133];
	u32 sr_control;		/* corerev >= 15 */
	u32 sr_status;		/* corerev >= 15 */
	u32 sr_address;		/* corerev >= 15 */
	u32 sr_data;		/* corerev >= 15 */
};

#define SOCRAMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
#define SYSMEMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)

#define ARMCR4_CAP		(0x04)
#define ARMCR4_BANKIDX		(0x40)
#define ARMCR4_BANKINFO		(0x44)
#define ARMCR4_BANKPDA		(0x4C)

#define ARMCR4_TCBBNB_MASK	0xf0
#define ARMCR4_TCBBNB_SHIFT	4
#define ARMCR4_TCBANB_MASK	0xf
#define ARMCR4_TCBANB_SHIFT	0

#define ARMCR4_BSZ_MASK		0x3f
#define ARMCR4_BSZ_MULT		8192

struct brcmf_core_priv {
	struct brcmf_core pub;
	u32 wrapbase;
	struct list_head list;
	struct brcmf_chip_priv *chip;
};

struct brcmf_chip_priv {
	struct brcmf_chip pub;
	const struct brcmf_buscore_ops *ops;
	void *ctx;
	/* assured first core is chipcommon, second core is buscore */
	struct list_head cores;
	u16 num_cores;

	bool (*iscoreup)(struct brcmf_core_priv *core);
	void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
			    u32 reset);
	void (*resetcore)(struct brcmf_core_priv *core, u32 prereset,
			  u32 reset, u32 postreset);
};
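
/*
 * Backplane accessors: chips with a Sonics (SB) interconnect are driven
 * through the per-core sbconfig register window, while AXI (AI) chips use
 * the per-core DMP wrapper registers. brcmf_chip_recognition() selects the
 * matching iscoreup/coredisable/resetcore callbacks from the pairs below.
 */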
static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
				  struct brcmf_core *core)
{
	u32 regdata;

	regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
	core->rev = SBCOREREV(regdata);
}

static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	u32 address;

	ci = core->chip;
	address = CORE_SB(core->pub.base, sbtmstatelow);
	regdata = ci->ops->read32(ci->ctx, address);
	regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
		    SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
	return SSB_TMSLOW_CLOCK == regdata;
}

static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	bool ret;

	ci = core->chip;
	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
	ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;

	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
	ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);

	return ret;
}

static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
				      u32 prereset, u32 reset)
{
	struct brcmf_chip_priv *ci;
	u32 val, base;

	ci = core->chip;
	base = core->pub.base;
	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	if (val & SSB_TMSLOW_RESET)
		return;

	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	if ((val & SSB_TMSLOW_CLOCK) != 0) {
		/*
		 * set target reject and spin until busy is clear
		 * (preserve core-specific bits)
		 */
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
				 val | SSB_TMSLOW_REJECT);

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		udelay(1);
		SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
			  & SSB_TMSHIGH_BUSY), 100000);

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
		if (val & SSB_TMSHIGH_BUSY)
			brcmf_err("core state still busy\n");

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
		if (val & SSB_IDLOW_INITIATOR) {
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			val |= SSB_IMSTATE_REJECT;
			ci->ops->write32(ci->ctx,
					 CORE_SB(base, sbimstate), val);
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			udelay(1);
			SPINWAIT((ci->ops->read32(ci->ctx,
						  CORE_SB(base, sbimstate)) &
				  SSB_IMSTATE_BUSY), 100000);
		}

		/* set reset and reject while enabling the clocks */
		val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
		      SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		udelay(10);

		/* clear the initiator reject bit */
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
		if (val & SSB_IDLOW_INITIATOR) {
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			val &= ~SSB_IMSTATE_REJECT;
			ci->ops->write32(ci->ctx,
					 CORE_SB(base, sbimstate), val);
		}
	}

	/* leave reset and reject asserted */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
	udelay(1);
}
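
/*
 * Disable a core on the AXI backplane: apply the caller supplied pre-reset
 * ioctl bits with the clock forced on, assert the wrapper reset control and
 * wait for it to latch, then leave the core in reset with the in-reset ioctl
 * bits applied.
 */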
static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
				      u32 prereset, u32 reset)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;

	ci = core->chip;

	/* if core is already in reset, skip reset */
	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
		goto in_reset_configure;

	/* configure reset */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);

	/* put in reset */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
			 BCMA_RESET_CTL_RESET);
	usleep_range(10, 20);

	/* wait till reset is 1 */
	SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
		 BCMA_RESET_CTL_RESET, 300);

in_reset_configure:
	/* in-reset configure */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
}

static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
				    u32 reset, u32 postreset)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	u32 base;

	ci = core->chip;
	base = core->pub.base;
	/*
	 * Must do the disable sequence first to work for
	 * arbitrary current core state.
	 */
	brcmf_chip_sb_coredisable(core, 0, 0);

	/*
	 * Now do the initialization sequence.
	 * set reset while enabling the clock and
	 * forcing them on throughout the core
	 */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
			 SSB_TMSLOW_RESET);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);

	/* clear any serror */
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
	if (regdata & SSB_TMSHIGH_SERR)
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);

	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
	if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
		regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
		ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
	}

	/* clear reset and allow it to propagate throughout the core */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);

	/* leave clock enabled */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_CLOCK);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);
}
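
/*
 * Reset a core on the AXI backplane. When the core is a D11 core and a
 * second D11 core is present, both cores are taken through the same
 * disable/release sequence so that the two MACs leave reset consistently.
 */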
static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
				    u32 reset, u32 postreset)
{
	struct brcmf_chip_priv *ci;
	int count;
	struct brcmf_core *d11core2 = NULL;
	struct brcmf_core_priv *d11priv2 = NULL;

	ci = core->chip;

	/* special handling: reset both D11 cores if a second one exists */
	if (core->pub.id == BCMA_CORE_80211) {
		d11core2 = brcmf_chip_get_d11core(&ci->pub, 1);
		if (d11core2) {
			brcmf_dbg(INFO, "found two d11 cores, reset both\n");
			d11priv2 = container_of(d11core2,
						struct brcmf_core_priv, pub);
		}
	}

	/* must disable first to work for arbitrary current core state */
	brcmf_chip_ai_coredisable(core, prereset, reset);
	if (d11priv2)
		brcmf_chip_ai_coredisable(d11priv2, prereset, reset);

	count = 0;
	while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
	       BCMA_RESET_CTL_RESET) {
		ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
		count++;
		if (count > 50)
			break;
		usleep_range(40, 60);
	}

	if (d11priv2) {
		count = 0;
		while (ci->ops->read32(ci->ctx,
				       d11priv2->wrapbase + BCMA_RESET_CTL) &
		       BCMA_RESET_CTL_RESET) {
			ci->ops->write32(ci->ctx,
					 d11priv2->wrapbase + BCMA_RESET_CTL,
					 0);
			count++;
			if (count > 50)
				break;
			usleep_range(40, 60);
		}
	}

	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 postreset | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);

	if (d11priv2) {
		ci->ops->write32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL,
				 postreset | BCMA_IOCTL_CLK);
		ci->ops->read32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL);
	}
}

char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len)
{
	const char *fmt;

	fmt = ((id > 0xa000) || (id < 0x4000)) ? "BCM%d/%u" : "BCM%x/%u";
	snprintf(buf, len, fmt, id, rev);
	return buf;
}

static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
					      u16 coreid, u32 base,
					      u32 wrapbase)
{
	struct brcmf_core_priv *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core)
		return ERR_PTR(-ENOMEM);

	core->pub.id = coreid;
	core->pub.base = base;
	core->chip = ci;
	core->wrapbase = wrapbase;

	list_add_tail(&core->list, &ci->cores);
	return &core->pub;
}

/* safety check for chipinfo */
static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
{
	struct brcmf_core_priv *core;
	bool need_socram = false;
	bool has_socram = false;
	bool cpu_found = false;
	int idx = 1;

	list_for_each_entry(core, &ci->cores, list) {
		brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
			  idx++, core->pub.id, core->pub.rev, core->pub.base,
			  core->wrapbase);

		switch (core->pub.id) {
		case BCMA_CORE_ARM_CM3:
			cpu_found = true;
			need_socram = true;
			break;
		case BCMA_CORE_INTERNAL_MEM:
			has_socram = true;
			break;
		case BCMA_CORE_ARM_CR4:
			cpu_found = true;
			break;
		case BCMA_CORE_ARM_CA7:
			cpu_found = true;
			break;
		default:
			break;
		}
	}

	if (!cpu_found) {
		brcmf_err("CPU core not detected\n");
		return -ENXIO;
	}
	/* check RAM core presence for ARM CM3 core */
	if (need_socram && !has_socram) {
		brcmf_err("RAM core not provided with ARM CM3 core\n");
		return -ENODEV;
	}
	return 0;
}

static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
{
	return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
}

static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
				    u16 reg, u32 val)
{
	core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
}

static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
				       u32 *banksize)
{
	u32 bankinfo;
	u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);

	bankidx |= idx;
	brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
	bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
	*banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
	*banksize *= SOCRAM_BANKINFO_SZBASE;
	return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
}
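
/*
 * Determine the total SOCRAM size and the portion kept powered for
 * save/restore. For corerev <= 7 and corerev 12 the size follows directly
 * from the bank count and bank size fields in coreinfo; newer revisions are
 * probed bank by bank through bankidx/bankinfo, and banks flagged as
 * retention RAM are added to the save/restore size.
 */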
static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
				      u32 *srsize)
{
	u32 coreinfo;
	uint nb, banksize, lss;
	bool retent;
	int i;

	*ramsize = 0;
	*srsize = 0;

	if (WARN_ON(sr->pub.rev < 4))
		return;

	if (!brcmf_chip_iscoreup(&sr->pub))
		brcmf_chip_resetcore(&sr->pub, 0, 0, 0);

	/* Get info for determining size */
	coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;

	if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
		banksize = (coreinfo & SRCI_SRBSZ_MASK);
		lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
		if (lss != 0)
			nb--;
		*ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
		if (lss != 0)
			*ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
	} else {
		/* number of SRAM banks increased for corerev 23 and later */
		if (sr->pub.rev >= 23) {
			nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT))
				>> SRCI_SRNB_SHIFT;
		} else {
			nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
		}
		for (i = 0; i < nb; i++) {
			retent = brcmf_chip_socram_banksize(sr, i, &banksize);
			*ramsize += banksize;
			if (retent)
				*srsize += banksize;
		}
	}

	/* hardcoded save&restore memory sizes */
	switch (sr->chip->pub.chip) {
	case BRCM_CC_4334_CHIP_ID:
		if (sr->chip->pub.chiprev < 2)
			*srsize = (32 * 1024);
		break;
	case BRCM_CC_43430_CHIP_ID:
		/* assume sr for now as we cannot check
		 * firmware sr capability at this point.
		 */
		*srsize = (64 * 1024);
		break;
	default:
		break;
	}
}

/** Return the SYS MEM size */
static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
{
	u32 memsize = 0;
	u32 coreinfo;
	u32 idx;
	u32 nb;
	u32 banksize;

	if (!brcmf_chip_iscoreup(&sysmem->pub))
		brcmf_chip_resetcore(&sysmem->pub, 0, 0, 0);

	coreinfo = brcmf_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo));
	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;

	for (idx = 0; idx < nb; idx++) {
		brcmf_chip_socram_banksize(sysmem, idx, &banksize);
		memsize += banksize;
	}

	return memsize;
}

/** Return the TCM-RAM size of the ARMCR4 core. */
static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
{
	u32 corecap;
	u32 memsize = 0;
	u32 nab;
	u32 nbb;
	u32 totb;
	u32 bxinfo;
	u32 idx;

	corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);

	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
	totb = nab + nbb;

	for (idx = 0; idx < totb; idx++) {
		brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
		bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
	}

	return memsize;
}

static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
{
	switch (ci->pub.chip) {
	case BRCM_CC_4345_CHIP_ID:
		return 0x198000;
	case BRCM_CC_4335_CHIP_ID:
	case BRCM_CC_4339_CHIP_ID:
	case BRCM_CC_4350_CHIP_ID:
	case BRCM_CC_4354_CHIP_ID:
	case BRCM_CC_4356_CHIP_ID:
	case BRCM_CC_43567_CHIP_ID:
	case BRCM_CC_43569_CHIP_ID:
	case BRCM_CC_43570_CHIP_ID:
	case BRCM_CC_4358_CHIP_ID:
	case BRCM_CC_43602_CHIP_ID:
	case BRCM_CC_4371_CHIP_ID:
		return 0x180000;
	case BRCM_CC_43465_CHIP_ID:
	case BRCM_CC_43525_CHIP_ID:
	case BRCM_CC_4365_CHIP_ID:
	case BRCM_CC_4366_CHIP_ID:
	case BRCM_CC_43664_CHIP_ID:
		return 0x200000;
	case BRCM_CC_4359_CHIP_ID:
		return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
	case CY_CC_4373_CHIP_ID:
		return 0x160000;
	default:
		brcmf_err("unknown chip: %s\n", ci->pub.name);
		break;
	}
	return 0;
}

int brcmf_chip_get_raminfo(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *ci = container_of(pub, struct brcmf_chip_priv,
						  pub);
	struct brcmf_core_priv *mem_core;
	struct brcmf_core *mem;

	mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
	if (mem) {
		mem_core = container_of(mem, struct brcmf_core_priv, pub);
		ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
		ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
		if (!ci->pub.rambase) {
			brcmf_err("RAM base not provided with ARM CR4 core\n");
			return -EINVAL;
		}
	} else {
		mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_SYS_MEM);
		if (mem) {
			mem_core = container_of(mem, struct brcmf_core_priv,
						pub);
			ci->pub.ramsize = brcmf_chip_sysmem_ramsize(mem_core);
			ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
			if (!ci->pub.rambase) {
				brcmf_err("RAM base not provided with ARM CA7 core\n");
				return -EINVAL;
			}
		} else {
			mem = brcmf_chip_get_core(&ci->pub,
						  BCMA_CORE_INTERNAL_MEM);
			if (!mem) {
				brcmf_err("No memory cores found\n");
				return -ENOMEM;
			}
			mem_core = container_of(mem, struct brcmf_core_priv,
						pub);
			brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
						  &ci->pub.srsize);
		}
	}
	brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
		  ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
		  ci->pub.srsize, ci->pub.srsize);

	if (!ci->pub.ramsize) {
		brcmf_err("RAM size is undetermined\n");
		return -ENOMEM;
	}

	if (ci->pub.ramsize > BRCMF_CHIP_MAX_MEMSIZE) {
		brcmf_err("RAM size is incorrect\n");
		return -ENOMEM;
	}

	return 0;
}
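
/*
 * EROM walker helpers: the enumeration ROM of AXI chips is a sequence of
 * 32-bit descriptors. The low nibble identifies the descriptor type
 * (component, master port, address, end-of-table); address descriptors may
 * be followed by an extra word when the DMP_DESC_ADDRSIZE_GT32 bit is set.
 */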
static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
				   u8 *type)
{
	u32 val;

	/* read next descriptor */
	val = ci->ops->read32(ci->ctx, *eromaddr);
	*eromaddr += 4;

	if (!type)
		return val;

	/* determine descriptor type */
	*type = (val & DMP_DESC_TYPE_MSK);
	if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
		*type = DMP_DESC_ADDRESS;

	return val;
}

static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
				      u32 *regbase, u32 *wrapbase)
{
	u8 desc;
	u32 val, szdesc;
	u8 stype, sztype, wraptype;

	*regbase = 0;
	*wrapbase = 0;

	val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
	if (desc == DMP_DESC_MASTER_PORT) {
		wraptype = DMP_SLAVE_TYPE_MWRAP;
	} else if (desc == DMP_DESC_ADDRESS) {
		/* revert erom address */
		*eromaddr -= 4;
		wraptype = DMP_SLAVE_TYPE_SWRAP;
	} else {
		*eromaddr -= 4;
		return -EILSEQ;
	}

	do {
		/* locate address descriptor */
		do {
			val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
			/* unexpected table end */
			if (desc == DMP_DESC_EOT) {
				*eromaddr -= 4;
				return -EFAULT;
			}
		} while (desc != DMP_DESC_ADDRESS &&
			 desc != DMP_DESC_COMPONENT);

		/* stop if we crossed current component border */
		if (desc == DMP_DESC_COMPONENT) {
			*eromaddr -= 4;
			return 0;
		}

		/* skip upper 32-bit address descriptor */
		if (val & DMP_DESC_ADDRSIZE_GT32)
			brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);

		sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;

		/* next size descriptor can be skipped */
		if (sztype == DMP_SLAVE_SIZE_DESC) {
			szdesc = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
			/* skip upper size descriptor if present */
			if (szdesc & DMP_DESC_ADDRSIZE_GT32)
				brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
		}

		/* look for 4K or 8K register regions */
		if (sztype != DMP_SLAVE_SIZE_4K &&
		    sztype != DMP_SLAVE_SIZE_8K)
			continue;

		stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;

		/* only regular slave and wrapper */
		if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
			*regbase = val & DMP_SLAVE_ADDR_BASE;
		if (*wrapbase == 0 && stype == wraptype)
			*wrapbase = val & DMP_SLAVE_ADDR_BASE;
	} while (*regbase == 0 || *wrapbase == 0);

	return 0;
}
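
/*
 * Walk the complete EROM and add a brcmf_core entry for every component
 * that exposes register space: the part number becomes the core id, the
 * second component word carries the revision and the wrapper counts, and
 * brcmf_chip_dmp_get_regaddr() supplies the register and wrapper base
 * addresses.
 */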
static
int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
{
	struct brcmf_core *core;
	u32 eromaddr;
	u8 desc_type = 0;
	u32 val;
	u16 id;
	u8 nmw, nsw, rev;
	u32 base, wrap;
	int err;

	eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));

	while (desc_type != DMP_DESC_EOT) {
		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
		if (!(val & DMP_DESC_VALID))
			continue;

		if (desc_type == DMP_DESC_EMPTY)
			continue;

		/* need a component descriptor */
		if (desc_type != DMP_DESC_COMPONENT)
			continue;

		id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;

		/* next descriptor must be component as well */
		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
		if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
			return -EFAULT;

		/* only look at cores with master port(s) */
		nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
		nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
		rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;

		/* need core with ports */
		if (nmw + nsw == 0 &&
		    id != BCMA_CORE_PMU &&
		    id != BCMA_CORE_GCI)
			continue;

		/* try to obtain register address info */
		err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
		if (err)
			continue;

		/* finally a core to be added */
		core = brcmf_chip_add_core(ci, id, base, wrap);
		if (IS_ERR(core))
			return PTR_ERR(core);

		core->rev = rev;
	}

	return 0;
}
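
/*
 * Identify the chip from the chipcommon chipid register, register its cores
 * (statically for the single supported SB chip, via the EROM scan for AXI
 * chips), sanity check the core list and finally read out the RAM layout.
 */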
static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
{
	struct brcmf_core *core;
	u32 regdata;
	u32 socitype;
	int ret;

	/* Get CC core rev
	 * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
	 * For different chiptypes or old sdio hosts w/o chipcommon,
	 * other ways of recognition should be added here.
	 */
	regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
	ci->pub.chip = regdata & CID_ID_MASK;
	ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
	socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;

	brcmf_chip_name(ci->pub.chip, ci->pub.chiprev,
			ci->pub.name, sizeof(ci->pub.name));
	brcmf_dbg(INFO, "found %s chip: %s\n",
		  socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name);

	if (socitype == SOCI_SB) {
		if (ci->pub.chip != BRCM_CC_4329_CHIP_ID) {
			brcmf_err("SB chip is not supported\n");
			return -ENODEV;
		}
		ci->iscoreup = brcmf_chip_sb_iscoreup;
		ci->coredisable = brcmf_chip_sb_coredisable;
		ci->resetcore = brcmf_chip_sb_resetcore;

		core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
					   SI_ENUM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
					   BCM4329_CORE_BUS_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
					   BCM4329_CORE_SOCRAM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
					   BCM4329_CORE_ARM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);

		core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
		brcmf_chip_sb_corerev(ci, core);
	} else if (socitype == SOCI_AI) {
		ci->iscoreup = brcmf_chip_ai_iscoreup;
		ci->coredisable = brcmf_chip_ai_coredisable;
		ci->resetcore = brcmf_chip_ai_resetcore;

		brcmf_chip_dmp_erom_scan(ci);
	} else {
		brcmf_err("chip backplane type %u is not supported\n",
			  socitype);
		return -ENODEV;
	}

	ret = brcmf_chip_cores_check(ci);
	if (ret)
		return ret;

	/* assure chip is passive for core access */
	brcmf_chip_set_passive(&ci->pub);

	/* Call bus specific reset function now. Cores have been determined
	 * but further access may require a chip specific reset at this point.
	 */
	if (ci->ops->reset) {
		ci->ops->reset(ci->ctx, &ci->pub);
		brcmf_chip_set_passive(&ci->pub);
	}

	return brcmf_chip_get_raminfo(&ci->pub);
}

static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
{
	struct brcmf_core *core;
	struct brcmf_core_priv *cpu;
	u32 val;

	core = brcmf_chip_get_core(&chip->pub, id);
	if (!core)
		return;

	switch (id) {
	case BCMA_CORE_ARM_CM3:
		brcmf_chip_coredisable(core, 0, 0);
		break;
	case BCMA_CORE_ARM_CR4:
	case BCMA_CORE_ARM_CA7:
		cpu = container_of(core, struct brcmf_core_priv, pub);

		/* clear all IOCTL bits except HALT bit */
		val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
		val &= ARMCR4_BCMA_IOCTL_CPUHALT;
		brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
				     ARMCR4_BCMA_IOCTL_CPUHALT);
		break;
	default:
		brcmf_err("unknown id: %u\n", id);
		break;
	}
}

static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
{
	struct brcmf_chip *pub;
	struct brcmf_core_priv *cc;
	struct brcmf_core *pmu;
	u32 base;
	u32 val;
	int ret = 0;

	pub = &chip->pub;
	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
	base = cc->pub.base;

	/* get chipcommon capabilities */
	pub->cc_caps = chip->ops->read32(chip->ctx,
					 CORE_CC_REG(base, capabilities));
	pub->cc_caps_ext = chip->ops->read32(chip->ctx,
					     CORE_CC_REG(base,
							 capabilities_ext));

	/* get pmu caps & rev */
	pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */
	if (pub->cc_caps & CC_CAP_PMU) {
		val = chip->ops->read32(chip->ctx,
					CORE_CC_REG(pmu->base, pmucapabilities));
		pub->pmurev = val & PCAP_REV_MASK;
		pub->pmucaps = val;
	}

	brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
		  cc->pub.rev, pub->pmurev, pub->pmucaps);

	/* execute bus core specific setup */
	if (chip->ops->setup)
		ret = chip->ops->setup(chip->ctx, pub);

	return ret;
}

struct brcmf_chip *brcmf_chip_attach(void *ctx,
				     const struct brcmf_buscore_ops *ops)
{
	struct brcmf_chip_priv *chip;
	int err = 0;

	if (WARN_ON(!ops->read32))
		err = -EINVAL;
	if (WARN_ON(!ops->write32))
		err = -EINVAL;
	if (WARN_ON(!ops->prepare))
		err = -EINVAL;
	if (WARN_ON(!ops->activate))
		err = -EINVAL;
	if (err < 0)
		return ERR_PTR(-EINVAL);

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&chip->cores);
	chip->num_cores = 0;
	chip->ops = ops;
	chip->ctx = ctx;

	err = ops->prepare(ctx);
	if (err < 0)
		goto fail;

	err = brcmf_chip_recognition(chip);
	if (err < 0)
		goto fail;

	err = brcmf_chip_setup(chip);
	if (err < 0)
		goto fail;

	return &chip->pub;

fail:
	brcmf_chip_detach(&chip->pub);
	return ERR_PTR(err);
}

void brcmf_chip_detach(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;
	struct brcmf_core_priv *tmp;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry_safe(core, tmp, &chip->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
	kfree(chip);
}
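
/*
 * Core lookup helpers: cores are kept in discovery order, with chipcommon
 * guaranteed to be the first entry. The d11 lookup returns the unit'th
 * 802.11 core, which the AI reset path uses to find a second D11 core.
 */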
struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry(core, &chip->cores, list) {
		if (core->pub.id == BCMA_CORE_80211) {
			if (unit-- == 0)
				return &core->pub;
		}
	}
	return NULL;
}

struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry(core, &chip->cores, list)
		if (core->pub.id == coreid)
			return &core->pub;

	return NULL;
}

struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *cc;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
	if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
		return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
	return &cc->pub;
}

struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub)
{
	struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub);
	struct brcmf_core *pmu;

	/* See if there is a separate PMU core available */
	if (cc->rev >= 35 &&
	    pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
		pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU);
		if (pmu)
			return pmu;
	}

	/* Fall back to the ChipCommon core for older hardware */
	return cc;
}

bool brcmf_chip_iscoreup(struct brcmf_core *pub)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	return core->chip->iscoreup(core);
}

void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	core->chip->coredisable(core, prereset, reset);
}

void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
			  u32 postreset)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	core->chip->resetcore(core, prereset, reset, postreset);
}
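
/*
 * The *_set_passive()/*_set_active() pairs below put the chip in a state
 * where the host can access core registers and download firmware (ARM held
 * in reset or halted, D11 and memory cores reset with the PHY clock
 * enabled) and afterwards hand control back to the embedded CPU. Which
 * pair is used depends on the CPU core found on the chip: CM3, CR4 or CA7.
 */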
static void
brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;
	struct brcmf_core_priv *sr;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
	brcmf_chip_resetcore(core, 0, 0, 0);

	/* disable bank #3 remap for this device */
	if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
		sr = container_of(core, struct brcmf_core_priv, pub);
		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
	}
}

static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
	if (!brcmf_chip_iscoreup(core)) {
		brcmf_err("SOCRAM core is down after reset?\n");
		return false;
	}

	chip->ops->activate(chip->ctx, &chip->pub, 0);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
	brcmf_chip_resetcore(core, 0, 0, 0);

	return true;
}

static inline void
brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
}

static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
{
	struct brcmf_core *core;

	chip->ops->activate(chip->ctx, &chip->pub, rstvec);

	/* restore ARM */
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);

	return true;
}

static inline void
brcmf_chip_ca7_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CA7);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
}

static bool brcmf_chip_ca7_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
{
	struct brcmf_core *core;

	chip->ops->activate(chip->ctx, &chip->pub, rstvec);

	/* restore ARM */
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CA7);
	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);

	return true;
}

void brcmf_chip_set_passive(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core *arm;

	brcmf_dbg(TRACE, "Enter\n");

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
	if (arm) {
		brcmf_chip_cr4_set_passive(chip);
		return;
	}
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
	if (arm) {
		brcmf_chip_ca7_set_passive(chip);
		return;
	}
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
	if (arm) {
		brcmf_chip_cm3_set_passive(chip);
		return;
	}
}

bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core *arm;

	brcmf_dbg(TRACE, "Enter\n");

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
	if (arm)
		return brcmf_chip_cr4_set_active(chip, rstvec);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
	if (arm)
		return brcmf_chip_ca7_set_active(chip, rstvec);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
	if (arm)
		return brcmf_chip_cm3_set_active(chip);

	return false;
}
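
/*
 * Report whether the chip can do save/restore (retention of RAM contents
 * across deep sleep). The generic path checks the PMU capability and
 * retention control registers; several chips need an explicit per-chip
 * check because the SR engine enable lives in chip specific control bits.
 */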
bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
{
	u32 base, addr, reg, pmu_cc3_mask = ~0;
	struct brcmf_chip_priv *chip;
	struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);

	brcmf_dbg(TRACE, "Enter\n");

	/* old chips with PMU version less than 17 don't support save restore */
	if (pub->pmurev < 17)
		return false;

	base = brcmf_chip_get_chipcommon(pub)->base;
	chip = container_of(pub, struct brcmf_chip_priv, pub);

	switch (pub->chip) {
	case BRCM_CC_4354_CHIP_ID:
	case BRCM_CC_4356_CHIP_ID:
	case BRCM_CC_4345_CHIP_ID:
		/* explicitly check SR engine enable bit */
		pmu_cc3_mask = BIT(2);
		/* fall-through */
	case BRCM_CC_43241_CHIP_ID:
	case BRCM_CC_4335_CHIP_ID:
	case BRCM_CC_4339_CHIP_ID:
		/* read PMU chipcontrol register 3 */
		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
		chip->ops->write32(chip->ctx, addr, 3);
		addr = CORE_CC_REG(pmu->base, chipcontrol_data);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & pmu_cc3_mask) != 0;
	case BRCM_CC_43430_CHIP_ID:
		addr = CORE_CC_REG(base, sr_control1);
		reg = chip->ops->read32(chip->ctx, addr);
		return reg != 0;
	case CY_CC_4373_CHIP_ID:
		/* explicitly check SR engine enable bit */
		addr = CORE_CC_REG(base, sr_control0);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
	case BRCM_CC_4359_CHIP_ID:
	case CY_CC_43012_CHIP_ID:
		addr = CORE_CC_REG(pmu->base, retention_ctl);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
	default:
		addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
		reg = chip->ops->read32(chip->ctx, addr);
		if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
			return false;

		addr = CORE_CC_REG(pmu->base, retention_ctl);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
	}
}