// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>

#include <defs.h>
#include <soc.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <chipcommon.h>
#include "debug.h"
#include "chip.h"

/* SOC Interconnect types (aka chip types) */
#define SOCI_SB		0
#define SOCI_AI		1

/* PL-368 DMP definitions */
#define DMP_DESC_TYPE_MSK	0x0000000F
#define DMP_DESC_EMPTY		0x00000000
#define DMP_DESC_VALID		0x00000001
#define DMP_DESC_COMPONENT	0x00000001
#define DMP_DESC_MASTER_PORT	0x00000003
#define DMP_DESC_ADDRESS	0x00000005
#define DMP_DESC_ADDRSIZE_GT32	0x00000008
#define DMP_DESC_EOT		0x0000000F

#define DMP_COMP_DESIGNER	0xFFF00000
#define DMP_COMP_DESIGNER_S	20
#define DMP_COMP_PARTNUM	0x000FFF00
#define DMP_COMP_PARTNUM_S	8
#define DMP_COMP_CLASS		0x000000F0
#define DMP_COMP_CLASS_S	4
#define DMP_COMP_REVISION	0xFF000000
#define DMP_COMP_REVISION_S	24
#define DMP_COMP_NUM_SWRAP	0x00F80000
#define DMP_COMP_NUM_SWRAP_S	19
#define DMP_COMP_NUM_MWRAP	0x0007C000
#define DMP_COMP_NUM_MWRAP_S	14
#define DMP_COMP_NUM_SPORT	0x00003E00
#define DMP_COMP_NUM_SPORT_S	9
#define DMP_COMP_NUM_MPORT	0x000001F0
#define DMP_COMP_NUM_MPORT_S	4

#define DMP_MASTER_PORT_UID	0x0000FF00
#define DMP_MASTER_PORT_UID_S	8
#define DMP_MASTER_PORT_NUM	0x000000F0
#define DMP_MASTER_PORT_NUM_S	4

#define DMP_SLAVE_ADDR_BASE	0xFFFFF000
#define DMP_SLAVE_ADDR_BASE_S	12
#define DMP_SLAVE_PORT_NUM	0x00000F00
#define DMP_SLAVE_PORT_NUM_S	8
#define DMP_SLAVE_TYPE		0x000000C0
#define DMP_SLAVE_TYPE_S	6
#define DMP_SLAVE_TYPE_SLAVE	0
#define DMP_SLAVE_TYPE_BRIDGE	1
#define DMP_SLAVE_TYPE_SWRAP	2
#define DMP_SLAVE_TYPE_MWRAP	3
#define DMP_SLAVE_SIZE_TYPE	0x00000030
#define DMP_SLAVE_SIZE_TYPE_S	4
#define DMP_SLAVE_SIZE_4K	0
#define DMP_SLAVE_SIZE_8K	1
#define DMP_SLAVE_SIZE_16K	2
#define DMP_SLAVE_SIZE_DESC	3

/* EROM CompIdentB */
#define CIB_REV_MASK		0xff000000
#define CIB_REV_SHIFT		24

/* ARM CR4 core specific control flag bits */
#define ARMCR4_BCMA_IOCTL_CPUHALT	0x0020

/* D11 core specific control flag bits */
#define D11_BCMA_IOCTL_PHYCLOCKEN	0x0004
#define D11_BCMA_IOCTL_PHYRESET		0x0008

/* chip core base & ramsize */
/* bcm4329 */
/* SDIO device core, ID 0x829 */
#define BCM4329_CORE_BUS_BASE		0x18011000
/* internal memory core, ID 0x80e */
#define BCM4329_CORE_SOCRAM_BASE	0x18003000
/* ARM Cortex M3 core, ID 0x82a */
#define BCM4329_CORE_ARM_BASE		0x18002000

/* Max possibly supported memory size (limited by IO mapped memory) */
#define BRCMF_CHIP_MAX_MEMSIZE		(4 * 1024 * 1024)

#define CORE_SB(base, field) \
		(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
#define SBCOREREV(sbidh) \
	((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
	 ((sbidh) & SSB_IDHIGH_RCLO))

struct sbconfig {
	u32 PAD[2];
	u32 sbipsflag;		/* initiator port ocp slave flag */
	u32 PAD[3];
	u32 sbtpsflag;		/* target port ocp slave flag */
	u32 PAD[11];
	u32 sbtmerrloga;	/* (sonics >= 2.3) */
	u32 PAD;
	u32 sbtmerrlog;		/* (sonics >= 2.3) */
	u32 PAD[3];
	u32 sbadmatch3;		/* address match3 */
	u32 PAD;
	u32 sbadmatch2;		/* address match2 */
	u32 PAD;
	u32 sbadmatch1;		/* address match1 */
	u32 PAD[7];
	u32 sbimstate;		/* initiator agent state */
	u32 sbintvec;		/* interrupt mask */
	u32 sbtmstatelow;	/* target state */
	u32 sbtmstatehigh;	/* target state */
	u32 sbbwa0;		/* bandwidth allocation table0 */
	u32 PAD;
	u32 sbimconfiglow;	/* initiator configuration */
	u32 sbimconfighigh;	/* initiator configuration */
	u32 sbadmatch0;		/* address match0 */
	u32 PAD;
	u32 sbtmconfiglow;	/* target configuration */
	u32 sbtmconfighigh;	/* target configuration */
	u32 sbbconfig;		/* broadcast configuration */
	u32 PAD;
	u32 sbbstate;		/* broadcast state */
	u32 PAD[3];
	u32 sbactcnfg;		/* activate configuration */
	u32 PAD[3];
	u32 sbflagst;		/* current sbflags */
	u32 PAD[3];
	u32 sbidlow;		/* identification */
	u32 sbidhigh;		/* identification */
};

/* bankidx and bankinfo reg defines corerev >= 8 */
#define SOCRAM_BANKINFO_RETNTRAM_MASK	0x00010000
#define SOCRAM_BANKINFO_SZMASK		0x0000007f
#define SOCRAM_BANKIDX_ROM_MASK		0x00000100

#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
/* socram bankinfo memtype */
#define SOCRAM_MEMTYPE_RAM		0
#define SOCRAM_MEMTYPE_R0M		1
#define SOCRAM_MEMTYPE_DEVRAM		2

#define SOCRAM_BANKINFO_SZBASE		8192
#define SRCI_LSS_MASK			0x00f00000
#define SRCI_LSS_SHIFT			20
#define SRCI_SRNB_MASK			0xf0
#define SRCI_SRNB_MASK_EXT		0x100
#define SRCI_SRNB_SHIFT			4
#define SRCI_SRBSZ_MASK			0xf
#define SRCI_SRBSZ_SHIFT		0
#define SR_BSZ_BASE			14

struct sbsocramregs {
	u32 coreinfo;
	u32 bwalloc;
	u32 extracoreinfo;
	u32 biststat;
	u32 bankidx;
	u32 standbyctrl;

	u32 errlogstatus;	/* rev 6 */
	u32 errlogaddr;		/* rev 6 */
	/* used for patching rev 3 & 5 */
	u32 cambankidx;
	u32 cambankstandbyctrl;
	u32 cambankpatchctrl;
	u32 cambankpatchtblbaseaddr;
	u32 cambankcmdreg;
	u32 cambankdatareg;
	u32 cambankmaskreg;
	u32 PAD[1];
	u32 bankinfo;		/* corerev >= 8 */
	u32 bankpda;
	u32 PAD[14];
	u32 extmemconfig;
	u32 extmemparitycsr;
	u32 extmemparityerrdata;
	u32 extmemparityerrcnt;
	u32 extmemwrctrlandsize;
	u32 PAD[84];
	u32 workaround;
	u32 pwrctl;		/* corerev >= 2 */
	u32 PAD[133];
	u32 sr_control;		/* corerev >= 15 */
	u32 sr_status;		/* corerev >= 15 */
	u32 sr_address;		/* corerev >= 15 */
	u32 sr_data;		/* corerev >= 15 */
};

#define SOCRAMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
#define SYSMEMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)

#define ARMCR4_CAP		(0x04)
#define ARMCR4_BANKIDX		(0x40)
#define ARMCR4_BANKINFO		(0x44)
#define ARMCR4_BANKPDA		(0x4C)

#define ARMCR4_TCBBNB_MASK	0xf0
#define ARMCR4_TCBBNB_SHIFT	4
#define ARMCR4_TCBANB_MASK	0xf
#define ARMCR4_TCBANB_SHIFT	0

#define ARMCR4_BSZ_MASK		0x3f
#define ARMCR4_BSZ_MULT		8192

struct brcmf_core_priv {
	struct brcmf_core pub;
	u32 wrapbase;
	struct list_head list;
	struct brcmf_chip_priv *chip;
};

struct brcmf_chip_priv {
	struct brcmf_chip pub;
	const struct brcmf_buscore_ops *ops;
	void *ctx;
	/* assured first core is chipcommon, second core is buscore */
	struct list_head cores;
	u16 num_cores;

	bool (*iscoreup)(struct brcmf_core_priv *core);
	void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
			    u32 reset);
	void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
			  u32 postreset);
};

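/* Sonics (SB) backplane: the core revision is split across the high and
 * low revision fields of the sbidhigh config register; SBCOREREV()
 * recombines them.
 */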
static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
				  struct brcmf_core *core)
{
	u32 regdata;

	regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
	core->rev = SBCOREREV(regdata);
}

static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	u32 address;

	ci = core->chip;
	address = CORE_SB(core->pub.base, sbtmstatelow);
	regdata = ci->ops->read32(ci->ctx, address);
	regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
		    SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
	return SSB_TMSLOW_CLOCK == regdata;
}

static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	bool ret;

	ci = core->chip;
	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
	ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;

	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
	ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);

	return ret;
}

static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
				      u32 prereset, u32 reset)
{
	struct brcmf_chip_priv *ci;
	u32 val, base;

	ci = core->chip;
	base = core->pub.base;
	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	if (val & SSB_TMSLOW_RESET)
		return;

	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	if ((val & SSB_TMSLOW_CLOCK) != 0) {
		/*
		 * set target reject and spin until busy is clear
		 * (preserve core-specific bits)
		 */
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
				 val | SSB_TMSLOW_REJECT);

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		udelay(1);
		SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
			  & SSB_TMSHIGH_BUSY), 100000);

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
		if (val & SSB_TMSHIGH_BUSY)
			brcmf_err("core state still busy\n");

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
		if (val & SSB_IDLOW_INITIATOR) {
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			val |= SSB_IMSTATE_REJECT;
			ci->ops->write32(ci->ctx,
					 CORE_SB(base, sbimstate), val);
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			udelay(1);
			SPINWAIT((ci->ops->read32(ci->ctx,
						  CORE_SB(base, sbimstate)) &
				  SSB_IMSTATE_BUSY), 100000);
		}

		/* set reset and reject while enabling the clocks */
		val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
		      SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		udelay(10);

		/* clear the initiator reject bit */
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
		if (val & SSB_IDLOW_INITIATOR) {
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			val &= ~SSB_IMSTATE_REJECT;
			ci->ops->write32(ci->ctx,
					 CORE_SB(base, sbimstate), val);
		}
	}

	/* leave reset and reject asserted */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
	udelay(1);
}

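/* AI (AXI) backplane: hold a core in reset. The caller-supplied pre-reset
 * ioctl bits are written with the clock forced on, the core is then put
 * into reset and the in-reset ioctl bits are applied.
 */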
static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
				      u32 prereset, u32 reset)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;

	ci = core->chip;

	/* if core is already in reset, skip reset */
	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
		goto in_reset_configure;

	/* configure reset */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);

	/* put in reset */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
			 BCMA_RESET_CTL_RESET);
	usleep_range(10, 20);

	/* wait till reset is 1 */
	SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
		 BCMA_RESET_CTL_RESET, 300);

in_reset_configure:
	/* in-reset configure */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
}

static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
				    u32 reset, u32 postreset)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	u32 base;

	ci = core->chip;
	base = core->pub.base;
	/*
	 * Must do the disable sequence first to work for
	 * arbitrary current core state.
	 */
	brcmf_chip_sb_coredisable(core, 0, 0);

	/*
	 * Now do the initialization sequence.
	 * set reset while enabling the clock and
	 * forcing them on throughout the core
	 */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
			 SSB_TMSLOW_RESET);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);

	/* clear any serror */
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
	if (regdata & SSB_TMSHIGH_SERR)
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);

	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
	if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
		regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
		ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
	}

	/* clear reset and allow it to propagate throughout the core */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);

	/* leave clock enabled */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_CLOCK);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);
}

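/* AI (AXI) backplane: reset a core by first disabling it, then clearing
 * the wrapper reset bit (retrying until it sticks), and finally applying
 * the post-reset ioctl bits with the clock enabled. When the chip carries
 * two D11 cores, both are taken through the sequence together.
 */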
static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
				    u32 reset, u32 postreset)
{
	struct brcmf_chip_priv *ci;
	int count;
	struct brcmf_core *d11core2 = NULL;
	struct brcmf_core_priv *d11priv2 = NULL;

	ci = core->chip;

	/* special handle two D11 cores reset */
	if (core->pub.id == BCMA_CORE_80211) {
		d11core2 = brcmf_chip_get_d11core(&ci->pub, 1);
		if (d11core2) {
			brcmf_dbg(INFO, "found two d11 cores, reset both\n");
			d11priv2 = container_of(d11core2,
						struct brcmf_core_priv, pub);
		}
	}

	/* must disable first to work for arbitrary current core state */
	brcmf_chip_ai_coredisable(core, prereset, reset);
	if (d11priv2)
		brcmf_chip_ai_coredisable(d11priv2, prereset, reset);

	count = 0;
	while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
	       BCMA_RESET_CTL_RESET) {
		ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
		count++;
		if (count > 50)
			break;
		usleep_range(40, 60);
	}

	if (d11priv2) {
		count = 0;
		while (ci->ops->read32(ci->ctx,
				       d11priv2->wrapbase + BCMA_RESET_CTL) &
		       BCMA_RESET_CTL_RESET) {
			ci->ops->write32(ci->ctx,
					 d11priv2->wrapbase + BCMA_RESET_CTL,
					 0);
			count++;
			if (count > 50)
				break;
			usleep_range(40, 60);
		}
	}

	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 postreset | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);

	if (d11priv2) {
		ci->ops->write32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL,
				 postreset | BCMA_IOCTL_CLK);
		ci->ops->read32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL);
	}
}

char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len)
{
	const char *fmt;

	fmt = ((id > 0xa000) || (id < 0x4000)) ? "BCM%d/%u" : "BCM%x/%u";
	snprintf(buf, len, fmt, id, rev);
	return buf;
}

static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
					      u16 coreid, u32 base,
					      u32 wrapbase)
{
	struct brcmf_core_priv *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core)
		return ERR_PTR(-ENOMEM);

	core->pub.id = coreid;
	core->pub.base = base;
	core->chip = ci;
	core->wrapbase = wrapbase;

	list_add_tail(&core->list, &ci->cores);
	return &core->pub;
}

/* safety check for chipinfo */
static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
{
	struct brcmf_core_priv *core;
	bool need_socram = false;
	bool has_socram = false;
	bool cpu_found = false;
	int idx = 1;

	list_for_each_entry(core, &ci->cores, list) {
		brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
			  idx++, core->pub.id, core->pub.rev, core->pub.base,
			  core->wrapbase);

		switch (core->pub.id) {
		case BCMA_CORE_ARM_CM3:
			cpu_found = true;
			need_socram = true;
			break;
		case BCMA_CORE_INTERNAL_MEM:
			has_socram = true;
			break;
		case BCMA_CORE_ARM_CR4:
			cpu_found = true;
			break;
		case BCMA_CORE_ARM_CA7:
			cpu_found = true;
			break;
		default:
			break;
		}
	}

	if (!cpu_found) {
		brcmf_err("CPU core not detected\n");
		return -ENXIO;
	}
	/* check RAM core presence for ARM CM3 core */
	if (need_socram && !has_socram) {
		brcmf_err("RAM core not provided with ARM CM3 core\n");
		return -ENODEV;
	}
	return 0;
}

static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
{
	return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
}

static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
				    u16 reg, u32 val)
{
	core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
}

static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
				       u32 *banksize)
{
	u32 bankinfo;
	u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);

	bankidx |= idx;
	brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
	bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
	*banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
	*banksize *= SOCRAM_BANKINFO_SZBASE;
	return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
}

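/* Determine the total SOCRAM size and the portion backed by retention
 * (save/restore) memory. Older core revisions encode the geometry in the
 * coreinfo register; newer revisions are sized by walking the banks via
 * brcmf_chip_socram_banksize(). A few chips use hardcoded srsize values.
 */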
static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
				      u32 *srsize)
{
	u32 coreinfo;
	uint nb, banksize, lss;
	bool retent;
	int i;

	*ramsize = 0;
	*srsize = 0;

	if (WARN_ON(sr->pub.rev < 4))
		return;

	if (!brcmf_chip_iscoreup(&sr->pub))
		brcmf_chip_resetcore(&sr->pub, 0, 0, 0);

	/* Get info for determining size */
	coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;

	if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
		banksize = (coreinfo & SRCI_SRBSZ_MASK);
		lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
		if (lss != 0)
			nb--;
		*ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
		if (lss != 0)
			*ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
	} else {
		/* length of SRAM banks increased for corerev greater than 23 */
		if (sr->pub.rev >= 23) {
			nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT))
				>> SRCI_SRNB_SHIFT;
		} else {
			nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
		}
		for (i = 0; i < nb; i++) {
			retent = brcmf_chip_socram_banksize(sr, i, &banksize);
			*ramsize += banksize;
			if (retent)
				*srsize += banksize;
		}
	}

	/* hardcoded save&restore memory sizes */
	switch (sr->chip->pub.chip) {
	case BRCM_CC_4334_CHIP_ID:
		if (sr->chip->pub.chiprev < 2)
			*srsize = (32 * 1024);
		break;
	case BRCM_CC_43430_CHIP_ID:
		/* assume sr for now as we cannot check
		 * firmware sr capability at this point.
		 */
		*srsize = (64 * 1024);
		break;
	default:
		break;
	}
}

/** Return the SYS MEM size */
static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
{
	u32 memsize = 0;
	u32 coreinfo;
	u32 idx;
	u32 nb;
	u32 banksize;

	if (!brcmf_chip_iscoreup(&sysmem->pub))
		brcmf_chip_resetcore(&sysmem->pub, 0, 0, 0);

	coreinfo = brcmf_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo));
	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;

	for (idx = 0; idx < nb; idx++) {
		brcmf_chip_socram_banksize(sysmem, idx, &banksize);
		memsize += banksize;
	}

	return memsize;
}

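/* Each ARM CR4 TCM bank reports its size in ARMCR4_BANKINFO as
 * (size field + 1) * 8 KiB, e.g. a field value of 7 means a 64 KiB bank;
 * the total TCM size is the sum over all A and B banks.
 */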
/** Return the TCM-RAM size of the ARMCR4 core. */
static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
{
	u32 corecap;
	u32 memsize = 0;
	u32 nab;
	u32 nbb;
	u32 totb;
	u32 bxinfo;
	u32 idx;

	corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);

	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
	totb = nab + nbb;

	for (idx = 0; idx < totb; idx++) {
		brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
		bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
	}

	return memsize;
}

static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
{
	switch (ci->pub.chip) {
	case BRCM_CC_4345_CHIP_ID:
		return 0x198000;
	case BRCM_CC_4335_CHIP_ID:
	case BRCM_CC_4339_CHIP_ID:
	case BRCM_CC_4350_CHIP_ID:
	case BRCM_CC_4354_CHIP_ID:
	case BRCM_CC_4356_CHIP_ID:
	case BRCM_CC_43567_CHIP_ID:
	case BRCM_CC_43569_CHIP_ID:
	case BRCM_CC_43570_CHIP_ID:
	case BRCM_CC_4358_CHIP_ID:
	case BRCM_CC_43602_CHIP_ID:
	case BRCM_CC_4371_CHIP_ID:
		return 0x180000;
	case BRCM_CC_43465_CHIP_ID:
	case BRCM_CC_43525_CHIP_ID:
	case BRCM_CC_4365_CHIP_ID:
	case BRCM_CC_4366_CHIP_ID:
	case BRCM_CC_43664_CHIP_ID:
		return 0x200000;
	case BRCM_CC_4359_CHIP_ID:
		return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
	case BRCM_CC_4364_CHIP_ID:
	case CY_CC_4373_CHIP_ID:
		return 0x160000;
	default:
		brcmf_err("unknown chip: %s\n", ci->pub.name);
		break;
	}
	return 0;
}

int brcmf_chip_get_raminfo(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *ci = container_of(pub, struct brcmf_chip_priv,
						  pub);
	struct brcmf_core_priv *mem_core;
	struct brcmf_core *mem;

	mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
	if (mem) {
		mem_core = container_of(mem, struct brcmf_core_priv, pub);
		ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
		ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
		if (!ci->pub.rambase) {
			brcmf_err("RAM base not provided with ARM CR4 core\n");
			return -EINVAL;
		}
	} else {
		mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_SYS_MEM);
		if (mem) {
			mem_core = container_of(mem, struct brcmf_core_priv,
						pub);
			ci->pub.ramsize = brcmf_chip_sysmem_ramsize(mem_core);
			ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
			if (!ci->pub.rambase) {
				brcmf_err("RAM base not provided with ARM CA7 core\n");
				return -EINVAL;
			}
		} else {
			mem = brcmf_chip_get_core(&ci->pub,
						  BCMA_CORE_INTERNAL_MEM);
			if (!mem) {
				brcmf_err("No memory cores found\n");
				return -ENOMEM;
			}
			mem_core = container_of(mem, struct brcmf_core_priv,
						pub);
			brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
						  &ci->pub.srsize);
		}
	}
	brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
		  ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
		  ci->pub.srsize, ci->pub.srsize);

	if (!ci->pub.ramsize) {
		brcmf_err("RAM size is undetermined\n");
		return -ENOMEM;
	}

	if (ci->pub.ramsize > BRCMF_CHIP_MAX_MEMSIZE) {
		brcmf_err("RAM size is incorrect\n");
		return -ENOMEM;
	}

	return 0;
}

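/* DMP (PL-368) enumeration ROM parsing. The EROM is a sequence of 32-bit
 * descriptors; the low four bits select the descriptor type, and address
 * descriptors may be followed by an extra word when the ADDRSIZE_GT32
 * flag is set.
 */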
static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
				   u8 *type)
{
	u32 val;

	/* read next descriptor */
	val = ci->ops->read32(ci->ctx, *eromaddr);
	*eromaddr += 4;

	if (!type)
		return val;

	/* determine descriptor type */
	*type = (val & DMP_DESC_TYPE_MSK);
	if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
		*type = DMP_DESC_ADDRESS;

	return val;
}

static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
				      u32 *regbase, u32 *wrapbase)
{
	u8 desc;
	u32 val, szdesc;
	u8 stype, sztype, wraptype;

	*regbase = 0;
	*wrapbase = 0;

	val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
	if (desc == DMP_DESC_MASTER_PORT) {
		wraptype = DMP_SLAVE_TYPE_MWRAP;
	} else if (desc == DMP_DESC_ADDRESS) {
		/* revert erom address */
		*eromaddr -= 4;
		wraptype = DMP_SLAVE_TYPE_SWRAP;
	} else {
		*eromaddr -= 4;
		return -EILSEQ;
	}

	do {
		/* locate address descriptor */
		do {
			val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
			/* unexpected table end */
			if (desc == DMP_DESC_EOT) {
				*eromaddr -= 4;
				return -EFAULT;
			}
		} while (desc != DMP_DESC_ADDRESS &&
			 desc != DMP_DESC_COMPONENT);

		/* stop if we crossed current component border */
		if (desc == DMP_DESC_COMPONENT) {
			*eromaddr -= 4;
			return 0;
		}

		/* skip upper 32-bit address descriptor */
		if (val & DMP_DESC_ADDRSIZE_GT32)
			brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);

		sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;

		/* next size descriptor can be skipped */
		if (sztype == DMP_SLAVE_SIZE_DESC) {
			szdesc = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
			/* skip upper size descriptor if present */
			if (szdesc & DMP_DESC_ADDRSIZE_GT32)
				brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
		}

		/* look for 4K or 8K register regions */
		if (sztype != DMP_SLAVE_SIZE_4K &&
		    sztype != DMP_SLAVE_SIZE_8K)
			continue;

		stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;

		/* only regular slave and wrapper */
		if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
			*regbase = val & DMP_SLAVE_ADDR_BASE;
		if (*wrapbase == 0 && stype == wraptype)
			*wrapbase = val & DMP_SLAVE_ADDR_BASE;
	} while (*regbase == 0 || *wrapbase == 0);

	return 0;
}

static
int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
{
	struct brcmf_core *core;
	u32 eromaddr;
	u8 desc_type = 0;
	u32 val;
	u16 id;
	u8 nmw, nsw, rev;
	u32 base, wrap;
	int err;

	eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));

	while (desc_type != DMP_DESC_EOT) {
		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
		if (!(val & DMP_DESC_VALID))
			continue;

		if (desc_type == DMP_DESC_EMPTY)
			continue;

		/* need a component descriptor */
		if (desc_type != DMP_DESC_COMPONENT)
			continue;

		id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;

		/* next descriptor must be component as well */
		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
		if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
			return -EFAULT;

		/* only look at cores with master port(s) */
		nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
		nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
		rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;

		/* need core with ports */
		if (nmw + nsw == 0 &&
		    id != BCMA_CORE_PMU &&
		    id != BCMA_CORE_GCI)
			continue;

		/* try to obtain register address info */
		err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
		if (err)
			continue;

		/* finally a core to be added */
		core = brcmf_chip_add_core(ci, id, base, wrap);
		if (IS_ERR(core))
			return PTR_ERR(core);

		core->rev = rev;
	}

	return 0;
}

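/* Identify the chip from the chipid register at SI_ENUM_BASE, pick the
 * backplane specific core handlers and build the core list: hardcoded
 * cores for the SB-based BCM4329, an EROM scan for AI chips. The chip is
 * then put into a passive state and its RAM layout is determined.
 */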
static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
{
	struct brcmf_core *core;
	u32 regdata;
	u32 socitype;
	int ret;

	/* Get CC core rev
	 * Chip ID is assumed to be at offset 0 from SI_ENUM_BASE
	 * For different chiptypes or old sdio hosts w/o chipcommon,
	 * other ways of recognition should be added here.
	 */
	regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
	ci->pub.chip = regdata & CID_ID_MASK;
	ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
	socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;

	brcmf_chip_name(ci->pub.chip, ci->pub.chiprev,
			ci->pub.name, sizeof(ci->pub.name));
	brcmf_dbg(INFO, "found %s chip: %s\n",
		  socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name);

	if (socitype == SOCI_SB) {
		if (ci->pub.chip != BRCM_CC_4329_CHIP_ID) {
			brcmf_err("SB chip is not supported\n");
			return -ENODEV;
		}
		ci->iscoreup = brcmf_chip_sb_iscoreup;
		ci->coredisable = brcmf_chip_sb_coredisable;
		ci->resetcore = brcmf_chip_sb_resetcore;

		core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
					   SI_ENUM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
					   BCM4329_CORE_BUS_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
					   BCM4329_CORE_SOCRAM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
					   BCM4329_CORE_ARM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);

		core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
		brcmf_chip_sb_corerev(ci, core);
	} else if (socitype == SOCI_AI) {
		ci->iscoreup = brcmf_chip_ai_iscoreup;
		ci->coredisable = brcmf_chip_ai_coredisable;
		ci->resetcore = brcmf_chip_ai_resetcore;

		brcmf_chip_dmp_erom_scan(ci);
	} else {
		brcmf_err("chip backplane type %u is not supported\n",
			  socitype);
		return -ENODEV;
	}

	ret = brcmf_chip_cores_check(ci);
	if (ret)
		return ret;

	/* assure chip is passive for core access */
	brcmf_chip_set_passive(&ci->pub);

	/* Call bus specific reset function now. Cores have been determined
	 * but further access may require a chip specific reset at this point.
	 */
	if (ci->ops->reset) {
		ci->ops->reset(ci->ctx, &ci->pub);
		brcmf_chip_set_passive(&ci->pub);
	}

	return brcmf_chip_get_raminfo(&ci->pub);
}

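/* Halt the ARM core so the device firmware cannot run while the host
 * accesses the backplane: the CM3 is simply disabled, while CR4/CA7 are
 * reset with the CPUHALT ioctl bit held asserted.
 */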
static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
{
	struct brcmf_core *core;
	struct brcmf_core_priv *cpu;
	u32 val;

	core = brcmf_chip_get_core(&chip->pub, id);
	if (!core)
		return;

	switch (id) {
	case BCMA_CORE_ARM_CM3:
		brcmf_chip_coredisable(core, 0, 0);
		break;
	case BCMA_CORE_ARM_CR4:
	case BCMA_CORE_ARM_CA7:
		cpu = container_of(core, struct brcmf_core_priv, pub);

		/* clear all IOCTL bits except HALT bit */
		val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
		val &= ARMCR4_BCMA_IOCTL_CPUHALT;
		brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
				     ARMCR4_BCMA_IOCTL_CPUHALT);
		break;
	default:
		brcmf_err("unknown id: %u\n", id);
		break;
	}
}

static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
{
	struct brcmf_chip *pub;
	struct brcmf_core_priv *cc;
	struct brcmf_core *pmu;
	u32 base;
	u32 val;
	int ret = 0;

	pub = &chip->pub;
	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
	base = cc->pub.base;

	/* get chipcommon capabilities */
	pub->cc_caps = chip->ops->read32(chip->ctx,
					 CORE_CC_REG(base, capabilities));
	pub->cc_caps_ext = chip->ops->read32(chip->ctx,
					     CORE_CC_REG(base,
							 capabilities_ext));

	/* get pmu caps & rev */
	pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */
	if (pub->cc_caps & CC_CAP_PMU) {
		val = chip->ops->read32(chip->ctx,
					CORE_CC_REG(pmu->base, pmucapabilities));
		pub->pmurev = val & PCAP_REV_MASK;
		pub->pmucaps = val;
	}

	brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
		  cc->pub.rev, pub->pmurev, pub->pmucaps);

	/* execute bus core specific setup */
	if (chip->ops->setup)
		ret = chip->ops->setup(chip->ctx, pub);

	return ret;
}

struct brcmf_chip *brcmf_chip_attach(void *ctx,
				     const struct brcmf_buscore_ops *ops)
{
	struct brcmf_chip_priv *chip;
	int err = 0;

	if (WARN_ON(!ops->read32))
		err = -EINVAL;
	if (WARN_ON(!ops->write32))
		err = -EINVAL;
	if (WARN_ON(!ops->prepare))
		err = -EINVAL;
	if (WARN_ON(!ops->activate))
		err = -EINVAL;
	if (err < 0)
		return ERR_PTR(-EINVAL);

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&chip->cores);
	chip->num_cores = 0;
	chip->ops = ops;
	chip->ctx = ctx;

	err = ops->prepare(ctx);
	if (err < 0)
		goto fail;

	err = brcmf_chip_recognition(chip);
	if (err < 0)
		goto fail;

	err = brcmf_chip_setup(chip);
	if (err < 0)
		goto fail;

	return &chip->pub;

fail:
	brcmf_chip_detach(&chip->pub);
	return ERR_PTR(err);
}

void brcmf_chip_detach(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;
	struct brcmf_core_priv *tmp;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry_safe(core, tmp, &chip->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
	kfree(chip);
}

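/* Return the 'unit'-th D11 (802.11) core; some chips expose two D11 cores
 * (see brcmf_chip_ai_resetcore()).
 */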
struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry(core, &chip->cores, list) {
		if (core->pub.id == BCMA_CORE_80211) {
			if (unit-- == 0)
				return &core->pub;
		}
	}
	return NULL;
}

struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry(core, &chip->cores, list)
		if (core->pub.id == coreid)
			return &core->pub;

	return NULL;
}

struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *cc;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
	if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
		return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
	return &cc->pub;
}

struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub)
{
	struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub);
	struct brcmf_core *pmu;

	/* See if there is a separate PMU core available */
	if (cc->rev >= 35 &&
	    pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
		pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU);
		if (pmu)
			return pmu;
	}

	/* Fall back to the ChipCommon core for older hardware */
	return cc;
}

bool brcmf_chip_iscoreup(struct brcmf_core *pub)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	return core->chip->iscoreup(core);
}

void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	core->chip->coredisable(core, prereset, reset);
}

void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
			  u32 postreset)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	core->chip->resetcore(core, prereset, reset, postreset);
}

static void
brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;
	struct brcmf_core_priv *sr;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
	brcmf_chip_resetcore(core, 0, 0, 0);

	/* disable bank #3 remap for this device */
	if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
		sr = container_of(core, struct brcmf_core_priv, pub);
		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
	}
}

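/* Counterpart of brcmf_chip_cm3_set_passive(): verify SOCRAM survived the
 * reset, let the bus specific activate() callback run (the CM3 takes no
 * reset vector, hence 0) and release the ARM from reset.
 */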
static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
	if (!brcmf_chip_iscoreup(core)) {
		brcmf_err("SOCRAM core is down after reset?\n");
		return false;
	}

	chip->ops->activate(chip->ctx, &chip->pub, 0);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
	brcmf_chip_resetcore(core, 0, 0, 0);

	return true;
}

static inline void
brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
}

static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
{
	struct brcmf_core *core;

	chip->ops->activate(chip->ctx, &chip->pub, rstvec);

	/* restore ARM */
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);

	return true;
}

static inline void
brcmf_chip_ca7_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CA7);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
}

static bool brcmf_chip_ca7_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
{
	struct brcmf_core *core;

	chip->ops->activate(chip->ctx, &chip->pub, rstvec);

	/* restore ARM */
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CA7);
	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);

	return true;
}

void brcmf_chip_set_passive(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core *arm;

	brcmf_dbg(TRACE, "Enter\n");

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
	if (arm) {
		brcmf_chip_cr4_set_passive(chip);
		return;
	}
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
	if (arm) {
		brcmf_chip_ca7_set_passive(chip);
		return;
	}
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
	if (arm) {
		brcmf_chip_cm3_set_passive(chip);
		return;
	}
}

bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core *arm;

	brcmf_dbg(TRACE, "Enter\n");

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
	if (arm)
		return brcmf_chip_cr4_set_active(chip, rstvec);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
	if (arm)
		return brcmf_chip_ca7_set_active(chip, rstvec);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
	if (arm)
		return brcmf_chip_cm3_set_active(chip);

	return false;
}

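/* Report whether the chip supports save/restore (deep sleep) mode. A PMU
 * revision of at least 17 is required; beyond that the check is chip
 * specific, reading either PMU chipcontrol/retention registers or the
 * chipcommon sr_control registers.
 */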
bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
{
	u32 base, addr, reg, pmu_cc3_mask = ~0;
	struct brcmf_chip_priv *chip;
	struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);

	brcmf_dbg(TRACE, "Enter\n");

	/* old chips with PMU version less than 17 don't support save restore */
	if (pub->pmurev < 17)
		return false;

	base = brcmf_chip_get_chipcommon(pub)->base;
	chip = container_of(pub, struct brcmf_chip_priv, pub);

	switch (pub->chip) {
	case BRCM_CC_4354_CHIP_ID:
	case BRCM_CC_4356_CHIP_ID:
	case BRCM_CC_4345_CHIP_ID:
		/* explicitly check SR engine enable bit */
		pmu_cc3_mask = BIT(2);
		fallthrough;
	case BRCM_CC_43241_CHIP_ID:
	case BRCM_CC_4335_CHIP_ID:
	case BRCM_CC_4339_CHIP_ID:
		/* read PMU chipcontrol register 3 */
		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
		chip->ops->write32(chip->ctx, addr, 3);
		addr = CORE_CC_REG(pmu->base, chipcontrol_data);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & pmu_cc3_mask) != 0;
	case BRCM_CC_43430_CHIP_ID:
		addr = CORE_CC_REG(base, sr_control1);
		reg = chip->ops->read32(chip->ctx, addr);
		return reg != 0;
	case CY_CC_4373_CHIP_ID:
		/* explicitly check SR engine enable bit */
		addr = CORE_CC_REG(base, sr_control0);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
	case BRCM_CC_4359_CHIP_ID:
	case CY_CC_43012_CHIP_ID:
		addr = CORE_CC_REG(pmu->base, retention_ctl);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
	default:
		addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
		reg = chip->ops->read32(chip->ctx, addr);
		if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
			return false;

		addr = CORE_CC_REG(pmu->base, retention_ctl);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
	}
}