/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <soc.h>
#include <chipcommon.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <brcm_hw_ids.h>

#include "debug.h"
#include "bus.h"
#include "commonring.h"
#include "msgbuf.h"
#include "pcie.h"
#include "firmware.h"
#include "chip.h"
#include "core.h"
#include "common.h"


enum brcmf_pcie_state {
	BRCMFMAC_PCIE_STATE_DOWN,
	BRCMFMAC_PCIE_STATE_UP
};

BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");

static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
	BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
	BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
	BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
	BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
	BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
	BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
	BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
	BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};

#define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */

#define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)

/* backplane address space accessed by BAR0 */
#define BRCMF_PCIE_BAR0_WINDOW			0x80
#define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
#define BRCMF_PCIE_BAR0_WRAPPERBASE		0x70

#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
#define BRCMF_PCIE_BAR0_PCIE_ENUM_OFFSET	0x2000

#define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
#define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C

#define BRCMF_PCIE_REG_INTSTATUS		0x90
#define BRCMF_PCIE_REG_INTMASK			0x94
#define BRCMF_PCIE_REG_SBMBX			0x98

#define BRCMF_PCIE_REG_LINK_STATUS_CTRL		0xBC

#define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0	0x140
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1	0x144

#define BRCMF_PCIE2_INTA			0x01
#define BRCMF_PCIE2_INTB			0x02

#define BRCMF_PCIE_INT_0			0x01
#define BRCMF_PCIE_INT_1			0x02
#define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
						 BRCMF_PCIE_INT_1)

#define BRCMF_PCIE_MB_INT_FN0_0			0x0100
#define BRCMF_PCIE_MB_INT_FN0_1			0x0200
#define BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
#define BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
#define BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
#define BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
#define BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
#define BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
#define BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000

#define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB1)

#define BRCMF_PCIE_SHARED_VERSION_7		7
#define BRCMF_PCIE_MIN_SHARED_VERSION		5
#define BRCMF_PCIE_MAX_SHARED_VERSION		BRCMF_PCIE_SHARED_VERSION_7
#define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
#define BRCMF_PCIE_SHARED_DMA_INDEX		0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX		0x100000
#define BRCMF_PCIE_SHARED_HOSTRDY_DB1		0x10000000

#define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000

#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
#define BRCMF_SHARED_RING_BASE_OFFSET		52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68

#define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
#define BRCMF_RING_H2D_RING_MEM_OFFSET		4
#define BRCMF_RING_H2D_RING_STATE_OFFSET	8

#define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
#define BRCMF_RING_MAX_ITEM_OFFSET		4
#define BRCMF_RING_LEN_ITEMS_OFFSET		6
#define BRCMF_RING_MEM_SZ			16
#define BRCMF_RING_STATE_SZ			8

#define BRCMF_DEF_MAX_RXBUFPOST			255

#define BRCMF_CONSOLE_BUFADDR_OFFSET		8
#define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
#define BRCMF_CONSOLE_WRITEIDX_OFFSET		16

#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024

#define BRCMF_D2H_DEV_D3_ACK			0x00000001
#define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
#define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004

#define BRCMF_H2D_HOST_D3_INFORM		0x00000001
#define BRCMF_H2D_HOST_DS_ACK			0x00000002
#define BRCMF_H2D_HOST_D0_INFORM_IN_USE		0x00000008
#define BRCMF_H2D_HOST_D0_INFORM		0x00000010

#define BRCMF_PCIE_MBDATA_TIMEOUT		msecs_to_jiffies(2000)

#define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
#define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
#define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
#define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
#define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
#define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
#define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3

/* Magic number at a magic location to find RAM size */
#define BRCMF_RAMSIZE_MAGIC			0x534d4152	/* SMAR */
#define BRCMF_RAMSIZE_OFFSET			0x6c


struct brcmf_pcie_console {
	u32 base_addr;
	u32 buf_addr;
	u32 bufsize;
	u32 read_idx;
	u8 log_str[256];
	u8 log_idx;
};

struct brcmf_pcie_shared_info {
	u32 tcm_base_address;
	u32 flags;
	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
	struct brcmf_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;
	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;
	u32 dtoh_mb_data_addr;
	u32 ring_info_addr;
	struct brcmf_pcie_console console;
	void *scratch;
	dma_addr_t scratch_dmahandle;
	void *ringupd;
	dma_addr_t ringupd_dmahandle;
	u8 version;
};

struct brcmf_pcie_core_info {
	u32 base;
	u32 wrapbase;
};

struct brcmf_pciedev_info {
	enum brcmf_pcie_state state;
	bool in_irq;
	struct pci_dev *pdev;
	char fw_name[BRCMF_FW_NAME_LEN];
	char nvram_name[BRCMF_FW_NAME_LEN];
	void __iomem *regs;
	void __iomem *tcm;
	u32 ram_base;
	u32 ram_size;
	struct brcmf_chip *ci;
	u32 coreid;
	struct brcmf_pcie_shared_info shared;
	wait_queue_head_t mbdata_resp_wait;
	bool mbdata_completed;
	bool irq_allocated;
	bool wowl_enabled;
	u8 dma_idx_sz;
	void *idxbuf;
	u32 idxbuf_sz;
	dma_addr_t idxbuf_dmahandle;
	u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
			  u16 value);
	struct brcmf_mp_device *settings;
};

struct brcmf_pcie_ringbuf {
	struct brcmf_commonring commonring;
	dma_addr_t dma_handle;
	u32 w_idx_addr;
	u32 r_idx_addr;
	struct brcmf_pciedev_info *devinfo;
	u8 id;
};

/**
 * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
 *
 * @ringmem: dongle memory pointer to ring memory location
 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
 * @max_flowrings: maximum number of tx flow rings supported.
296 * @max_submissionrings: maximum number of submission rings(h2d) supported. 297 * @max_completionrings: maximum number of completion rings(d2h) supported. 298 */ 299 struct brcmf_pcie_dhi_ringinfo { 300 __le32 ringmem; 301 __le32 h2d_w_idx_ptr; 302 __le32 h2d_r_idx_ptr; 303 __le32 d2h_w_idx_ptr; 304 __le32 d2h_r_idx_ptr; 305 struct msgbuf_buf_addr h2d_w_idx_hostaddr; 306 struct msgbuf_buf_addr h2d_r_idx_hostaddr; 307 struct msgbuf_buf_addr d2h_w_idx_hostaddr; 308 struct msgbuf_buf_addr d2h_r_idx_hostaddr; 309 __le16 max_flowrings; 310 __le16 max_submissionrings; 311 __le16 max_completionrings; 312 }; 313 314 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = { 315 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM, 316 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM, 317 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM, 318 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM, 319 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 320 }; 321 322 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = { 323 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, 324 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, 325 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE, 326 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7, 327 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7 328 }; 329 330 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = { 331 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, 332 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, 333 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE, 334 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE, 335 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 336 }; 337 338 339 static u32 340 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset) 341 { 342 void __iomem *address = devinfo->regs + reg_offset; 343 344 return (ioread32(address)); 345 } 346 347 348 static void 349 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset, 350 u32 value) 351 { 352 void __iomem *address = devinfo->regs + reg_offset; 353 354 iowrite32(value, address); 355 } 356 357 358 static u8 359 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 360 { 361 void __iomem *address = devinfo->tcm + mem_offset; 362 363 return (ioread8(address)); 364 } 365 366 367 static u16 368 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 369 { 370 void __iomem *address = devinfo->tcm + mem_offset; 371 372 return (ioread16(address)); 373 } 374 375 376 static void 377 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 378 u16 value) 379 { 380 void __iomem *address = devinfo->tcm + mem_offset; 381 382 iowrite16(value, address); 383 } 384 385 386 static u16 387 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 388 { 389 u16 *address = devinfo->idxbuf + mem_offset; 390 391 return (*(address)); 392 } 393 394 395 static void 396 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 397 u16 value) 398 { 399 u16 *address = devinfo->idxbuf + mem_offset; 400 401 *(address) = value; 402 } 403 404 405 static u32 406 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 407 { 408 void __iomem *address = devinfo->tcm + mem_offset; 409 410 return (ioread32(address)); 411 } 412 413 414 static void 415 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 416 u32 value) 417 { 418 void __iomem *address = devinfo->tcm + mem_offset; 419 420 iowrite32(value, address); 421 } 422 423 424 static u32 425 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 426 { 427 
void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset; 428 429 return (ioread32(addr)); 430 } 431 432 433 static void 434 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 435 u32 value) 436 { 437 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset; 438 439 iowrite32(value, addr); 440 } 441 442 443 static void 444 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 445 void *srcaddr, u32 len) 446 { 447 void __iomem *address = devinfo->tcm + mem_offset; 448 __le32 *src32; 449 __le16 *src16; 450 u8 *src8; 451 452 if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) { 453 if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) { 454 src8 = (u8 *)srcaddr; 455 while (len) { 456 iowrite8(*src8, address); 457 address++; 458 src8++; 459 len--; 460 } 461 } else { 462 len = len / 2; 463 src16 = (__le16 *)srcaddr; 464 while (len) { 465 iowrite16(le16_to_cpu(*src16), address); 466 address += 2; 467 src16++; 468 len--; 469 } 470 } 471 } else { 472 len = len / 4; 473 src32 = (__le32 *)srcaddr; 474 while (len) { 475 iowrite32(le32_to_cpu(*src32), address); 476 address += 4; 477 src32++; 478 len--; 479 } 480 } 481 } 482 483 484 static void 485 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 486 void *dstaddr, u32 len) 487 { 488 void __iomem *address = devinfo->tcm + mem_offset; 489 __le32 *dst32; 490 __le16 *dst16; 491 u8 *dst8; 492 493 if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) { 494 if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) { 495 dst8 = (u8 *)dstaddr; 496 while (len) { 497 *dst8 = ioread8(address); 498 address++; 499 dst8++; 500 len--; 501 } 502 } else { 503 len = len / 2; 504 dst16 = (__le16 *)dstaddr; 505 while (len) { 506 *dst16 = cpu_to_le16(ioread16(address)); 507 address += 2; 508 dst16++; 509 len--; 510 } 511 } 512 } else { 513 len = len / 4; 514 dst32 = (__le32 *)dstaddr; 515 while (len) { 516 *dst32 = cpu_to_le32(ioread32(address)); 517 address += 4; 518 dst32++; 519 len--; 520 } 521 } 522 } 523 524 525 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \ 526 CHIPCREGOFFS(reg), value) 527 528 529 static void 530 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid) 531 { 532 const struct pci_dev *pdev = devinfo->pdev; 533 struct brcmf_core *core; 534 u32 bar0_win; 535 536 core = brcmf_chip_get_core(devinfo->ci, coreid); 537 if (core) { 538 bar0_win = core->base; 539 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win); 540 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, 541 &bar0_win) == 0) { 542 if (bar0_win != core->base) { 543 bar0_win = core->base; 544 pci_write_config_dword(pdev, 545 BRCMF_PCIE_BAR0_WINDOW, 546 bar0_win); 547 } 548 } 549 } else { 550 brcmf_err("Unsupported core selected %x\n", coreid); 551 } 552 } 553 554 555 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo) 556 { 557 struct brcmf_core *core; 558 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD, 559 BRCMF_PCIE_CFGREG_PM_CSR, 560 BRCMF_PCIE_CFGREG_MSI_CAP, 561 BRCMF_PCIE_CFGREG_MSI_ADDR_L, 562 BRCMF_PCIE_CFGREG_MSI_ADDR_H, 563 BRCMF_PCIE_CFGREG_MSI_DATA, 564 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2, 565 BRCMF_PCIE_CFGREG_RBAR_CTRL, 566 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1, 567 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG, 568 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG }; 569 u32 i; 570 u32 val; 571 u32 lsc; 572 573 if (!devinfo->ci) 574 return; 575 576 /* Disable ASPM */ 577 brcmf_pcie_select_core(devinfo, 
BCMA_CORE_PCIE2); 578 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL, 579 &lsc); 580 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB); 581 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL, 582 val); 583 584 /* Watchdog reset */ 585 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON); 586 WRITECC32(devinfo, watchdog, 4); 587 msleep(100); 588 589 /* Restore ASPM */ 590 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 591 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL, 592 lsc); 593 594 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2); 595 if (core->rev <= 13) { 596 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) { 597 brcmf_pcie_write_reg32(devinfo, 598 BRCMF_PCIE_PCIE2REG_CONFIGADDR, 599 cfg_offset[i]); 600 val = brcmf_pcie_read_reg32(devinfo, 601 BRCMF_PCIE_PCIE2REG_CONFIGDATA); 602 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n", 603 cfg_offset[i], val); 604 brcmf_pcie_write_reg32(devinfo, 605 BRCMF_PCIE_PCIE2REG_CONFIGDATA, 606 val); 607 } 608 } 609 } 610 611 612 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo) 613 { 614 u32 config; 615 616 /* BAR1 window may not be sized properly */ 617 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 618 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0); 619 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA); 620 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config); 621 622 device_wakeup_enable(&devinfo->pdev->dev); 623 } 624 625 626 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo) 627 { 628 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) { 629 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4); 630 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX, 631 5); 632 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA, 633 0); 634 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX, 635 7); 636 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA, 637 0); 638 } 639 return 0; 640 } 641 642 643 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, 644 u32 resetintr) 645 { 646 struct brcmf_core *core; 647 648 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) { 649 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM); 650 brcmf_chip_resetcore(core, 0, 0, 0); 651 } 652 653 if (!brcmf_chip_set_active(devinfo->ci, resetintr)) 654 return -EINVAL; 655 return 0; 656 } 657 658 659 static int 660 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data) 661 { 662 struct brcmf_pcie_shared_info *shared; 663 u32 addr; 664 u32 cur_htod_mb_data; 665 u32 i; 666 667 shared = &devinfo->shared; 668 addr = shared->htod_mb_data_addr; 669 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); 670 671 if (cur_htod_mb_data != 0) 672 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n", 673 cur_htod_mb_data); 674 675 i = 0; 676 while (cur_htod_mb_data != 0) { 677 msleep(10); 678 i++; 679 if (i > 100) 680 return -EIO; 681 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); 682 } 683 684 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data); 685 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); 686 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); 687 688 return 0; 689 } 690 691 692 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo) 693 { 694 struct brcmf_pcie_shared_info *shared; 695 u32 addr; 696 u32 dtoh_mb_data; 697 698 shared = 
&devinfo->shared; 699 addr = shared->dtoh_mb_data_addr; 700 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); 701 702 if (!dtoh_mb_data) 703 return; 704 705 brcmf_pcie_write_tcm32(devinfo, addr, 0); 706 707 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data); 708 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) { 709 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n"); 710 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK); 711 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n"); 712 } 713 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE) 714 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n"); 715 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) { 716 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n"); 717 devinfo->mbdata_completed = true; 718 wake_up(&devinfo->mbdata_resp_wait); 719 } 720 } 721 722 723 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo) 724 { 725 struct brcmf_pcie_shared_info *shared; 726 struct brcmf_pcie_console *console; 727 u32 addr; 728 729 shared = &devinfo->shared; 730 console = &shared->console; 731 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET; 732 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr); 733 734 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET; 735 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr); 736 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET; 737 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr); 738 739 brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n", 740 console->base_addr, console->buf_addr, console->bufsize); 741 } 742 743 744 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo) 745 { 746 struct brcmf_pcie_console *console; 747 u32 addr; 748 u8 ch; 749 u32 newidx; 750 751 if (!BRCMF_FWCON_ON()) 752 return; 753 754 console = &devinfo->shared.console; 755 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET; 756 newidx = brcmf_pcie_read_tcm32(devinfo, addr); 757 while (newidx != console->read_idx) { 758 addr = console->buf_addr + console->read_idx; 759 ch = brcmf_pcie_read_tcm8(devinfo, addr); 760 console->read_idx++; 761 if (console->read_idx == console->bufsize) 762 console->read_idx = 0; 763 if (ch == '\r') 764 continue; 765 console->log_str[console->log_idx] = ch; 766 console->log_idx++; 767 if ((ch != '\n') && 768 (console->log_idx == (sizeof(console->log_str) - 2))) { 769 ch = '\n'; 770 console->log_str[console->log_idx] = ch; 771 console->log_idx++; 772 } 773 if (ch == '\n') { 774 console->log_str[console->log_idx] = 0; 775 pr_debug("CONSOLE: %s", console->log_str); 776 console->log_idx = 0; 777 } 778 } 779 } 780 781 782 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo) 783 { 784 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0); 785 } 786 787 788 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo) 789 { 790 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 791 BRCMF_PCIE_MB_INT_D2H_DB | 792 BRCMF_PCIE_MB_INT_FN0_0 | 793 BRCMF_PCIE_MB_INT_FN0_1); 794 } 795 796 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo) 797 { 798 if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1) 799 brcmf_pcie_write_reg32(devinfo, 800 BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1); 801 } 802 803 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg) 804 { 805 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; 806 807 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) { 808 brcmf_pcie_intr_disable(devinfo); 809 
brcmf_dbg(PCIE, "Enter\n"); 810 return IRQ_WAKE_THREAD; 811 } 812 return IRQ_NONE; 813 } 814 815 816 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg) 817 { 818 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; 819 u32 status; 820 821 devinfo->in_irq = true; 822 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); 823 brcmf_dbg(PCIE, "Enter %x\n", status); 824 if (status) { 825 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, 826 status); 827 if (status & (BRCMF_PCIE_MB_INT_FN0_0 | 828 BRCMF_PCIE_MB_INT_FN0_1)) 829 brcmf_pcie_handle_mb_data(devinfo); 830 if (status & BRCMF_PCIE_MB_INT_D2H_DB) { 831 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) 832 brcmf_proto_msgbuf_rx_trigger( 833 &devinfo->pdev->dev); 834 } 835 } 836 brcmf_pcie_bus_console_read(devinfo); 837 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) 838 brcmf_pcie_intr_enable(devinfo); 839 devinfo->in_irq = false; 840 return IRQ_HANDLED; 841 } 842 843 844 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) 845 { 846 struct pci_dev *pdev; 847 848 pdev = devinfo->pdev; 849 850 brcmf_pcie_intr_disable(devinfo); 851 852 brcmf_dbg(PCIE, "Enter\n"); 853 854 pci_enable_msi(pdev); 855 if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr, 856 brcmf_pcie_isr_thread, IRQF_SHARED, 857 "brcmf_pcie_intr", devinfo)) { 858 pci_disable_msi(pdev); 859 brcmf_err("Failed to request IRQ %d\n", pdev->irq); 860 return -EIO; 861 } 862 devinfo->irq_allocated = true; 863 return 0; 864 } 865 866 867 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo) 868 { 869 struct pci_dev *pdev; 870 u32 status; 871 u32 count; 872 873 if (!devinfo->irq_allocated) 874 return; 875 876 pdev = devinfo->pdev; 877 878 brcmf_pcie_intr_disable(devinfo); 879 free_irq(pdev->irq, devinfo); 880 pci_disable_msi(pdev); 881 882 msleep(50); 883 count = 0; 884 while ((devinfo->in_irq) && (count < 20)) { 885 msleep(50); 886 count++; 887 } 888 if (devinfo->in_irq) 889 brcmf_err("Still in IRQ (processing) !!!\n"); 890 891 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); 892 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status); 893 894 devinfo->irq_allocated = false; 895 } 896 897 898 static int brcmf_pcie_ring_mb_write_rptr(void *ctx) 899 { 900 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 901 struct brcmf_pciedev_info *devinfo = ring->devinfo; 902 struct brcmf_commonring *commonring = &ring->commonring; 903 904 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 905 return -EIO; 906 907 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr, 908 commonring->w_ptr, ring->id); 909 910 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr); 911 912 return 0; 913 } 914 915 916 static int brcmf_pcie_ring_mb_write_wptr(void *ctx) 917 { 918 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 919 struct brcmf_pciedev_info *devinfo = ring->devinfo; 920 struct brcmf_commonring *commonring = &ring->commonring; 921 922 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 923 return -EIO; 924 925 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr, 926 commonring->r_ptr, ring->id); 927 928 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr); 929 930 return 0; 931 } 932 933 934 static int brcmf_pcie_ring_mb_ring_bell(void *ctx) 935 { 936 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 937 struct brcmf_pciedev_info *devinfo = ring->devinfo; 938 939 if (devinfo->state 
!= BRCMFMAC_PCIE_STATE_UP) 940 return -EIO; 941 942 brcmf_dbg(PCIE, "RING !\n"); 943 /* Any arbitrary value will do, lets use 1 */ 944 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1); 945 946 return 0; 947 } 948 949 950 static int brcmf_pcie_ring_mb_update_rptr(void *ctx) 951 { 952 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 953 struct brcmf_pciedev_info *devinfo = ring->devinfo; 954 struct brcmf_commonring *commonring = &ring->commonring; 955 956 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 957 return -EIO; 958 959 commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr); 960 961 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr, 962 commonring->w_ptr, ring->id); 963 964 return 0; 965 } 966 967 968 static int brcmf_pcie_ring_mb_update_wptr(void *ctx) 969 { 970 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 971 struct brcmf_pciedev_info *devinfo = ring->devinfo; 972 struct brcmf_commonring *commonring = &ring->commonring; 973 974 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 975 return -EIO; 976 977 commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr); 978 979 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr, 980 commonring->r_ptr, ring->id); 981 982 return 0; 983 } 984 985 986 static void * 987 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo, 988 u32 size, u32 tcm_dma_phys_addr, 989 dma_addr_t *dma_handle) 990 { 991 void *ring; 992 u64 address; 993 994 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle, 995 GFP_KERNEL); 996 if (!ring) 997 return NULL; 998 999 address = (u64)*dma_handle; 1000 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr, 1001 address & 0xffffffff); 1002 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32); 1003 1004 memset(ring, 0, size); 1005 1006 return (ring); 1007 } 1008 1009 1010 static struct brcmf_pcie_ringbuf * 1011 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, 1012 u32 tcm_ring_phys_addr) 1013 { 1014 void *dma_buf; 1015 dma_addr_t dma_handle; 1016 struct brcmf_pcie_ringbuf *ring; 1017 u32 size; 1018 u32 addr; 1019 const u32 *ring_itemsize_array; 1020 1021 if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7) 1022 ring_itemsize_array = brcmf_ring_itemsize_pre_v7; 1023 else 1024 ring_itemsize_array = brcmf_ring_itemsize; 1025 1026 size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id]; 1027 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size, 1028 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET, 1029 &dma_handle); 1030 if (!dma_buf) 1031 return NULL; 1032 1033 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET; 1034 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]); 1035 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET; 1036 brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]); 1037 1038 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1039 if (!ring) { 1040 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf, 1041 dma_handle); 1042 return NULL; 1043 } 1044 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id], 1045 ring_itemsize_array[ring_id], dma_buf); 1046 ring->dma_handle = dma_handle; 1047 ring->devinfo = devinfo; 1048 brcmf_commonring_register_cb(&ring->commonring, 1049 brcmf_pcie_ring_mb_ring_bell, 1050 brcmf_pcie_ring_mb_update_rptr, 1051 brcmf_pcie_ring_mb_update_wptr, 1052 brcmf_pcie_ring_mb_write_rptr, 1053 brcmf_pcie_ring_mb_write_wptr, ring); 1054 1055 return 
(ring); 1056 } 1057 1058 1059 static void brcmf_pcie_release_ringbuffer(struct device *dev, 1060 struct brcmf_pcie_ringbuf *ring) 1061 { 1062 void *dma_buf; 1063 u32 size; 1064 1065 if (!ring) 1066 return; 1067 1068 dma_buf = ring->commonring.buf_addr; 1069 if (dma_buf) { 1070 size = ring->commonring.depth * ring->commonring.item_len; 1071 dma_free_coherent(dev, size, dma_buf, ring->dma_handle); 1072 } 1073 kfree(ring); 1074 } 1075 1076 1077 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo) 1078 { 1079 u32 i; 1080 1081 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) { 1082 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev, 1083 devinfo->shared.commonrings[i]); 1084 devinfo->shared.commonrings[i] = NULL; 1085 } 1086 kfree(devinfo->shared.flowrings); 1087 devinfo->shared.flowrings = NULL; 1088 if (devinfo->idxbuf) { 1089 dma_free_coherent(&devinfo->pdev->dev, 1090 devinfo->idxbuf_sz, 1091 devinfo->idxbuf, 1092 devinfo->idxbuf_dmahandle); 1093 devinfo->idxbuf = NULL; 1094 } 1095 } 1096 1097 1098 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) 1099 { 1100 struct brcmf_pcie_ringbuf *ring; 1101 struct brcmf_pcie_ringbuf *rings; 1102 u32 d2h_w_idx_ptr; 1103 u32 d2h_r_idx_ptr; 1104 u32 h2d_w_idx_ptr; 1105 u32 h2d_r_idx_ptr; 1106 u32 ring_mem_ptr; 1107 u32 i; 1108 u64 address; 1109 u32 bufsz; 1110 u8 idx_offset; 1111 struct brcmf_pcie_dhi_ringinfo ringinfo; 1112 u16 max_flowrings; 1113 u16 max_submissionrings; 1114 u16 max_completionrings; 1115 1116 memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr, 1117 sizeof(ringinfo)); 1118 if (devinfo->shared.version >= 6) { 1119 max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings); 1120 max_flowrings = le16_to_cpu(ringinfo.max_flowrings); 1121 max_completionrings = le16_to_cpu(ringinfo.max_completionrings); 1122 } else { 1123 max_submissionrings = le16_to_cpu(ringinfo.max_flowrings); 1124 max_flowrings = max_submissionrings - 1125 BRCMF_NROF_H2D_COMMON_MSGRINGS; 1126 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS; 1127 } 1128 1129 if (devinfo->dma_idx_sz != 0) { 1130 bufsz = (max_submissionrings + max_completionrings) * 1131 devinfo->dma_idx_sz * 2; 1132 devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz, 1133 &devinfo->idxbuf_dmahandle, 1134 GFP_KERNEL); 1135 if (!devinfo->idxbuf) 1136 devinfo->dma_idx_sz = 0; 1137 } 1138 1139 if (devinfo->dma_idx_sz == 0) { 1140 d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr); 1141 d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr); 1142 h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr); 1143 h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr); 1144 idx_offset = sizeof(u32); 1145 devinfo->write_ptr = brcmf_pcie_write_tcm16; 1146 devinfo->read_ptr = brcmf_pcie_read_tcm16; 1147 brcmf_dbg(PCIE, "Using TCM indices\n"); 1148 } else { 1149 memset(devinfo->idxbuf, 0, bufsz); 1150 devinfo->idxbuf_sz = bufsz; 1151 idx_offset = devinfo->dma_idx_sz; 1152 devinfo->write_ptr = brcmf_pcie_write_idx; 1153 devinfo->read_ptr = brcmf_pcie_read_idx; 1154 1155 h2d_w_idx_ptr = 0; 1156 address = (u64)devinfo->idxbuf_dmahandle; 1157 ringinfo.h2d_w_idx_hostaddr.low_addr = 1158 cpu_to_le32(address & 0xffffffff); 1159 ringinfo.h2d_w_idx_hostaddr.high_addr = 1160 cpu_to_le32(address >> 32); 1161 1162 h2d_r_idx_ptr = h2d_w_idx_ptr + 1163 max_submissionrings * idx_offset; 1164 address += max_submissionrings * idx_offset; 1165 ringinfo.h2d_r_idx_hostaddr.low_addr = 1166 cpu_to_le32(address & 0xffffffff); 1167 
ringinfo.h2d_r_idx_hostaddr.high_addr = 1168 cpu_to_le32(address >> 32); 1169 1170 d2h_w_idx_ptr = h2d_r_idx_ptr + 1171 max_submissionrings * idx_offset; 1172 address += max_submissionrings * idx_offset; 1173 ringinfo.d2h_w_idx_hostaddr.low_addr = 1174 cpu_to_le32(address & 0xffffffff); 1175 ringinfo.d2h_w_idx_hostaddr.high_addr = 1176 cpu_to_le32(address >> 32); 1177 1178 d2h_r_idx_ptr = d2h_w_idx_ptr + 1179 max_completionrings * idx_offset; 1180 address += max_completionrings * idx_offset; 1181 ringinfo.d2h_r_idx_hostaddr.low_addr = 1182 cpu_to_le32(address & 0xffffffff); 1183 ringinfo.d2h_r_idx_hostaddr.high_addr = 1184 cpu_to_le32(address >> 32); 1185 1186 memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr, 1187 &ringinfo, sizeof(ringinfo)); 1188 brcmf_dbg(PCIE, "Using host memory indices\n"); 1189 } 1190 1191 ring_mem_ptr = le32_to_cpu(ringinfo.ringmem); 1192 1193 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) { 1194 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr); 1195 if (!ring) 1196 goto fail; 1197 ring->w_idx_addr = h2d_w_idx_ptr; 1198 ring->r_idx_addr = h2d_r_idx_ptr; 1199 ring->id = i; 1200 devinfo->shared.commonrings[i] = ring; 1201 1202 h2d_w_idx_ptr += idx_offset; 1203 h2d_r_idx_ptr += idx_offset; 1204 ring_mem_ptr += BRCMF_RING_MEM_SZ; 1205 } 1206 1207 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS; 1208 i < BRCMF_NROF_COMMON_MSGRINGS; i++) { 1209 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr); 1210 if (!ring) 1211 goto fail; 1212 ring->w_idx_addr = d2h_w_idx_ptr; 1213 ring->r_idx_addr = d2h_r_idx_ptr; 1214 ring->id = i; 1215 devinfo->shared.commonrings[i] = ring; 1216 1217 d2h_w_idx_ptr += idx_offset; 1218 d2h_r_idx_ptr += idx_offset; 1219 ring_mem_ptr += BRCMF_RING_MEM_SZ; 1220 } 1221 1222 devinfo->shared.max_flowrings = max_flowrings; 1223 devinfo->shared.max_submissionrings = max_submissionrings; 1224 devinfo->shared.max_completionrings = max_completionrings; 1225 rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL); 1226 if (!rings) 1227 goto fail; 1228 1229 brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings); 1230 1231 for (i = 0; i < max_flowrings; i++) { 1232 ring = &rings[i]; 1233 ring->devinfo = devinfo; 1234 ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART; 1235 brcmf_commonring_register_cb(&ring->commonring, 1236 brcmf_pcie_ring_mb_ring_bell, 1237 brcmf_pcie_ring_mb_update_rptr, 1238 brcmf_pcie_ring_mb_update_wptr, 1239 brcmf_pcie_ring_mb_write_rptr, 1240 brcmf_pcie_ring_mb_write_wptr, 1241 ring); 1242 ring->w_idx_addr = h2d_w_idx_ptr; 1243 ring->r_idx_addr = h2d_r_idx_ptr; 1244 h2d_w_idx_ptr += idx_offset; 1245 h2d_r_idx_ptr += idx_offset; 1246 } 1247 devinfo->shared.flowrings = rings; 1248 1249 return 0; 1250 1251 fail: 1252 brcmf_err("Allocating ring buffers failed\n"); 1253 brcmf_pcie_release_ringbuffers(devinfo); 1254 return -ENOMEM; 1255 } 1256 1257 1258 static void 1259 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo) 1260 { 1261 if (devinfo->shared.scratch) 1262 dma_free_coherent(&devinfo->pdev->dev, 1263 BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1264 devinfo->shared.scratch, 1265 devinfo->shared.scratch_dmahandle); 1266 if (devinfo->shared.ringupd) 1267 dma_free_coherent(&devinfo->pdev->dev, 1268 BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1269 devinfo->shared.ringupd, 1270 devinfo->shared.ringupd_dmahandle); 1271 } 1272 1273 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) 1274 { 1275 u64 address; 1276 u32 addr; 1277 1278 devinfo->shared.scratch = 1279 
dma_zalloc_coherent(&devinfo->pdev->dev, 1280 BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1281 &devinfo->shared.scratch_dmahandle, 1282 GFP_KERNEL); 1283 if (!devinfo->shared.scratch) 1284 goto fail; 1285 1286 addr = devinfo->shared.tcm_base_address + 1287 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET; 1288 address = (u64)devinfo->shared.scratch_dmahandle; 1289 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); 1290 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); 1291 addr = devinfo->shared.tcm_base_address + 1292 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET; 1293 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1294 1295 devinfo->shared.ringupd = 1296 dma_zalloc_coherent(&devinfo->pdev->dev, 1297 BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1298 &devinfo->shared.ringupd_dmahandle, 1299 GFP_KERNEL); 1300 if (!devinfo->shared.ringupd) 1301 goto fail; 1302 1303 addr = devinfo->shared.tcm_base_address + 1304 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET; 1305 address = (u64)devinfo->shared.ringupd_dmahandle; 1306 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); 1307 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); 1308 addr = devinfo->shared.tcm_base_address + 1309 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET; 1310 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); 1311 return 0; 1312 1313 fail: 1314 brcmf_err("Allocating scratch buffers failed\n"); 1315 brcmf_pcie_release_scratchbuffers(devinfo); 1316 return -ENOMEM; 1317 } 1318 1319 1320 static void brcmf_pcie_down(struct device *dev) 1321 { 1322 } 1323 1324 1325 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb) 1326 { 1327 return 0; 1328 } 1329 1330 1331 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg, 1332 uint len) 1333 { 1334 return 0; 1335 } 1336 1337 1338 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg, 1339 uint len) 1340 { 1341 return 0; 1342 } 1343 1344 1345 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled) 1346 { 1347 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1348 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; 1349 struct brcmf_pciedev_info *devinfo = buspub->devinfo; 1350 1351 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled); 1352 devinfo->wowl_enabled = enabled; 1353 } 1354 1355 1356 static size_t brcmf_pcie_get_ramsize(struct device *dev) 1357 { 1358 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1359 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; 1360 struct brcmf_pciedev_info *devinfo = buspub->devinfo; 1361 1362 return devinfo->ci->ramsize - devinfo->ci->srsize; 1363 } 1364 1365 1366 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len) 1367 { 1368 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1369 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; 1370 struct brcmf_pciedev_info *devinfo = buspub->devinfo; 1371 1372 brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len); 1373 brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len); 1374 return 0; 1375 } 1376 1377 static 1378 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name) 1379 { 1380 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1381 struct brcmf_fw_request *fwreq; 1382 struct brcmf_fw_name fwnames[] = { 1383 { ext, fw_name }, 1384 }; 1385 1386 fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev, 1387 brcmf_pcie_fwnames, 1388 ARRAY_SIZE(brcmf_pcie_fwnames), 1389 fwnames, ARRAY_SIZE(fwnames)); 1390 if (!fwreq) 1391 return -ENOMEM; 1392 1393 
	kfree(fwreq);
	return 0;
}

static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
	.wowl_config = brcmf_pcie_wowl_config,
	.get_ramsize = brcmf_pcie_get_ramsize,
	.get_memdump = brcmf_pcie_get_memdump,
	.get_fwname = brcmf_pcie_get_fwname,
};


static void
brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
			  u32 data_len)
{
	__le32 *field;
	u32 newsize;

	if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
		return;

	field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
	if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
		return;
	field++;
	newsize = le32_to_cpup(field);

	brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
		  newsize);
	devinfo->ci->ramsize = newsize;
}


static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
	if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", shared->version);
		return -EINVAL;
	}

	/* check if firmware supports DMA indices */
	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
			devinfo->dma_idx_sz = sizeof(u16);
		else
			devinfo->dma_idx_sz = sizeof(u32);
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	if (shared->max_rxbufpost == 0)
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}


static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
					const struct firmware *fw, void *nvram,
					u32 nvram_len)
{
	u32 sharedram_addr;
	u32 sharedram_addr_written;
	u32 loop_counter;
	int err;
	u32 address;
	u32 resetintr;

	brcmf_dbg(PCIE, "Halt ARM.\n");
	err = brcmf_pcie_enter_download_state(devinfo);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
				  (void *)fw->data, fw->size);

	resetintr = get_unaligned_le32(fw->data);
release_firmware(fw); 1506 1507 /* reset last 4 bytes of RAM address. to be used for shared 1508 * area. This identifies when FW is running 1509 */ 1510 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0); 1511 1512 if (nvram) { 1513 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name); 1514 address = devinfo->ci->rambase + devinfo->ci->ramsize - 1515 nvram_len; 1516 brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len); 1517 brcmf_fw_nvram_free(nvram); 1518 } else { 1519 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n", 1520 devinfo->nvram_name); 1521 } 1522 1523 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo, 1524 devinfo->ci->ramsize - 1525 4); 1526 brcmf_dbg(PCIE, "Bring ARM in running state\n"); 1527 err = brcmf_pcie_exit_download_state(devinfo, resetintr); 1528 if (err) 1529 return err; 1530 1531 brcmf_dbg(PCIE, "Wait for FW init\n"); 1532 sharedram_addr = sharedram_addr_written; 1533 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50; 1534 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) { 1535 msleep(50); 1536 sharedram_addr = brcmf_pcie_read_ram32(devinfo, 1537 devinfo->ci->ramsize - 1538 4); 1539 loop_counter--; 1540 } 1541 if (sharedram_addr == sharedram_addr_written) { 1542 brcmf_err("FW failed to initialize\n"); 1543 return -ENODEV; 1544 } 1545 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr); 1546 1547 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr)); 1548 } 1549 1550 1551 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) 1552 { 1553 struct pci_dev *pdev; 1554 int err; 1555 phys_addr_t bar0_addr, bar1_addr; 1556 ulong bar1_size; 1557 1558 pdev = devinfo->pdev; 1559 1560 err = pci_enable_device(pdev); 1561 if (err) { 1562 brcmf_err("pci_enable_device failed err=%d\n", err); 1563 return err; 1564 } 1565 1566 pci_set_master(pdev); 1567 1568 /* Bar-0 mapped address */ 1569 bar0_addr = pci_resource_start(pdev, 0); 1570 /* Bar-1 mapped address */ 1571 bar1_addr = pci_resource_start(pdev, 2); 1572 /* read Bar-1 mapped memory range */ 1573 bar1_size = pci_resource_len(pdev, 2); 1574 if ((bar1_size == 0) || (bar1_addr == 0)) { 1575 brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n", 1576 bar1_size, (unsigned long long)bar1_addr); 1577 return -EINVAL; 1578 } 1579 1580 devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE); 1581 devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size); 1582 1583 if (!devinfo->regs || !devinfo->tcm) { 1584 brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs, 1585 devinfo->tcm); 1586 return -EINVAL; 1587 } 1588 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n", 1589 devinfo->regs, (unsigned long long)bar0_addr); 1590 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n", 1591 devinfo->tcm, (unsigned long long)bar1_addr, 1592 (unsigned int)bar1_size); 1593 1594 return 0; 1595 } 1596 1597 1598 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo) 1599 { 1600 if (devinfo->tcm) 1601 iounmap(devinfo->tcm); 1602 if (devinfo->regs) 1603 iounmap(devinfo->regs); 1604 1605 pci_disable_device(devinfo->pdev); 1606 } 1607 1608 1609 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr) 1610 { 1611 u32 ret_addr; 1612 1613 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1); 1614 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1); 1615 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr); 1616 1617 return ret_addr; 1618 } 1619 1620 1621 static u32 brcmf_pcie_buscore_read32(void 
*ctx, u32 addr)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;

	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
	return brcmf_pcie_read_reg32(devinfo, addr);
}


static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;

	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
	brcmf_pcie_write_reg32(devinfo, addr, value);
}


static int brcmf_pcie_buscoreprep(void *ctx)
{
	return brcmf_pcie_get_resource(ctx);
}


static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
	u32 val;

	devinfo->ci = chip;
	brcmf_pcie_reset_device(devinfo);

	val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	if (val != 0xffffffff)
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       val);

	return 0;
}


static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
					u32 rstvec)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;

	brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
}


static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.reset = brcmf_pcie_buscore_reset,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};

#define BRCMF_PCIE_FW_CODE	0
#define BRCMF_PCIE_FW_NVRAM	1

static void brcmf_pcie_setup(struct device *dev, int ret,
			     struct brcmf_fw_request *fwreq)
{
	const struct firmware *fw;
	void *nvram;
	struct brcmf_bus *bus;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_commonring **flowrings;
	u32 i, nvram_len;

	/* check firmware loading result */
	if (ret)
		goto fail;

	bus = dev_get_drvdata(dev);
	pcie_bus_dev = bus->bus_priv.pcie;
	devinfo = pcie_bus_dev->devinfo;
	brcmf_pcie_attach(devinfo);

	fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
	nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
	nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
	kfree(fwreq);

	/* Some of the firmwares have the size of the memory of the device
	 * defined inside the firmware. This is because part of the memory in
	 * the device is shared and the division is determined by FW. Parse
	 * the firmware and adjust the chip memory size now.
	 */
	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);

	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
	if (ret)
		goto fail;

	devinfo->state = BRCMFMAC_PCIE_STATE_UP;

	ret = brcmf_pcie_init_ringbuffers(devinfo);
	if (ret)
		goto fail;

	ret = brcmf_pcie_init_scratchbuffers(devinfo);
	if (ret)
		goto fail;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	ret = brcmf_pcie_request_irq(devinfo);
	if (ret)
		goto fail;

	/* hook the commonrings in the bus structure.
*/ 1734 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) 1735 bus->msgbuf->commonrings[i] = 1736 &devinfo->shared.commonrings[i]->commonring; 1737 1738 flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings), 1739 GFP_KERNEL); 1740 if (!flowrings) 1741 goto fail; 1742 1743 for (i = 0; i < devinfo->shared.max_flowrings; i++) 1744 flowrings[i] = &devinfo->shared.flowrings[i].commonring; 1745 bus->msgbuf->flowrings = flowrings; 1746 1747 bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset; 1748 bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost; 1749 bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings; 1750 1751 init_waitqueue_head(&devinfo->mbdata_resp_wait); 1752 1753 brcmf_pcie_intr_enable(devinfo); 1754 brcmf_pcie_hostready(devinfo); 1755 if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0) 1756 return; 1757 1758 brcmf_pcie_bus_console_read(devinfo); 1759 1760 fail: 1761 device_release_driver(dev); 1762 } 1763 1764 static struct brcmf_fw_request * 1765 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo) 1766 { 1767 struct brcmf_fw_request *fwreq; 1768 struct brcmf_fw_name fwnames[] = { 1769 { ".bin", devinfo->fw_name }, 1770 { ".txt", devinfo->nvram_name }, 1771 }; 1772 1773 fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev, 1774 brcmf_pcie_fwnames, 1775 ARRAY_SIZE(brcmf_pcie_fwnames), 1776 fwnames, ARRAY_SIZE(fwnames)); 1777 if (!fwreq) 1778 return NULL; 1779 1780 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; 1781 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; 1782 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; 1783 fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus); 1784 fwreq->bus_nr = devinfo->pdev->bus->number; 1785 1786 return fwreq; 1787 } 1788 1789 static int 1790 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1791 { 1792 int ret; 1793 struct brcmf_fw_request *fwreq; 1794 struct brcmf_pciedev_info *devinfo; 1795 struct brcmf_pciedev *pcie_bus_dev; 1796 struct brcmf_bus *bus; 1797 1798 brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device); 1799 1800 ret = -ENOMEM; 1801 devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); 1802 if (devinfo == NULL) 1803 return ret; 1804 1805 devinfo->pdev = pdev; 1806 pcie_bus_dev = NULL; 1807 devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops); 1808 if (IS_ERR(devinfo->ci)) { 1809 ret = PTR_ERR(devinfo->ci); 1810 devinfo->ci = NULL; 1811 goto fail; 1812 } 1813 1814 pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL); 1815 if (pcie_bus_dev == NULL) { 1816 ret = -ENOMEM; 1817 goto fail; 1818 } 1819 1820 devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev, 1821 BRCMF_BUSTYPE_PCIE, 1822 devinfo->ci->chip, 1823 devinfo->ci->chiprev); 1824 if (!devinfo->settings) { 1825 ret = -ENOMEM; 1826 goto fail; 1827 } 1828 1829 bus = kzalloc(sizeof(*bus), GFP_KERNEL); 1830 if (!bus) { 1831 ret = -ENOMEM; 1832 goto fail; 1833 } 1834 bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL); 1835 if (!bus->msgbuf) { 1836 ret = -ENOMEM; 1837 kfree(bus); 1838 goto fail; 1839 } 1840 1841 /* hook it all together. 
*/ 1842 pcie_bus_dev->devinfo = devinfo; 1843 pcie_bus_dev->bus = bus; 1844 bus->dev = &pdev->dev; 1845 bus->bus_priv.pcie = pcie_bus_dev; 1846 bus->ops = &brcmf_pcie_bus_ops; 1847 bus->proto_type = BRCMF_PROTO_MSGBUF; 1848 bus->chip = devinfo->coreid; 1849 bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot); 1850 dev_set_drvdata(&pdev->dev, bus); 1851 1852 fwreq = brcmf_pcie_prepare_fw_request(devinfo); 1853 if (!fwreq) { 1854 ret = -ENOMEM; 1855 goto fail_bus; 1856 } 1857 1858 ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup); 1859 if (ret < 0) { 1860 kfree(fwreq); 1861 goto fail_bus; 1862 } 1863 return 0; 1864 1865 fail_bus: 1866 kfree(bus->msgbuf); 1867 kfree(bus); 1868 fail: 1869 brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device); 1870 brcmf_pcie_release_resource(devinfo); 1871 if (devinfo->ci) 1872 brcmf_chip_detach(devinfo->ci); 1873 if (devinfo->settings) 1874 brcmf_release_module_param(devinfo->settings); 1875 kfree(pcie_bus_dev); 1876 kfree(devinfo); 1877 return ret; 1878 } 1879 1880 1881 static void 1882 brcmf_pcie_remove(struct pci_dev *pdev) 1883 { 1884 struct brcmf_pciedev_info *devinfo; 1885 struct brcmf_bus *bus; 1886 1887 brcmf_dbg(PCIE, "Enter\n"); 1888 1889 bus = dev_get_drvdata(&pdev->dev); 1890 if (bus == NULL) 1891 return; 1892 1893 devinfo = bus->bus_priv.pcie->devinfo; 1894 1895 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN; 1896 if (devinfo->ci) 1897 brcmf_pcie_intr_disable(devinfo); 1898 1899 brcmf_detach(&pdev->dev); 1900 1901 kfree(bus->bus_priv.pcie); 1902 kfree(bus->msgbuf->flowrings); 1903 kfree(bus->msgbuf); 1904 kfree(bus); 1905 1906 brcmf_pcie_release_irq(devinfo); 1907 brcmf_pcie_release_scratchbuffers(devinfo); 1908 brcmf_pcie_release_ringbuffers(devinfo); 1909 brcmf_pcie_reset_device(devinfo); 1910 brcmf_pcie_release_resource(devinfo); 1911 1912 if (devinfo->ci) 1913 brcmf_chip_detach(devinfo->ci); 1914 if (devinfo->settings) 1915 brcmf_release_module_param(devinfo->settings); 1916 1917 kfree(devinfo); 1918 dev_set_drvdata(&pdev->dev, NULL); 1919 } 1920 1921 1922 #ifdef CONFIG_PM 1923 1924 1925 static int brcmf_pcie_pm_enter_D3(struct device *dev) 1926 { 1927 struct brcmf_pciedev_info *devinfo; 1928 struct brcmf_bus *bus; 1929 1930 brcmf_dbg(PCIE, "Enter\n"); 1931 1932 bus = dev_get_drvdata(dev); 1933 devinfo = bus->bus_priv.pcie->devinfo; 1934 1935 brcmf_bus_change_state(bus, BRCMF_BUS_DOWN); 1936 1937 devinfo->mbdata_completed = false; 1938 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM); 1939 1940 wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed, 1941 BRCMF_PCIE_MBDATA_TIMEOUT); 1942 if (!devinfo->mbdata_completed) { 1943 brcmf_err("Timeout on response for entering D3 substate\n"); 1944 brcmf_bus_change_state(bus, BRCMF_BUS_UP); 1945 return -EIO; 1946 } 1947 1948 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN; 1949 1950 return 0; 1951 } 1952 1953 1954 static int brcmf_pcie_pm_leave_D3(struct device *dev) 1955 { 1956 struct brcmf_pciedev_info *devinfo; 1957 struct brcmf_bus *bus; 1958 struct pci_dev *pdev; 1959 int err; 1960 1961 brcmf_dbg(PCIE, "Enter\n"); 1962 1963 bus = dev_get_drvdata(dev); 1964 devinfo = bus->bus_priv.pcie->devinfo; 1965 brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus); 1966 1967 /* Check if device is still up and running, if so we are ready */ 1968 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) { 1969 brcmf_dbg(PCIE, "Try to wakeup device....\n"); 1970 if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM)) 1971 goto cleanup; 1972 brcmf_dbg(PCIE, 
"Hot resume, continue....\n"); 1973 devinfo->state = BRCMFMAC_PCIE_STATE_UP; 1974 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 1975 brcmf_bus_change_state(bus, BRCMF_BUS_UP); 1976 brcmf_pcie_intr_enable(devinfo); 1977 brcmf_pcie_hostready(devinfo); 1978 return 0; 1979 } 1980 1981 cleanup: 1982 brcmf_chip_detach(devinfo->ci); 1983 devinfo->ci = NULL; 1984 pdev = devinfo->pdev; 1985 brcmf_pcie_remove(pdev); 1986 1987 err = brcmf_pcie_probe(pdev, NULL); 1988 if (err) 1989 brcmf_err("probe after resume failed, err=%d\n", err); 1990 1991 return err; 1992 } 1993 1994 1995 static const struct dev_pm_ops brcmf_pciedrvr_pm = { 1996 .suspend = brcmf_pcie_pm_enter_D3, 1997 .resume = brcmf_pcie_pm_leave_D3, 1998 .freeze = brcmf_pcie_pm_enter_D3, 1999 .restore = brcmf_pcie_pm_leave_D3, 2000 }; 2001 2002 2003 #endif /* CONFIG_PM */ 2004 2005 2006 #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ 2007 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } 2008 #define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \ 2009 BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ 2010 subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } 2011 2012 static const struct pci_device_id brcmf_pcie_devid_table[] = { 2013 BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), 2014 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), 2015 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), 2016 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), 2017 BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID), 2018 BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID), 2019 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID), 2020 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID), 2021 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID), 2022 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID), 2023 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID), 2024 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID), 2025 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID), 2026 BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365), 2027 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID), 2028 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID), 2029 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID), 2030 BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID), 2031 { /* end: all zeroes */ } 2032 }; 2033 2034 2035 MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table); 2036 2037 2038 static struct pci_driver brcmf_pciedrvr = { 2039 .node = {}, 2040 .name = KBUILD_MODNAME, 2041 .id_table = brcmf_pcie_devid_table, 2042 .probe = brcmf_pcie_probe, 2043 .remove = brcmf_pcie_remove, 2044 #ifdef CONFIG_PM 2045 .driver.pm = &brcmf_pciedrvr_pm, 2046 #endif 2047 }; 2048 2049 2050 void brcmf_pcie_register(void) 2051 { 2052 int err; 2053 2054 brcmf_dbg(PCIE, "Enter\n"); 2055 err = pci_register_driver(&brcmf_pciedrvr); 2056 if (err) 2057 brcmf_err("PCIE driver registration failed, err=%d\n", err); 2058 } 2059 2060 2061 void brcmf_pcie_exit(void) 2062 { 2063 brcmf_dbg(PCIE, "Enter\n"); 2064 pci_unregister_driver(&brcmf_pciedrvr); 2065 } 2066