1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2010 Broadcom Corporation 4 */ 5 6 #include <linux/types.h> 7 #include <linux/atomic.h> 8 #include <linux/kernel.h> 9 #include <linux/kthread.h> 10 #include <linux/printk.h> 11 #include <linux/pci_ids.h> 12 #include <linux/netdevice.h> 13 #include <linux/interrupt.h> 14 #include <linux/sched/signal.h> 15 #include <linux/mmc/sdio.h> 16 #include <linux/mmc/sdio_ids.h> 17 #include <linux/mmc/sdio_func.h> 18 #include <linux/mmc/card.h> 19 #include <linux/semaphore.h> 20 #include <linux/firmware.h> 21 #include <linux/module.h> 22 #include <linux/bcma/bcma.h> 23 #include <linux/debugfs.h> 24 #include <linux/vmalloc.h> 25 #include <asm/unaligned.h> 26 #include <defs.h> 27 #include <brcmu_wifi.h> 28 #include <brcmu_utils.h> 29 #include <brcm_hw_ids.h> 30 #include <soc.h> 31 #include "sdio.h" 32 #include "chip.h" 33 #include "firmware.h" 34 #include "core.h" 35 #include "common.h" 36 #include "bcdc.h" 37 38 #define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) 39 #define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) 40 41 /* watermark expressed in number of words */ 42 #define DEFAULT_F2_WATERMARK 0x8 43 #define CY_4373_F2_WATERMARK 0x40 44 #define CY_4373_F1_MESBUSYCTRL (CY_4373_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB) 45 #define CY_43012_F2_WATERMARK 0x60 46 #define CY_43012_MES_WATERMARK 0x50 47 #define CY_43012_MESBUSYCTRL (CY_43012_MES_WATERMARK | \ 48 SBSDIO_MESBUSYCTRL_ENAB) 49 #define CY_4339_F2_WATERMARK 48 50 #define CY_4339_MES_WATERMARK 80 51 #define CY_4339_MESBUSYCTRL (CY_4339_MES_WATERMARK | \ 52 SBSDIO_MESBUSYCTRL_ENAB) 53 #define CY_43455_F2_WATERMARK 0x60 54 #define CY_43455_MES_WATERMARK 0x50 55 #define CY_43455_MESBUSYCTRL (CY_43455_MES_WATERMARK | \ 56 SBSDIO_MESBUSYCTRL_ENAB) 57 #define CY_435X_F2_WATERMARK 0x40 58 #define CY_435X_F1_MESBUSYCTRL (CY_435X_F2_WATERMARK | \ 59 SBSDIO_MESBUSYCTRL_ENAB) 60 61 #ifdef DEBUG 62 63 #define BRCMF_TRAP_INFO_SIZE 80 64 65 #define CBUF_LEN (128) 66 67 /* Device console 
   log buffer state */
#define CONSOLE_BUFFER_MAX	2024

/* Dongle-resident console log descriptor; fields are 32-bit dongle
 * addresses/values in little-endian wire order.
 */
struct rte_log_le {
	__le32 buf;		/* Can't be pointer on (64-bit) hosts */
	__le32 buf_size;
	__le32 idx;
	char *_buf_compat;	/* Redundant pointer for backward compat. */
};

struct rte_console {
	/* Virtual UART
	 * When there is no UART (e.g. Quickturn),
	 * the host should write a complete
	 * input line directly into cbuf and then write
	 * the length into vcons_in.
	 * This may also be used when there is a real UART
	 * (at risk of conflicting with
	 * the real UART). vcons_out is currently unused.
	 */
	uint vcons_in;
	uint vcons_out;

	/* Output (logging) buffer
	 * Console output is written to a ring buffer log_buf at index log_idx.
	 * The host may read the output when it sees log_idx advance.
	 * Output will be lost if the output wraps around faster than the host
	 * polls.
	 */
	struct rte_log_le log_le;

	/* Console input line buffer
	 * Characters are read one at a time into cbuf
	 * until <CR> is received, then
	 * the buffer is processed as a command line.
	 * Also used for virtual UART.
	 */
	uint cbuf_idx;
	char cbuf[CBUF_LEN];
};

#endif				/* DEBUG */
#include <chipcommon.h>

#include "bus.h"
#include "debug.h"
#include "tracepoint.h"

#define TXQLEN		2048	/* bulk tx queue length */
#define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
#define TXLOW		(TXHI - 256)	/* turn off flow control below TXLOW */
#define PRIOMASK	7

#define TXRETRIES	2	/* # of retries for tx frames */

#define BRCMF_RXBOUND	50	/* Default for max rx frames in
				 one scheduling */

#define BRCMF_TXBOUND	20	/* Default for max tx frames in
				 one scheduling */

#define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */

#define MEMBLOCK	2048	/* Block size used for downloading
				 of dongle image */
#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
				 biggest possible glom */

#define BRCMF_FIRSTREAD	(1 << 6)

#define BRCMF_CONSOLE	10	/* watchdog interval to poll console */

/* SBSDIO_DEVICE_CTL */

/* 1: device will assert busy signal when receiving CMD53 */
#define SBSDIO_DEVCTL_SETBUSY		0x01
/* 1: assertion of sdio interrupt is synchronous to the sdio clock */
#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02
/* 1: mask all interrupts to host except the chipActive (rev 8) */
#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04
/* 1: isolate internal sdio signals, put external pads in tri-state; requires
 * sdio bus power cycle to clear (rev 9) */
#define SBSDIO_DEVCTL_PADS_ISO		0x08
/* 1: enable F2 Watermark */
#define SBSDIO_DEVCTL_F2WM_ENAB		0x10
/* Force SD->SB reset mapping (rev 11) */
#define SBSDIO_DEVCTL_SB_RST_CTL	0x30
/*   Determined by CoreControl bit */
#define SBSDIO_DEVCTL_RST_CORECTL	0x00
/*   Force backplane reset */
#define SBSDIO_DEVCTL_RST_BPRESET	0x10
/*   Force no backplane reset */
#define SBSDIO_DEVCTL_RST_NOBPRESET	0x20

/* direct(mapped) cis space */
162 163 /* MAPPED common CIS address */ 164 #define SBSDIO_CIS_BASE_COMMON 0x1000 165 /* maximum bytes in one CIS */ 166 #define SBSDIO_CIS_SIZE_LIMIT 0x200 167 /* cis offset addr is < 17 bits */ 168 #define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF 169 170 /* manfid tuple length, include tuple, link bytes */ 171 #define SBSDIO_CIS_MANFID_TUPLE_LEN 6 172 173 #define SD_REG(field) \ 174 (offsetof(struct sdpcmd_regs, field)) 175 176 /* SDIO function 1 register CHIPCLKCSR */ 177 /* Force ALP request to backplane */ 178 #define SBSDIO_FORCE_ALP 0x01 179 /* Force HT request to backplane */ 180 #define SBSDIO_FORCE_HT 0x02 181 /* Force ILP request to backplane */ 182 #define SBSDIO_FORCE_ILP 0x04 183 /* Make ALP ready (power up xtal) */ 184 #define SBSDIO_ALP_AVAIL_REQ 0x08 185 /* Make HT ready (power up PLL) */ 186 #define SBSDIO_HT_AVAIL_REQ 0x10 187 /* Squelch clock requests from HW */ 188 #define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 189 /* Status: ALP is ready */ 190 #define SBSDIO_ALP_AVAIL 0x40 191 /* Status: HT is ready */ 192 #define SBSDIO_HT_AVAIL 0x80 193 #define SBSDIO_CSR_MASK 0x1F 194 #define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) 195 #define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) 196 #define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) 197 #define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) 198 #define SBSDIO_CLKAV(regval, alponly) \ 199 (SBSDIO_ALPAV(regval) && (alponly ? 
1 : SBSDIO_HTAV(regval))) 200 201 /* intstatus */ 202 #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */ 203 #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */ 204 #define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */ 205 #define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */ 206 #define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */ 207 #define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */ 208 #define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */ 209 #define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */ 210 #define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */ 211 #define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */ 212 #define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */ 213 #define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */ 214 #define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */ 215 #define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */ 216 #define I_PC (1 << 10) /* descriptor error */ 217 #define I_PD (1 << 11) /* data error */ 218 #define I_DE (1 << 12) /* Descriptor protocol Error */ 219 #define I_RU (1 << 13) /* Receive descriptor Underflow */ 220 #define I_RO (1 << 14) /* Receive fifo Overflow */ 221 #define I_XU (1 << 15) /* Transmit fifo Underflow */ 222 #define I_RI (1 << 16) /* Receive Interrupt */ 223 #define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */ 224 #define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */ 225 #define I_XI (1 << 24) /* Transmit Interrupt */ 226 #define I_RF_TERM (1 << 25) /* Read Frame Terminate */ 227 #define I_WF_TERM (1 << 26) /* Write Frame Terminate */ 228 #define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */ 229 #define I_SBINT (1 << 28) /* sbintstatus Interrupt */ 230 #define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */ 231 #define I_SRESET (1 << 30) /* CCCR RES interrupt */ 232 #define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */ 233 #define I_ERRORS (I_PC | 
I_PD | I_DE | I_RU | I_RO | I_XU) 234 #define I_DMA (I_RI | I_XI | I_ERRORS) 235 236 /* corecontrol */ 237 #define CC_CISRDY (1 << 0) /* CIS Ready */ 238 #define CC_BPRESEN (1 << 1) /* CCCR RES signal */ 239 #define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */ 240 #define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */ 241 #define CC_XMTDATAAVAIL_MODE (1 << 4) 242 #define CC_XMTDATAAVAIL_CTRL (1 << 5) 243 244 /* SDA_FRAMECTRL */ 245 #define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */ 246 #define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */ 247 #define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */ 248 #define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */ 249 250 /* 251 * Software allocation of To SB Mailbox resources 252 */ 253 254 /* tosbmailbox bits corresponding to intstatus bits */ 255 #define SMB_NAK (1 << 0) /* Frame NAK */ 256 #define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */ 257 #define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */ 258 #define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */ 259 260 /* tosbmailboxdata */ 261 #define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */ 262 263 /* 264 * Software allocation of To Host Mailbox resources 265 */ 266 267 /* intstatus bits */ 268 #define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */ 269 #define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */ 270 #define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */ 271 #define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */ 272 273 /* tohostmailboxdata */ 274 #define HMB_DATA_NAKHANDLED 0x0001 /* retransmit NAK'd frame */ 275 #define HMB_DATA_DEVREADY 0x0002 /* talk to host after enable */ 276 #define HMB_DATA_FC 0x0004 /* per prio flowcontrol update flag */ 277 #define HMB_DATA_FWREADY 0x0008 /* fw ready for protocol activity */ 278 #define HMB_DATA_FWHALT 0x0010 /* firmware halted */ 279 280 #define HMB_DATA_FCDATA_MASK 0xff000000 281 #define HMB_DATA_FCDATA_SHIFT 24 282 283 #define 
HMB_DATA_VERSION_MASK	0x00ff0000
#define HMB_DATA_VERSION_SHIFT	16

/*
 * Software-defined protocol header
 */

/* Current protocol version */
#define SDPCM_PROT_VERSION	4

/*
 * Shared structure between dongle and the host.
 * The structure contains pointers to trap or assert information.
 */
#define SDPCM_SHARED_VERSION       0x0003
#define SDPCM_SHARED_VERSION_MASK  0x00FF
#define SDPCM_SHARED_ASSERT_BUILT  0x0100
#define SDPCM_SHARED_ASSERT        0x0200
#define SDPCM_SHARED_TRAP          0x0400

/* Space for header read, limit for data packets */
#define MAX_HDR_READ	(1 << 6)
#define MAX_RX_DATASZ	2048

/* Bump up limit on waiting for HT to account for first startup;
 * if the image is doing a CRC calculation before programming the PMU
 * for HT availability, it could take a couple hundred ms more, so
 * max out at a 1 second (1000000us).
 */
#undef PMU_MAX_TRANSITION_DLY
#define PMU_MAX_TRANSITION_DLY 1000000

/* Value for ChipClockCSR during initial setup */
#define BRCMF_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF |	\
				 SBSDIO_ALP_AVAIL_REQ)

/* Flags for SDH calls */
#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)

#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
					 * when idle
					 */
#define BRCMF_IDLE_INTERVAL	1

/* KSO polling: 50us per attempt, bounded so the total worst-case wait
 * matches PMU_MAX_TRANSITION_DLY.
 */
#define KSO_WAIT_US 50
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
#define BRCMF_SDIO_MAX_ACCESS_ERRORS	5

#ifdef DEBUG
/* Device console log buffer state (host-side copy of the dongle console) */
struct brcmf_console {
	uint count;		/* Poll interval msec counter */
	uint log_addr;		/* Log struct address (fixed) */
	struct rte_log_le log_le;	/* Log struct (host copy) */
	uint bufsize;		/* Size of log buffer */
	u8 *buf;		/* Log buffer (host copy) */
	uint last;		/* Last buffer read index */
};

/* ARM register snapshot laid down by the dongle firmware on a trap;
 * little-endian wire order.
 */
struct brcmf_trap_info {
	__le32 type;
	__le32 epc;
	__le32 cpsr;
	__le32 spsr;
	__le32 r0;	/* a1 */
	__le32 r1;	/* a2 */
	__le32 r2;	/* a3 */
	__le32 r3;	/* a4 */
	__le32 r4;	/* v1 */
	__le32 r5;	/* v2 */
	__le32 r6;	/* v3 */
	__le32 r7;	/* v4 */
	__le32 r8;	/* v5 */
	__le32 r9;	/* sb/v6 */
	__le32 r10;	/* sl/v7 */
	__le32 r11;	/* fp/v8 */
	__le32 r12;	/* ip */
	__le32 r13;	/* sp */
	__le32 r14;	/* lr */
	__le32 pc;	/* r15 */
};
#endif				/* DEBUG */

/* Host-endian view of the dongle/host shared area */
struct sdpcm_shared {
	u32 flags;
	u32 trap_addr;
	u32 assert_exp_addr;
	u32 assert_file_addr;
	u32 assert_line;
	u32 console_addr;	/* Address of struct rte_console */
	u32 msgtrace_addr;
	u8 tag[32];
	u32 brpt_addr;
};

/* Little-endian wire layout of struct sdpcm_shared as read from the dongle */
struct sdpcm_shared_le {
	__le32 flags;
	__le32 trap_addr;
	__le32 assert_exp_addr;
	__le32 assert_file_addr;
	__le32 assert_line;
	__le32 console_addr;	/* Address of struct rte_console */
	__le32 msgtrace_addr;
	u8 tag[32];
	__le32 brpt_addr;
};

/* dongle SDIO bus specific header info */
struct brcmf_sdio_hdrinfo {
	u8 seq_num;
	u8 channel;
	u16 len;
	u16 len_left;
	u16 len_nxtfrm;
	u8 dat_offset;
	bool lastfrm;
	u16 tail_pad;
};

/*
 * hold counter variables
 */
struct brcmf_sdio_count {
	uint intrcount;		/* Count of device interrupt callbacks */
	uint lastintrs;		/* Count as of last watchdog timer */
	uint pollcnt;		/* Count of active polls */
	uint regfails;		/* Count of R_REG failures */
	uint tx_sderrs;		/* Count of tx attempts with sd errors */
	uint fcqueued;		/* Tx packets that got queued */
	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
	uint rx_toolong;	/* Receive frames too long to receive */
	uint rxc_errors;	/* SDIO errors when reading control frames */
	uint rx_hdrfail;	/* SDIO errors on header reads */
	uint rx_badhdr;		/* Bad received headers (roosync?) */
	uint rx_badseq;		/* Mismatched rx sequence number */
	uint fc_rcvd;		/* Number of flow-control events received */
	uint fc_xoff;		/* Number which turned on flow-control */
	uint fc_xon;		/* Number which turned off flow-control */
	uint rxglomfail;	/* Failed deglom attempts */
	uint rxglomframes;	/* Number of glom frames (superframes) */
	uint rxglompkts;	/* Number of packets from glom frames */
	uint f2rxhdrs;		/* Number of header reads */
	uint f2rxdata;		/* Number of frame data reads */
	uint f2txdata;		/* Number of f2 frame writes */
	uint f1regdata;		/* Number of f1 register accesses */
	uint tickcnt;		/* Number of watchdog been schedule */
	ulong tx_ctlerrs;	/* Err of sending ctrl frames */
	ulong tx_ctlpkts;	/* Ctrl frames sent to dongle */
	ulong rx_ctlerrs;	/* Err of processing rx ctrl frames */
	ulong rx_ctlpkts;	/* Ctrl frames processed from dongle */
	ulong rx_readahead_cnt;	/* packets where header read-ahead was used */
};

/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
struct brcmf_sdio {
	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
	struct brcmf_chip *ci;	/* Chip info struct */
	struct brcmf_core *sdio_core; /* sdio core info struct */

	u32 hostintmask;	/* Copy of Host Interrupt Mask */
	atomic_t intstatus;	/* Intstatus bits (events) pending */
	atomic_t fcstate;	/* State of dongle flow-control */

	uint blocksize;		/* Block size of SDIO transfers */
	uint roundup;		/* Max roundup limit */

	struct pktq txq;	/* Queue length used for flow-control */
	u8 flowcontrol;		/* per prio flow control bitmask */
	u8 tx_seq;		/* Transmit sequence number (next) */
	u8 tx_max;		/* Maximum transmit sequence allowed */

	u8 *hdrbuf;		/* buffer for handling rx frame */
	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
	u8 rx_seq;		/* Receive sequence number (expected) */
	struct brcmf_sdio_hdrinfo cur_read;
				/* info of current read frame */
	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
	bool rxpending;		/* Data frame pending in dongle */

	uint rxbound;		/* Rx frames to read before resched */
	uint txbound;		/* Tx frames to send before resched */
	uint txminmax;

	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
	struct sk_buff_head glom; /* Packet list for glommed superframe */

	u8 *rxbuf;		/* Buffer for receiving control packets */
	uint rxblen;		/* Allocated length of rxbuf */
	u8 *rxctl;		/* Aligned pointer into rxbuf */
	u8 *rxctl_orig;		/* pointer for freeing rxctl */
	uint rxlen;		/* Length of valid data in buffer */
	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */

	u8 sdpcm_ver;		/* Bus protocol reported by dongle */

	bool intr;		/* Use interrupts */
	bool poll;		/* Use polling */
	atomic_t ipend;		/* Device interrupt is pending */
	uint spurious;		/* Count of spurious interrupts */
	uint pollrate;		/* Ticks between device polls */
	uint polltick;		/* Tick counter */

#ifdef DEBUG
	uint console_interval;
	struct brcmf_console console;	/* Console output polling support */
	uint console_addr;	/* Console address from shared struct */
#endif				/* DEBUG */

	uint clkstate;		/* State of sd and backplane clock(s) */
	s32 idletime;		/* Control for activity timeout */
	s32 idlecount;		/* Activity timeout counter */
	s32 idleclock;		/* How to set bus driver when idle */
	bool rxflow_mode;	/* Rx flow control mode */
	bool rxflow;		/* Is rx flow control on */
	bool alp_only;		/* Don't use HT clock (ALP only) */

	u8 *ctrl_frame_buf;
	u16 ctrl_frame_len;
	bool ctrl_frame_stat;
	int ctrl_frame_err;

	spinlock_t txq_lock;	/* protect bus->txq */
	wait_queue_head_t ctrl_wait;
	wait_queue_head_t dcmd_resp_wait;

	struct timer_list timer;
	struct completion watchdog_wait;
	struct task_struct *watchdog_tsk;
	bool wd_active;

	struct workqueue_struct *brcmf_wq;
	struct work_struct datawork;
	bool dpc_triggered;
	bool dpc_running;

	bool txoff;		/* Transmit flow-controlled */
	struct brcmf_sdio_count sdcnt;
	bool sr_enabled;	/* SaveRestore enabled */
	bool sleeping;

	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
	bool txglom;		/* host tx glomming enable flag */
	u16 head_align;		/* buffer pointer alignment */
	u16 sgentry_align;	/* scatter-gather buffer alignment */
};

/* clkstate */
#define CLK_NONE	0
#define CLK_SDONLY	1
#define CLK_PENDING	2
#define CLK_AVAIL	3

#ifdef DEBUG
static int qcount[NUMPRIO];
#endif				/* DEBUG */

#define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */

#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)

/* Limit on rounding up frames */
static const uint max_roundup = 512;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define ALIGNMENT  8
#else
#define ALIGNMENT  4
#endif

enum brcmf_sdio_frmtype {
	BRCMF_SDIO_FT_NORMAL,
	BRCMF_SDIO_FT_SUPER,
	BRCMF_SDIO_FT_SUB,
};

#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))

/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
	u8 strength;	/* Pad Drive Strength in mA */
	u8 sel;		/* Chip-specific select value */
};

/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
	{32, 0x6},
	{26, 0x7},
	{22, 0x4},
	{16, 0x5},
	{12, 0x2},
	{8, 0x3},
	{4, 0x0},
	{0, 0x1}
};

/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
	{6, 0x7},
	{5, 0x6},
	{4, 0x5},
	{3, 0x4},
	{2, 0x2},
	{1, 0x1},
	{0, 0x0}
};

/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
	{3, 0x3},
	{2, 0x2},
	{1, 0x1},
	{0, 0x0} };

/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
	{16, 0x7},
	{12, 0x5},
	{8, 0x3},
	{4, 0x1}
};

BRCMF_FW_DEF(43143, "brcmfmac43143-sdio");
BRCMF_FW_DEF(43241B0, "brcmfmac43241b0-sdio");
BRCMF_FW_DEF(43241B4, "brcmfmac43241b4-sdio");
BRCMF_FW_DEF(43241B5, "brcmfmac43241b5-sdio");
BRCMF_FW_DEF(4329, "brcmfmac4329-sdio");
BRCMF_FW_DEF(4330, "brcmfmac4330-sdio");
BRCMF_FW_DEF(4334, "brcmfmac4334-sdio");
BRCMF_FW_DEF(43340, "brcmfmac43340-sdio");
BRCMF_FW_DEF(4335, "brcmfmac4335-sdio");
BRCMF_FW_DEF(43362, "brcmfmac43362-sdio");
BRCMF_FW_DEF(4339, "brcmfmac4339-sdio");
BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
/* Note the names are not postfixed with a1 for backward compatibility */
BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio");
BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
BRCMF_FW_DEF(4359, "brcmfmac4359-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");

/* Chip-id / chip-rev-mask to firmware name mapping */
static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
	BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
	BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0),
	BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4),
	BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, 43241B5),
	BRCMF_FW_ENTRY(BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, 4329),
	BRCMF_FW_ENTRY(BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, 4330),
	BRCMF_FW_ENTRY(BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, 4334),
	BRCMF_FW_ENTRY(BRCM_CC_43340_CHIP_ID,
0xFFFFFFFF, 43340), 636 BRCMF_FW_ENTRY(BRCM_CC_43341_CHIP_ID, 0xFFFFFFFF, 43340), 637 BRCMF_FW_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335), 638 BRCMF_FW_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362), 639 BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339), 640 BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0), 641 BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1), 642 BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456), 643 BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455), 644 BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), 645 BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), 646 BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), 647 BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373), 648 BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012) 649 }; 650 651 static void pkt_align(struct sk_buff *p, int len, int align) 652 { 653 uint datalign; 654 datalign = (unsigned long)(p->data); 655 datalign = roundup(datalign, (align)) - datalign; 656 if (datalign) 657 skb_pull(p, datalign); 658 __skb_trim(p, len); 659 } 660 661 /* To check if there's window offered */ 662 static bool data_ok(struct brcmf_sdio *bus) 663 { 664 return (u8)(bus->tx_max - bus->tx_seq) != 0 && 665 ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0; 666 } 667 668 static int 669 brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) 670 { 671 u8 wr_val = 0, rd_val, cmp_val, bmask; 672 int err = 0; 673 int err_cnt = 0; 674 int try_cnt = 0; 675 676 brcmf_dbg(TRACE, "Enter: on=%d\n", on); 677 678 sdio_retune_crc_disable(bus->sdiodev->func1); 679 680 /* Cannot re-tune if device is asleep; defer till we're awake */ 681 if (on) 682 sdio_retune_hold_now(bus->sdiodev->func1); 683 684 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); 685 /* 1st KSO write goes to AOS wake up core if device is asleep */ 686 brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); 687 688 /* In case of 43012 chip, the chip could go down immediately after 689 * 
KSO bit is cleared. So the further reads of KSO register could 690 * fail. Thereby just bailing out immediately after clearing KSO 691 * bit, to avoid polling of KSO bit. 692 */ 693 if (!on && bus->ci->chip == CY_CC_43012_CHIP_ID) 694 return err; 695 696 if (on) { 697 /* device WAKEUP through KSO: 698 * write bit 0 & read back until 699 * both bits 0 (kso bit) & 1 (dev on status) are set 700 */ 701 cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK | 702 SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK; 703 bmask = cmp_val; 704 usleep_range(2000, 3000); 705 } else { 706 /* Put device to sleep, turn off KSO */ 707 cmp_val = 0; 708 /* only check for bit0, bit1(dev on status) may not 709 * get cleared right away 710 */ 711 bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK; 712 } 713 714 do { 715 /* reliable KSO bit set/clr: 716 * the sdiod sleep write access is synced to PMU 32khz clk 717 * just one write attempt may fail, 718 * read it back until it matches written value 719 */ 720 rd_val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, 721 &err); 722 if (!err) { 723 if ((rd_val & bmask) == cmp_val) 724 break; 725 err_cnt = 0; 726 } 727 /* bail out upon subsequent access errors */ 728 if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS)) 729 break; 730 731 udelay(KSO_WAIT_US); 732 brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, 733 &err); 734 735 } while (try_cnt++ < MAX_KSO_ATTEMPTS); 736 737 if (try_cnt > 2) 738 brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt, 739 rd_val, err); 740 741 if (try_cnt > MAX_KSO_ATTEMPTS) 742 brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err); 743 744 if (on) 745 sdio_retune_release(bus->sdiodev->func1); 746 747 sdio_retune_crc_enable(bus->sdiodev->func1); 748 749 return err; 750 } 751 752 #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) 753 754 /* Turn backplane clock on or off */ 755 static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok) 756 { 757 int err; 758 u8 clkctl, clkreq, devctl; 759 unsigned long 
timeout;

	brcmf_dbg(SDIO, "Enter\n");

	clkctl = 0;

	/* With save/restore enabled no register access is needed here;
	 * just track the requested clock state.
	 */
	if (bus->sr_enabled) {
		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
		return 0;
	}

	if (on) {
		/* Request HT Avail */
		clkreq =
		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;

		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				   clkreq, &err);
		if (err) {
			brcmf_err("HT Avail request error: %d\n", err);
			return -EBADE;
		}

		/* Check current status */
		clkctl = brcmf_sdiod_readb(bus->sdiodev,
					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
		if (err) {
			brcmf_err("HT Avail read error: %d\n", err);
			return -EBADE;
		}

		/* Go to pending and await interrupt if appropriate */
		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
			/* Allow only clock-available interrupt */
			devctl = brcmf_sdiod_readb(bus->sdiodev,
						   SBSDIO_DEVICE_CTL, &err);
			if (err) {
				brcmf_err("Devctl error setting CA: %d\n", err);
				return -EBADE;
			}

			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					   devctl, &err);
			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
			bus->clkstate = CLK_PENDING;

			return 0;
		} else if (bus->clkstate == CLK_PENDING) {
			/* Cancel CA-only interrupt filter */
			devctl = brcmf_sdiod_readb(bus->sdiodev,
						   SBSDIO_DEVICE_CTL, &err);
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					   devctl, &err);
		}

		/* Otherwise, wait here (polling) for HT Avail */
		timeout = jiffies +
			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
			clkctl = brcmf_sdiod_readb(bus->sdiodev,
						   SBSDIO_FUNC1_CHIPCLKCSR,
						   &err);
			if (time_after(jiffies, timeout))
				break;
			else
				usleep_range(5000, 10000);
		}
		if (err) {
			brcmf_err("HT Avail request error: %d\n", err);
			return -EBADE;
		}
		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
			brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
				  PMU_MAX_TRANSITION_DLY, clkctl);
			return -EBADE;
		}

		/* Mark clock available */
		bus->clkstate = CLK_AVAIL;
		brcmf_dbg(SDIO, "CLKCTL: turned ON\n");

#if defined(DEBUG)
		if (!bus->alp_only) {
			if (SBSDIO_ALPONLY(clkctl))
				brcmf_err("HT Clock should be on\n");
		}
#endif				/* defined (DEBUG) */

	} else {
		clkreq = 0;

		if (bus->clkstate == CLK_PENDING) {
			/* Cancel CA-only interrupt filter */
			devctl = brcmf_sdiod_readb(bus->sdiodev,
						   SBSDIO_DEVICE_CTL, &err);
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					   devctl, &err);
		}

		bus->clkstate = CLK_SDONLY;
		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				   clkreq, &err);
		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
		if (err) {
			brcmf_err("Failed access turning clock off: %d\n",
				  err);
			return -EBADE;
		}
	}
	return 0;
}

/* Change idle/active SD state */
static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
{
	brcmf_dbg(SDIO, "Enter\n");

	if (on)
		bus->clkstate = CLK_SDONLY;
	else
		bus->clkstate = CLK_NONE;

	return 0;
}

/* Transition SD and backplane clock readiness */
static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
#ifdef DEBUG
	uint oldstate = bus->clkstate;
#endif				/* DEBUG */

	brcmf_dbg(SDIO, "Enter\n");

	/* Early exit if we're already there */
	if (bus->clkstate == target)
		return 0;

	switch (target) {
	case CLK_AVAIL:
		/* Make sure SD clock is available */
		if (bus->clkstate == CLK_NONE)
			brcmf_sdio_sdclk(bus, true);
		/* Now request HT Avail on the backplane */
		brcmf_sdio_htclk(bus, true, pendok);
		break;

	case CLK_SDONLY:
		/* Remove HT request, or bring up SD clock */
		if (bus->clkstate == CLK_NONE)
			brcmf_sdio_sdclk(bus, true);
		else if (bus->clkstate == CLK_AVAIL)
			brcmf_sdio_htclk(bus, false, false);
		else
			brcmf_err("request for %d -> %d\n",
				  bus->clkstate, target);
		break;

	case CLK_NONE:
		/* Make sure to remove HT request */
		if (bus->clkstate == CLK_AVAIL)
			brcmf_sdio_htclk(bus, false, false);
		/* Now remove the SD clock */
		brcmf_sdio_sdclk(bus, false);
		break;
	}
#ifdef DEBUG
	brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
#endif				/* DEBUG */

	return 0;
}

/*
 * brcmf_sdio_bus_sleep() - put the bus to sleep or wake it up.
 * @bus:    device bus context.
 * @sleep:  true to enter sleep state, false to wake.
 * @pendok: passed through to the clock transition (allow CLK_PENDING).
 *
 * With save/restore (SR) enabled, sleep is controlled through the KSO
 * bit; otherwise the SD/backplane clocks are gated directly.
 */
static int
brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
	int err = 0;
	u8 clkcsr;

	brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
		  (sleep ? "SLEEP" : "WAKE"),
		  (bus->sleeping ? "SLEEP" : "WAKE"));

	/* If SR is enabled control bus state with KSO */
	if (bus->sr_enabled) {
		/* Done if we're already in the requested state */
		if (sleep == bus->sleeping)
			goto end;

		/* Going to sleep */
		if (sleep) {
			clkcsr = brcmf_sdiod_readb(bus->sdiodev,
						   SBSDIO_FUNC1_CHIPCLKCSR,
						   &err);
			if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
				brcmf_dbg(SDIO, "no clock, set ALP\n");
				brcmf_sdiod_writeb(bus->sdiodev,
						   SBSDIO_FUNC1_CHIPCLKCSR,
						   SBSDIO_ALP_AVAIL_REQ, &err);
			}
			err = brcmf_sdio_kso_control(bus, false);
		} else {
			err = brcmf_sdio_kso_control(bus, true);
		}
		if (err) {
			brcmf_err("error while changing bus sleep state %d\n",
				  err);
			goto done;
		}
	}

end:
	/* control clocks */
	if (sleep) {
		if (!bus->sr_enabled)
			brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
	} else {
		brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
		brcmf_sdio_wd_timer(bus, true);
	}
	bus->sleeping = sleep;
	brcmf_dbg(SDIO, "new state %s\n",
		  (sleep ? "SLEEP" : "WAKE"));
done:
	brcmf_dbg(SDIO, "Exit: err=%d\n", err);
	return err;

}

#ifdef DEBUG
/* An address is plausible for sdpcm_shared if it is non-zero and its low
 * half is not just the inverted high half (the pattern the firmware
 * writes when no valid address was stored).
 */
static inline bool brcmf_sdio_valid_shared_address(u32 addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}

/* Locate and read the dongle's sdpcm_shared area into @sh (host endian).
 * Claims and releases the SDIO host around the RAM accesses.
 */
static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
				 struct sdpcm_shared *sh)
{
	u32 addr = 0;
	int rv;
	u32 shaddr = 0;
	struct sdpcm_shared_le sh_le;
	__le32 addr_le;

	sdio_claim_host(bus->sdiodev->func1);
	brcmf_sdio_bus_sleep(bus, false, false);

	/*
	 * Read last word in socram to determine
	 * address of sdpcm_shared structure
	 */
	shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
	if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
		shaddr -= bus->ci->srsize;
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
			       (u8 *)&addr_le, 4);
	if (rv < 0)
		goto fail;

	/*
	 * Check if addr is valid.
	 * NVRAM length at the end of memory should have been overwritten.
1024 */ 1025 addr = le32_to_cpu(addr_le); 1026 if (!brcmf_sdio_valid_shared_address(addr)) { 1027 brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr); 1028 rv = -EINVAL; 1029 goto fail; 1030 } 1031 1032 brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr); 1033 1034 /* Read hndrte_shared structure */ 1035 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le, 1036 sizeof(struct sdpcm_shared_le)); 1037 if (rv < 0) 1038 goto fail; 1039 1040 sdio_release_host(bus->sdiodev->func1); 1041 1042 /* Endianness */ 1043 sh->flags = le32_to_cpu(sh_le.flags); 1044 sh->trap_addr = le32_to_cpu(sh_le.trap_addr); 1045 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr); 1046 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr); 1047 sh->assert_line = le32_to_cpu(sh_le.assert_line); 1048 sh->console_addr = le32_to_cpu(sh_le.console_addr); 1049 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr); 1050 1051 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) { 1052 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n", 1053 SDPCM_SHARED_VERSION, 1054 sh->flags & SDPCM_SHARED_VERSION_MASK); 1055 return -EPROTO; 1056 } 1057 return 0; 1058 1059 fail: 1060 brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n", 1061 rv, addr); 1062 sdio_release_host(bus->sdiodev->func1); 1063 return rv; 1064 } 1065 1066 static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus) 1067 { 1068 struct sdpcm_shared sh; 1069 1070 if (brcmf_sdio_readshared(bus, &sh) == 0) 1071 bus->console_addr = sh.console_addr; 1072 } 1073 #else 1074 static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus) 1075 { 1076 } 1077 #endif /* DEBUG */ 1078 1079 static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus) 1080 { 1081 struct brcmf_sdio_dev *sdiod = bus->sdiodev; 1082 struct brcmf_core *core = bus->sdio_core; 1083 u32 intstatus = 0; 1084 u32 hmb_data; 1085 u8 fcbits; 1086 int ret; 1087 1088 brcmf_dbg(SDIO, "Enter\n"); 1089 1090 /* Read mailbox data 
	 * and ack that we did so */
	hmb_data = brcmf_sdiod_readl(sdiod,
				     core->base + SD_REG(tohostmailboxdata),
				     &ret);

	if (!ret)
		brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
				   SMB_INT_ACK, &ret);

	bus->sdcnt.f1regdata += 2;

	/* dongle indicates the firmware has halted/crashed */
	if (hmb_data & HMB_DATA_FWHALT) {
		brcmf_dbg(SDIO, "mailbox indicates firmware halted\n");
		brcmf_fw_crashed(&sdiod->func1->dev);
	}

	/* Dongle recomposed rx frames, accept them again */
	if (hmb_data & HMB_DATA_NAKHANDLED) {
		brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
			  bus->rx_seq);
		if (!bus->rxskip)
			brcmf_err("unexpected NAKHANDLED!\n");

		bus->rxskip = false;
		intstatus |= I_HMB_FRAME_IND;
	}

	/*
	 * DEVREADY does not occur with gSPI.
	 */
	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
		bus->sdpcm_ver =
		    (hmb_data & HMB_DATA_VERSION_MASK) >>
		    HMB_DATA_VERSION_SHIFT;
		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
			brcmf_err("Version mismatch, dongle reports %d, "
				  "expecting %d\n",
				  bus->sdpcm_ver, SDPCM_PROT_VERSION);
		else
			brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
				  bus->sdpcm_ver);

		/*
		 * Retrieve console state address now that firmware should have
		 * updated it.
		 */
		brcmf_sdio_get_console_addr(bus);
	}

	/*
	 * Flow Control has been moved into the RX headers and this out of band
	 * method isn't used any more.
	 * remaining backward compatible with older dongles.
	 */
	if (hmb_data & HMB_DATA_FC) {
		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
			 HMB_DATA_FCDATA_SHIFT;

		if (fcbits & ~bus->flowcontrol)
			bus->sdcnt.fc_xoff++;

		if (bus->flowcontrol & ~fcbits)
			bus->sdcnt.fc_xon++;

		bus->sdcnt.fc_rcvd++;
		bus->flowcontrol = fcbits;
	}

	/* Shouldn't be any others */
	if (hmb_data & ~(HMB_DATA_DEVREADY |
			 HMB_DATA_NAKHANDLED |
			 HMB_DATA_FC |
			 HMB_DATA_FWREADY |
			 HMB_DATA_FWHALT |
			 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
		brcmf_err("Unknown mailbox data content: 0x%02x\n",
			  hmb_data);

	return intstatus;
}

/* Recover from an RX error: optionally abort the F2 command, terminate
 * the partial frame, wait for the device FIFO to drain, and optionally
 * NAK the frame so the dongle retransmits it.
 */
static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
	struct brcmf_core *core = bus->sdio_core;
	uint retries = 0;
	u16 lastrbc;
	u8 hi, lo;
	int err;

	brcmf_err("%sterminate frame%s\n",
		  abort ? "abort command, " : "",
		  rtx ? ", send NAK" : "");

	if (abort)
		brcmf_sdiod_abort(bus->sdiodev, bus->sdiodev->func2);

	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
			   &err);
	bus->sdcnt.f1regdata++;

	/* Wait until the packet has been flushed (device/FIFO stable) */
	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
		hi = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI,
				       &err);
		lo = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO,
				       &err);
		bus->sdcnt.f1regdata += 2;

		if ((hi == 0) && (lo == 0))
			break;

		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
			brcmf_err("count growing: last 0x%04x now 0x%04x\n",
				  lastrbc, (hi << 8) + lo);
		}
		lastrbc = (hi << 8) + lo;
	}

	if (!retries)
		brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
	else
		brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);

	if (rtx) {
		bus->sdcnt.rxrtx++;
		brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
				   SMB_NAK, &err);

		bus->sdcnt.f1regdata++;
		/* Skip rx until the dongle confirms the NAK was handled */
		if (err == 0)
			bus->rxskip = true;
	}

	/* Clear partial in any case */
	bus->cur_read.len = 0;
}

/* Recover from a TX error: abort the F2 command, terminate the write
 * frame, and poll the write-frame byte count until the FIFO drains.
 */
static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
{
	struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
	u8 i, hi, lo;

	/* On failure, abort the command and terminate the frame */
	brcmf_err("sdio error, abort command and terminate frame\n");
	bus->sdcnt.tx_sderrs++;

	brcmf_sdiod_abort(sdiodev, sdiodev->func2);
	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
	bus->sdcnt.f1regdata++;

	for (i = 0; i < 3; i++) {
		hi = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
		lo = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
		bus->sdcnt.f1regdata += 2;
		if ((hi == 0) && (lo == 0))
			break;
	}
}
1250 1251 /* return total length of buffer chain */ 1252 static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus) 1253 { 1254 struct sk_buff *p; 1255 uint total; 1256 1257 total = 0; 1258 skb_queue_walk(&bus->glom, p) 1259 total += p->len; 1260 return total; 1261 } 1262 1263 static void brcmf_sdio_free_glom(struct brcmf_sdio *bus) 1264 { 1265 struct sk_buff *cur, *next; 1266 1267 skb_queue_walk_safe(&bus->glom, cur, next) { 1268 skb_unlink(cur, &bus->glom); 1269 brcmu_pkt_buf_free_skb(cur); 1270 } 1271 } 1272 1273 /** 1274 * brcmfmac sdio bus specific header 1275 * This is the lowest layer header wrapped on the packets transmitted between 1276 * host and WiFi dongle which contains information needed for SDIO core and 1277 * firmware 1278 * 1279 * It consists of 3 parts: hardware header, hardware extension header and 1280 * software header 1281 * hardware header (frame tag) - 4 bytes 1282 * Byte 0~1: Frame length 1283 * Byte 2~3: Checksum, bit-wise inverse of frame length 1284 * hardware extension header - 8 bytes 1285 * Tx glom mode only, N/A for Rx or normal Tx 1286 * Byte 0~1: Packet length excluding hw frame tag 1287 * Byte 2: Reserved 1288 * Byte 3: Frame flags, bit 0: last frame indication 1289 * Byte 4~5: Reserved 1290 * Byte 6~7: Tail padding length 1291 * software header - 8 bytes 1292 * Byte 0: Rx/Tx sequence number 1293 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag 1294 * Byte 2: Length of next data frame, reserved for Tx 1295 * Byte 3: Data offset 1296 * Byte 4: Flow control bits, reserved for Tx 1297 * Byte 5: Maximum Sequence number allowed by firmware for Tx, N/A for Tx packet 1298 * Byte 6~7: Reserved 1299 */ 1300 #define SDPCM_HWHDR_LEN 4 1301 #define SDPCM_HWEXT_LEN 8 1302 #define SDPCM_SWHDR_LEN 8 1303 #define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN) 1304 /* software header */ 1305 #define SDPCM_SEQ_MASK 0x000000ff 1306 #define SDPCM_SEQ_WRAP 256 1307 #define SDPCM_CHANNEL_MASK 0x00000f00 1308 #define SDPCM_CHANNEL_SHIFT 8 1309 
#define SDPCM_CONTROL_CHANNEL	0	/* Control */
#define SDPCM_EVENT_CHANNEL	1	/* Async Event Indication */
#define SDPCM_DATA_CHANNEL	2	/* Data Xmit/Recv */
#define SDPCM_GLOM_CHANNEL	3	/* Coalesced packets */
#define SDPCM_TEST_CHANNEL	15	/* Test/debug packets */
#define SDPCM_GLOMDESC(p)	(((u8 *)p)[1] & 0x80)
#define SDPCM_NEXTLEN_MASK	0x00ff0000
#define SDPCM_NEXTLEN_SHIFT	16
#define SDPCM_DOFFSET_MASK	0xff000000
#define SDPCM_DOFFSET_SHIFT	24
#define SDPCM_FCMASK_MASK	0x000000ff
#define SDPCM_WINDOW_MASK	0x0000ff00
#define SDPCM_WINDOW_SHIFT	8

/* Extract the data offset byte from the software header.
 * NOTE(review): reads the header with a raw 32-bit load; callers are
 * presumed to pass a suitably aligned buffer — confirm on platforms
 * that fault on unaligned access.
 */
static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
{
	u32 hdrvalue;
	hdrvalue = *(u32 *)swheader;
	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
}

/* Return true if the software header marks the frame as an event frame. */
static inline bool brcmf_sdio_fromevntchan(u8 *swheader)
{
	u32 hdrvalue;
	u8 ret;

	hdrvalue = *(u32 *)swheader;
	ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT);

	return (ret == SDPCM_EVENT_CHANNEL);
}

/* Parse and validate a received SDPCM header (hw frame tag + sw header),
 * filling @rd with length, channel, sequence and data-offset info.
 * @type distinguishes normal frames, glom superframes and glom subframes.
 * Returns 0 on success or a negative errno; on most failures the frame
 * is terminated via brcmf_sdio_rxfail() and rd->len is cleared.
 */
static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
			      struct brcmf_sdio_hdrinfo *rd,
			      enum brcmf_sdio_frmtype type)
{
	u16 len, checksum;
	u8 rx_seq, fc, tx_seq_max;
	u32 swheader;

	trace_brcmf_sdpcm_hdr(SDPCM_RX, header);

	/* hw header */
	len = get_unaligned_le16(header);
	checksum = get_unaligned_le16(header + sizeof(u16));
	/* All zero means no more to read */
	if (!(len | checksum)) {
		bus->rxpending = false;
		return -ENODATA;
	}
	/* checksum must be the bit-wise inverse of the length */
	if ((u16)(~(len ^ checksum))) {
		brcmf_err("HW header checksum error\n");
		bus->sdcnt.rx_badhdr++;
		brcmf_sdio_rxfail(bus, false, false);
		return -EIO;
	}
	if (len < SDPCM_HDRLEN) {
		brcmf_err("HW header length error\n");
		return -EPROTO;
	}
	if (type == BRCMF_SDIO_FT_SUPER &&
	    (roundup(len, bus->blocksize) != rd->len)) {
		brcmf_err("HW superframe header length error\n");
		return -EPROTO;
	}
	if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
		brcmf_err("HW subframe header length error\n");
		return -EPROTO;
	}
	rd->len = len;

	/* software header */
	header += SDPCM_HWHDR_LEN;
	swheader = le32_to_cpu(*(__le32 *)header);
	if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
		brcmf_err("Glom descriptor found in superframe head\n");
		rd->len = 0;
		return -EINVAL;
	}
	rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
	rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
	    type != BRCMF_SDIO_FT_SUPER) {
		brcmf_err("HW header length too long\n");
		bus->sdcnt.rx_toolong++;
		brcmf_sdio_rxfail(bus, false, false);
		rd->len = 0;
		return -EPROTO;
	}
	if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
		brcmf_err("Wrong channel for superframe\n");
		rd->len = 0;
		return -EINVAL;
	}
	if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
	    rd->channel != SDPCM_EVENT_CHANNEL) {
		brcmf_err("Wrong channel for subframe\n");
		rd->len = 0;
		return -EINVAL;
	}
	rd->dat_offset = brcmf_sdio_getdatoffset(header);
	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
		brcmf_err("seq %d: bad data offset\n", rx_seq);
		bus->sdcnt.rx_badhdr++;
		brcmf_sdio_rxfail(bus, false, false);
		rd->len = 0;
		return -ENXIO;
	}
	if (rd->seq_num != rx_seq) {
		brcmf_dbg(SDIO, "seq %d, expected %d\n", rx_seq, rd->seq_num);
		bus->sdcnt.rx_badseq++;
		rd->seq_num = rx_seq;
	}
	/* no need to check the reset for subframe */
	if (type == BRCMF_SDIO_FT_SUB)
		return 0;
	/* next-frame length is expressed in 16-byte units */
	rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
	if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
		/* only warn for NON glom packet */
		if (rd->channel != SDPCM_GLOM_CHANNEL)
			brcmf_err("seq %d: next length error\n", rx_seq);
		rd->len_nxtfrm = 0;
	}
	swheader = le32_to_cpu(*(__le32 *)(header + 4));
	fc = swheader & SDPCM_FCMASK_MASK;
	if (bus->flowcontrol != fc) {
		if (~bus->flowcontrol & fc)
			bus->sdcnt.fc_xoff++;
		if (bus->flowcontrol & ~fc)
			bus->sdcnt.fc_xon++;
		bus->sdcnt.fc_rcvd++;
		bus->flowcontrol = fc;
	}
	tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
	if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
		brcmf_err("seq %d: max tx seq number error\n", rx_seq);
		tx_seq_max = bus->tx_seq + 2;
	}
	bus->tx_max = tx_seq_max;

	return 0;
}

/* Write the 4-byte hardware frame tag: length plus its bit-wise inverse. */
static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
{
	*(__le16 *)header = cpu_to_le16(frm_length);
	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
}

/* Build the complete SDPCM TX header (hw tag, optional glom extension
 * header, and sw header) in front of the frame data.
 */
static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
			      struct brcmf_sdio_hdrinfo *hd_info)
{
	u32 hdrval;
	u8 hdr_offset;

	brcmf_sdio_update_hwhdr(header, hd_info->len);
	hdr_offset = SDPCM_HWHDR_LEN;

	if (bus->txglom) {
		hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
		*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
		hdrval = (u16)hd_info->tail_pad << 16;
		*(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
		hdr_offset += SDPCM_HWEXT_LEN;
	}

	hdrval = hd_info->seq_num;
	hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
		  SDPCM_CHANNEL_MASK;
	hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
		  SDPCM_DOFFSET_MASK;
	*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
	*(((__le32 *)(header + hdr_offset)) + 1) = 0;
	trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
}

/* Read and deliver a glommed (coalesced) superframe; returns the number
 * of subframes consumed.
 */
static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
	u16 dlen, totlen;
	u8 *dptr, num = 0;
	u16 sublen;
	struct sk_buff *pfirst, *pnext;

	int errcode;
	u8 doff;

	struct brcmf_sdio_hdrinfo rd_new;

	/* If packets, issue read(s) and send up packet chain */
	/* Return sequence numbers consumed? */

	brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
		  bus->glomd, skb_peek(&bus->glom));

	/* If there's a descriptor, generate the packet chain */
	if (bus->glomd) {
		pfirst = pnext = NULL;
		dlen = (u16) (bus->glomd->len);
		dptr = bus->glomd->data;
		/* descriptor is a list of le16 subframe lengths, so it must
		 * be non-empty and of even length
		 */
		if (!dlen || (dlen & 1)) {
			brcmf_err("bad glomd len(%d), ignore descriptor\n",
				  dlen);
			dlen = 0;
		}

		for (totlen = num = 0; dlen; num++) {
			/* Get (and move past) next length */
			sublen = get_unaligned_le16(dptr);
			dlen -= sizeof(u16);
			dptr += sizeof(u16);
			if ((sublen < SDPCM_HDRLEN) ||
			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
				brcmf_err("descriptor len %d bad: %d\n",
					  num, sublen);
				pnext = NULL;
				break;
			}
			if (sublen % bus->sgentry_align) {
				brcmf_err("sublen %d not multiple of %d\n",
					  sublen, bus->sgentry_align);
			}
			totlen += sublen;

			/* For last frame, adjust read len so total
				 is a block multiple */
			if (!dlen) {
				sublen +=
				    (roundup(totlen, bus->blocksize) - totlen);
				totlen = roundup(totlen, bus->blocksize);
			}

			/* Allocate/chain packet for next subframe */
			pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
			if (pnext == NULL) {
				brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
					  num, sublen);
				break;
			}
			skb_queue_tail(&bus->glom, pnext);

			/* Adhere to start alignment requirements */
			pkt_align(pnext, sublen, bus->sgentry_align);
		}

		/* If all allocations succeeded, save packet chain
			 in bus structure */
		if (pnext) {
			brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
				  totlen, num);
			if (BRCMF_GLOM_ON() && bus->cur_read.len &&
			    totlen != bus->cur_read.len) {
				brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
					  bus->cur_read.len, totlen, rxseq);
			}
			pfirst = pnext = NULL;
		} else {
			/* allocation failed part-way; drop the whole chain */
			brcmf_sdio_free_glom(bus);
			num = 0;
		}

		/* Done with descriptor packet */
		brcmu_pkt_buf_free_skb(bus->glomd);
		bus->glomd = NULL;
		bus->cur_read.len = 0;
	}

	/* Ok -- either we just generated a packet chain,
		 or had one from before */
	if (!skb_queue_empty(&bus->glom)) {
		if (BRCMF_GLOM_ON()) {
			brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
			skb_queue_walk(&bus->glom, pnext) {
				brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
					  pnext, (u8 *) (pnext->data),
					  pnext->len, pnext->len);
			}
		}

		pfirst = skb_peek(&bus->glom);
		dlen = (u16) brcmf_sdio_glom_len(bus);

		/* Do an SDIO read for the superframe.  Configurable iovar to
		 * read directly into the chained packet, or allocate a large
		 * packet and copy into the chain.
		 */
		sdio_claim_host(bus->sdiodev->func1);
		errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
						 &bus->glom, dlen);
		sdio_release_host(bus->sdiodev->func1);
		bus->sdcnt.f2rxdata++;

		/* On failure, kill the superframe */
		if (errcode < 0) {
			brcmf_err("glom read of %d bytes failed: %d\n",
				  dlen, errcode);

			sdio_claim_host(bus->sdiodev->func1);
			brcmf_sdio_rxfail(bus, true, false);
			bus->sdcnt.rxglomfail++;
			brcmf_sdio_free_glom(bus);
			sdio_release_host(bus->sdiodev->func1);
			return 0;
		}

		brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
				   pfirst->data, min_t(int, pfirst->len, 48),
				   "SUPERFRAME:\n");

		rd_new.seq_num = rxseq;
		rd_new.len = dlen;
		sdio_claim_host(bus->sdiodev->func1);
		errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
					     BRCMF_SDIO_FT_SUPER);
		sdio_release_host(bus->sdiodev->func1);
		bus->cur_read.len = rd_new.len_nxtfrm << 4;

		/* Remove superframe header, remember offset */
		skb_pull(pfirst, rd_new.dat_offset);
		num = 0;

		/* Validate all the subframe headers */
		skb_queue_walk(&bus->glom, pnext) {
			/* leave when invalid subframe is found */
			if (errcode)
				break;

			rd_new.len = pnext->len;
			rd_new.seq_num = rxseq++;
			sdio_claim_host(bus->sdiodev->func1);
			errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
						     BRCMF_SDIO_FT_SUB);
			sdio_release_host(bus->sdiodev->func1);
			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
					   pnext->data, 32, "subframe:\n");

			num++;
		}

		if (errcode) {
			/* Terminate frame on error */
			sdio_claim_host(bus->sdiodev->func1);
			brcmf_sdio_rxfail(bus, true, false);
			bus->sdcnt.rxglomfail++;
			brcmf_sdio_free_glom(bus);
			sdio_release_host(bus->sdiodev->func1);
			bus->cur_read.len = 0;
			return 0;
		}

		/* Basic SD framing looks ok - process each packet (header) */

		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
			dptr = (u8 *) (pfirst->data);
			sublen = get_unaligned_le16(dptr);
			doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);

			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
					   dptr, pfirst->len,
					   "Rx Subframe Data:\n");

			__skb_trim(pfirst, sublen);
			skb_pull(pfirst, doff);

			if (pfirst->len == 0) {
				skb_unlink(pfirst, &bus->glom);
				brcmu_pkt_buf_free_skb(pfirst);
				continue;
			}

			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
					   pfirst->data,
					   min_t(int, pfirst->len, 32),
					   "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
					   bus->glom.qlen, pfirst, pfirst->data,
					   pfirst->len, pfirst->next,
					   pfirst->prev);
			skb_unlink(pfirst, &bus->glom);
			/* deliver to the event or data path; ownership of
			 * pfirst passes to the rx handler
			 */
			if (brcmf_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN]))
				brcmf_rx_event(bus->sdiodev->dev, pfirst);
			else
				brcmf_rx_frame(bus->sdiodev->dev, pfirst,
					       false);
			bus->sdcnt.rxglompkts++;
		}

		bus->sdcnt.rxglomframes++;
	}
	return num;
}

/* Sleep until a control (dcmd) response arrives, a signal is pending,
 * or the DCMD_RESP_TIMEOUT expires. Returns remaining jiffies (0 on
 * timeout); *pending is set when interrupted by a signal.
 */
static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
				     bool *pending)
{
	DECLARE_WAITQUEUE(wait, current);
	int timeout = DCMD_RESP_TIMEOUT;

	/* Wait until control frame is available */
	add_wait_queue(&bus->dcmd_resp_wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!(*condition) && (!signal_pending(current) && timeout))
		timeout = schedule_timeout(timeout);

	if (signal_pending(current))
		*pending = true;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&bus->dcmd_resp_wait, &wait);

	return timeout;
}

/* Wake any thread blocked in brcmf_sdio_dcmd_resp_wait(). */
static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
{
	wake_up_interruptible(&bus->dcmd_resp_wait);

	return 0;
}

/* Read the remainder of a control frame (the first BRCMF_FIRSTREAD bytes
 * are already in @hdr) and publish it for brcmf_sdio_bus_rxctl().
 */
static void
brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
	uint rdlen, pad;
1731 u8 *buf = NULL, *rbuf; 1732 int sdret; 1733 1734 brcmf_dbg(SDIO, "Enter\n"); 1735 if (bus->rxblen) 1736 buf = vzalloc(bus->rxblen); 1737 if (!buf) 1738 goto done; 1739 1740 rbuf = bus->rxbuf; 1741 pad = ((unsigned long)rbuf % bus->head_align); 1742 if (pad) 1743 rbuf += (bus->head_align - pad); 1744 1745 /* Copy the already-read portion over */ 1746 memcpy(buf, hdr, BRCMF_FIRSTREAD); 1747 if (len <= BRCMF_FIRSTREAD) 1748 goto gotpkt; 1749 1750 /* Raise rdlen to next SDIO block to avoid tail command */ 1751 rdlen = len - BRCMF_FIRSTREAD; 1752 if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { 1753 pad = bus->blocksize - (rdlen % bus->blocksize); 1754 if ((pad <= bus->roundup) && (pad < bus->blocksize) && 1755 ((len + pad) < bus->sdiodev->bus_if->maxctl)) 1756 rdlen += pad; 1757 } else if (rdlen % bus->head_align) { 1758 rdlen += bus->head_align - (rdlen % bus->head_align); 1759 } 1760 1761 /* Drop if the read is too big or it exceeds our maximum */ 1762 if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) { 1763 brcmf_err("%d-byte control read exceeds %d-byte buffer\n", 1764 rdlen, bus->sdiodev->bus_if->maxctl); 1765 brcmf_sdio_rxfail(bus, false, false); 1766 goto done; 1767 } 1768 1769 if ((len - doff) > bus->sdiodev->bus_if->maxctl) { 1770 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", 1771 len, len - doff, bus->sdiodev->bus_if->maxctl); 1772 bus->sdcnt.rx_toolong++; 1773 brcmf_sdio_rxfail(bus, false, false); 1774 goto done; 1775 } 1776 1777 /* Read remain of frame body */ 1778 sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen); 1779 bus->sdcnt.f2rxdata++; 1780 1781 /* Control frame failures need retransmission */ 1782 if (sdret < 0) { 1783 brcmf_err("read %d control bytes failed: %d\n", 1784 rdlen, sdret); 1785 bus->sdcnt.rxc_errors++; 1786 brcmf_sdio_rxfail(bus, true, true); 1787 goto done; 1788 } else 1789 memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen); 1790 1791 gotpkt: 1792 1793 
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(), 1794 buf, len, "RxCtrl:\n"); 1795 1796 /* Point to valid data and indicate its length */ 1797 spin_lock_bh(&bus->rxctl_lock); 1798 if (bus->rxctl) { 1799 brcmf_err("last control frame is being processed.\n"); 1800 spin_unlock_bh(&bus->rxctl_lock); 1801 vfree(buf); 1802 goto done; 1803 } 1804 bus->rxctl = buf + doff; 1805 bus->rxctl_orig = buf; 1806 bus->rxlen = len - doff; 1807 spin_unlock_bh(&bus->rxctl_lock); 1808 1809 done: 1810 /* Awake any waiters */ 1811 brcmf_sdio_dcmd_resp_wake(bus); 1812 } 1813 1814 /* Pad read to blocksize for efficiency */ 1815 static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen) 1816 { 1817 if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) { 1818 *pad = bus->blocksize - (*rdlen % bus->blocksize); 1819 if (*pad <= bus->roundup && *pad < bus->blocksize && 1820 *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ) 1821 *rdlen += *pad; 1822 } else if (*rdlen % bus->head_align) { 1823 *rdlen += bus->head_align - (*rdlen % bus->head_align); 1824 } 1825 } 1826 1827 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) 1828 { 1829 struct sk_buff *pkt; /* Packet for event or data frames */ 1830 u16 pad; /* Number of pad bytes to read */ 1831 uint rxleft = 0; /* Remaining number of frames allowed */ 1832 int ret; /* Return code from calls */ 1833 uint rxcount = 0; /* Total frames read */ 1834 struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new; 1835 u8 head_read = 0; 1836 1837 brcmf_dbg(SDIO, "Enter\n"); 1838 1839 /* Not finished unless we encounter no more frames indication */ 1840 bus->rxpending = true; 1841 1842 for (rd->seq_num = bus->rx_seq, rxleft = maxframes; 1843 !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA; 1844 rd->seq_num++, rxleft--) { 1845 1846 /* Handle glomming separately */ 1847 if (bus->glomd || !skb_queue_empty(&bus->glom)) { 1848 u8 cnt; 1849 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n", 1850 
bus->glomd, skb_peek(&bus->glom)); 1851 cnt = brcmf_sdio_rxglom(bus, rd->seq_num); 1852 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt); 1853 rd->seq_num += cnt - 1; 1854 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; 1855 continue; 1856 } 1857 1858 rd->len_left = rd->len; 1859 /* read header first for unknow frame length */ 1860 sdio_claim_host(bus->sdiodev->func1); 1861 if (!rd->len) { 1862 ret = brcmf_sdiod_recv_buf(bus->sdiodev, 1863 bus->rxhdr, BRCMF_FIRSTREAD); 1864 bus->sdcnt.f2rxhdrs++; 1865 if (ret < 0) { 1866 brcmf_err("RXHEADER FAILED: %d\n", 1867 ret); 1868 bus->sdcnt.rx_hdrfail++; 1869 brcmf_sdio_rxfail(bus, true, true); 1870 sdio_release_host(bus->sdiodev->func1); 1871 continue; 1872 } 1873 1874 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(), 1875 bus->rxhdr, SDPCM_HDRLEN, 1876 "RxHdr:\n"); 1877 1878 if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd, 1879 BRCMF_SDIO_FT_NORMAL)) { 1880 sdio_release_host(bus->sdiodev->func1); 1881 if (!bus->rxpending) 1882 break; 1883 else 1884 continue; 1885 } 1886 1887 if (rd->channel == SDPCM_CONTROL_CHANNEL) { 1888 brcmf_sdio_read_control(bus, bus->rxhdr, 1889 rd->len, 1890 rd->dat_offset); 1891 /* prepare the descriptor for the next read */ 1892 rd->len = rd->len_nxtfrm << 4; 1893 rd->len_nxtfrm = 0; 1894 /* treat all packet as event if we don't know */ 1895 rd->channel = SDPCM_EVENT_CHANNEL; 1896 sdio_release_host(bus->sdiodev->func1); 1897 continue; 1898 } 1899 rd->len_left = rd->len > BRCMF_FIRSTREAD ? 
1900 rd->len - BRCMF_FIRSTREAD : 0; 1901 head_read = BRCMF_FIRSTREAD; 1902 } 1903 1904 brcmf_sdio_pad(bus, &pad, &rd->len_left); 1905 1906 pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read + 1907 bus->head_align); 1908 if (!pkt) { 1909 /* Give up on data, request rtx of events */ 1910 brcmf_err("brcmu_pkt_buf_get_skb failed\n"); 1911 brcmf_sdio_rxfail(bus, false, 1912 RETRYCHAN(rd->channel)); 1913 sdio_release_host(bus->sdiodev->func1); 1914 continue; 1915 } 1916 skb_pull(pkt, head_read); 1917 pkt_align(pkt, rd->len_left, bus->head_align); 1918 1919 ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt); 1920 bus->sdcnt.f2rxdata++; 1921 sdio_release_host(bus->sdiodev->func1); 1922 1923 if (ret < 0) { 1924 brcmf_err("read %d bytes from channel %d failed: %d\n", 1925 rd->len, rd->channel, ret); 1926 brcmu_pkt_buf_free_skb(pkt); 1927 sdio_claim_host(bus->sdiodev->func1); 1928 brcmf_sdio_rxfail(bus, true, 1929 RETRYCHAN(rd->channel)); 1930 sdio_release_host(bus->sdiodev->func1); 1931 continue; 1932 } 1933 1934 if (head_read) { 1935 skb_push(pkt, head_read); 1936 memcpy(pkt->data, bus->rxhdr, head_read); 1937 head_read = 0; 1938 } else { 1939 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN); 1940 rd_new.seq_num = rd->seq_num; 1941 sdio_claim_host(bus->sdiodev->func1); 1942 if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new, 1943 BRCMF_SDIO_FT_NORMAL)) { 1944 rd->len = 0; 1945 brcmf_sdio_rxfail(bus, true, true); 1946 sdio_release_host(bus->sdiodev->func1); 1947 brcmu_pkt_buf_free_skb(pkt); 1948 continue; 1949 } 1950 bus->sdcnt.rx_readahead_cnt++; 1951 if (rd->len != roundup(rd_new.len, 16)) { 1952 brcmf_err("frame length mismatch:read %d, should be %d\n", 1953 rd->len, 1954 roundup(rd_new.len, 16) >> 4); 1955 rd->len = 0; 1956 brcmf_sdio_rxfail(bus, true, true); 1957 sdio_release_host(bus->sdiodev->func1); 1958 brcmu_pkt_buf_free_skb(pkt); 1959 continue; 1960 } 1961 sdio_release_host(bus->sdiodev->func1); 1962 rd->len_nxtfrm = rd_new.len_nxtfrm; 1963 rd->channel = rd_new.channel; 1964 
rd->dat_offset = rd_new.dat_offset; 1965 1966 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && 1967 BRCMF_DATA_ON()) && 1968 BRCMF_HDRS_ON(), 1969 bus->rxhdr, SDPCM_HDRLEN, 1970 "RxHdr:\n"); 1971 1972 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) { 1973 brcmf_err("readahead on control packet %d?\n", 1974 rd_new.seq_num); 1975 /* Force retry w/normal header read */ 1976 rd->len = 0; 1977 sdio_claim_host(bus->sdiodev->func1); 1978 brcmf_sdio_rxfail(bus, false, true); 1979 sdio_release_host(bus->sdiodev->func1); 1980 brcmu_pkt_buf_free_skb(pkt); 1981 continue; 1982 } 1983 } 1984 1985 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(), 1986 pkt->data, rd->len, "Rx Data:\n"); 1987 1988 /* Save superframe descriptor and allocate packet frame */ 1989 if (rd->channel == SDPCM_GLOM_CHANNEL) { 1990 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) { 1991 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n", 1992 rd->len); 1993 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), 1994 pkt->data, rd->len, 1995 "Glom Data:\n"); 1996 __skb_trim(pkt, rd->len); 1997 skb_pull(pkt, SDPCM_HDRLEN); 1998 bus->glomd = pkt; 1999 } else { 2000 brcmf_err("%s: glom superframe w/o " 2001 "descriptor!\n", __func__); 2002 sdio_claim_host(bus->sdiodev->func1); 2003 brcmf_sdio_rxfail(bus, false, false); 2004 sdio_release_host(bus->sdiodev->func1); 2005 } 2006 /* prepare the descriptor for the next read */ 2007 rd->len = rd->len_nxtfrm << 4; 2008 rd->len_nxtfrm = 0; 2009 /* treat all packet as event if we don't know */ 2010 rd->channel = SDPCM_EVENT_CHANNEL; 2011 continue; 2012 } 2013 2014 /* Fill in packet len and prio, deliver upward */ 2015 __skb_trim(pkt, rd->len); 2016 skb_pull(pkt, rd->dat_offset); 2017 2018 if (pkt->len == 0) 2019 brcmu_pkt_buf_free_skb(pkt); 2020 else if (rd->channel == SDPCM_EVENT_CHANNEL) 2021 brcmf_rx_event(bus->sdiodev->dev, pkt); 2022 else 2023 brcmf_rx_frame(bus->sdiodev->dev, pkt, 2024 false); 2025 2026 /* prepare the descriptor for the next read */ 2027 rd->len = rd->len_nxtfrm << 4; 2028 
		rd->len_nxtfrm = 0;
		/* treat all packet as event if we don't know */
		rd->channel = SDPCM_EVENT_CHANNEL;
	}

	rxcount = maxframes - rxleft;
	/* Message if we hit the limit */
	if (!rxleft)
		brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
	else
		brcmf_dbg(DATA, "processed %d frames\n", rxcount);
	/* Back off rxseq if awaiting rtx, update rx_seq */
	if (bus->rxskip)
		rd->seq_num--;
	bus->rx_seq = rd->seq_num;

	return rxcount;
}

/* Wake any thread blocked in brcmf_sdio_bus_txctl() on bus->ctrl_wait
 * (called from the DPC once a pending control frame has been handled).
 */
static void
brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
{
	wake_up_interruptible(&bus->ctrl_wait);
	return;
}

/* Align pkt->data to bus->head_align by pushing up to head_align-1 bytes
 * of head padding, reallocating headroom (COW) when the skb lacks it.
 * Also zeroes the padding plus the reserved tx header area.
 * Return: number of head-pad bytes added (>= 0), or -ENOMEM.
 */
static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
{
	struct brcmf_bus_stats *stats;
	u16 head_pad;
	u8 *dat_buf;

	dat_buf = (u8 *)(pkt->data);

	/* Check head padding */
	head_pad = ((unsigned long)dat_buf % bus->head_align);
	if (head_pad) {
		if (skb_headroom(pkt) < head_pad) {
			stats = &bus->sdiodev->bus_if->stats;
			atomic_inc(&stats->pktcowed);
			if (skb_cow_head(pkt, head_pad)) {
				atomic_inc(&stats->pktcow_failed);
				return -ENOMEM;
			}
			/* headroom was re-created aligned; no push needed */
			head_pad = 0;
		}
		skb_push(pkt, head_pad);
		dat_buf = (u8 *)(pkt->data);
	}
	memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
	return head_pad;
}

/*
 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
 * bus layer usage.
 */
/* flag marking a dummy skb added for DMA alignment requirement */
#define ALIGN_SKB_FLAG		0x8000
/* bit mask of data length chopped from the previous packet */
#define ALIGN_SKB_CHOP_LEN_MASK	0x7fff

/* Prepare tail padding of one packet in a glom (scatter-gather) chain so
 * that each sg entry is sgentry_align-aligned and the whole chain is a
 * multiple of the F2 block size.  If the skb has no tailroom, the chopped
 * tail bytes plus padding are moved into a dummy skb (tagged with
 * ALIGN_SKB_FLAG in cb) queued right after @pkt; otherwise the skb itself
 * is linearized and extended in place.
 * Return: number of tail-pad bytes added (>= 0), or -ENOMEM.
 */
static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
				    struct sk_buff_head *pktq,
				    struct sk_buff *pkt, u16 total_len)
{
	struct brcmf_sdio_dev *sdiodev;
	struct sk_buff *pkt_pad;
	u16 tail_pad, tail_chop, chain_pad;
	unsigned int blksize;
	bool lastfrm;
	int ntail, ret;

	sdiodev = bus->sdiodev;
	blksize = sdiodev->func2->cur_blksize;
	/* sg entry alignment should be a divisor of block size */
	WARN_ON(blksize % bus->sgentry_align);

	/* Check tail padding */
	lastfrm = skb_queue_is_last(pktq, pkt);
	tail_pad = 0;
	tail_chop = pkt->len % bus->sgentry_align;
	if (tail_chop)
		tail_pad = bus->sgentry_align - tail_chop;
	/* last frame additionally pads the whole chain to a block multiple */
	chain_pad = (total_len + tail_pad) % blksize;
	if (lastfrm && chain_pad)
		tail_pad += blksize - chain_pad;
	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
						bus->head_align);
		if (pkt_pad == NULL)
			return -ENOMEM;
		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
		if (unlikely(ret < 0)) {
			kfree_skb(pkt_pad);
			return ret;
		}
		/* move the chopped tail bytes into the dummy skb and
		 * record their length so postp can restore them
		 */
		memcpy(pkt_pad->data,
		       pkt->data + pkt->len - tail_chop,
		       tail_chop);
		*(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
		skb_trim(pkt, pkt->len - tail_chop);
		skb_trim(pkt_pad, tail_pad + tail_chop);
		__skb_queue_after(pktq, pkt, pkt_pad);
	} else {
		ntail = pkt->data_len + tail_pad -
			(pkt->end - pkt->tail);
		if (skb_cloned(pkt) || ntail > 0)
			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
				return -ENOMEM;
		if (skb_linearize(pkt))
			return -ENOMEM;
		__skb_put(pkt, tail_pad);
	}

	return tail_pad;
}

/**
 * brcmf_sdio_txpkt_prep - packet preparation for transmit
 * @bus: brcmf_sdio structure pointer
 * @pktq: packet list pointer
 * @chan: virtual channel to transmit the packet
 *
 * Processes to be applied to the packet
 *	- Align data buffer pointer
 *	- Align data buffer length
 *	- Prepare header
 * Return: negative value if there is error
 */
static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
		      uint chan)
{
	u16 head_pad, total_len;
	struct sk_buff *pkt_next;
	u8 txseq;
	int ret;
	struct brcmf_sdio_hdrinfo hd_info = {0};

	/* local copy: bus->tx_seq is only advanced after a successful send */
	txseq = bus->tx_seq;
	total_len = 0;
	skb_queue_walk(pktq, pkt_next) {
		/* alignment packet inserted in previous
		 * loop cycle can be skipped as it is
		 * already properly aligned and does not
		 * need an sdpcm header.
		 */
		if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
			continue;

		/* align packet data pointer */
		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
		if (ret < 0)
			return ret;
		head_pad = (u16)ret;
		if (head_pad)
			memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);

		total_len += pkt_next->len;

		hd_info.len = pkt_next->len;
		hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
		if (bus->txglom && pktq->qlen > 1) {
			ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
						       pkt_next, total_len);
			if (ret < 0)
				return ret;
			hd_info.tail_pad = (u16)ret;
			total_len += (u16)ret;
		}

		hd_info.channel = chan;
		hd_info.dat_offset = head_pad + bus->tx_hdrlen;
		hd_info.seq_num = txseq++;

		/* Now fill the header */
		brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);

		if (BRCMF_BYTES_ON() &&
		    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
		     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
			brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
					   "Tx Frame:\n");
		else if (BRCMF_HDRS_ON())
			brcmf_dbg_hex_dump(true, pkt_next->data,
					   head_pad + bus->tx_hdrlen,
					   "Tx Header:\n");
	}
	/* Hardware length tag of the first packet should be total
	 * length of the chain (including padding)
	 */
	if (bus->txglom)
		brcmf_sdio_update_hwhdr(__skb_peek(pktq)->data, total_len);
	return 0;
}

/**
 * brcmf_sdio_txpkt_postp - packet post processing for transmit
 * @bus: brcmf_sdio structure pointer
 * @pktq: packet list pointer
 *
 * Processes to be applied to the packet
 *	- Remove head padding
 *	- Remove tail padding
 */
static void
brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
{
	u8 *hdr;
	u32 dat_offset;
	u16 tail_pad;
	u16 dummy_flags, chop_len;
	struct sk_buff *pkt_next, *tmp, *pkt_prev;

	skb_queue_walk_safe(pktq, pkt_next, tmp) {
		dummy_flags = *(u16 *)(pkt_next->cb);
		if (dummy_flags & ALIGN_SKB_FLAG) {
			/* dummy alignment skb: give the chopped bytes back
			 * to the preceding packet, then drop the dummy
			 */
			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
			if (chop_len) {
				pkt_prev = pkt_next->prev;
				skb_put(pkt_prev, chop_len);
			}
			__skb_unlink(pkt_next, pktq);
			brcmu_pkt_buf_free_skb(pkt_next);
		} else {
			/* strip the sdpcm header using the data offset
			 * recorded in the software header
			 */
			hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
			dat_offset = le32_to_cpu(*(__le32 *)hdr);
			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
				     SDPCM_DOFFSET_SHIFT;
			skb_pull(pkt_next, dat_offset);
			if (bus->txglom) {
				tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
				skb_trim(pkt_next, pkt_next->len - tail_pad);
			}
		}
	}
}

/* Writes a HW/SW header into the packet and sends it.
 */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
			    uint chan)
{
	int ret;
	struct sk_buff *pkt_next, *tmp;

	brcmf_dbg(TRACE, "Enter\n");

	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
	if (ret)
		goto done;

	sdio_claim_host(bus->sdiodev->func1);
	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
	bus->sdcnt.f2txdata++;

	if (ret < 0)
		brcmf_sdio_txfail(bus);

	sdio_release_host(bus->sdiodev->func1);

done:
	/* always undo padding and complete the packets, even on error */
	brcmf_sdio_txpkt_postp(bus, pktq);
	if (ret == 0)
		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
	skb_queue_walk_safe(pktq, pkt_next, tmp) {
		__skb_unlink(pkt_next, pktq);
		brcmf_proto_bcdc_txcomplete(bus->sdiodev->dev, pkt_next,
					    ret == 0);
	}
	return ret;
}

/* Dequeue up to @maxframes data frames from bus->txq (honouring the
 * flow-control precedence map) and transmit them, glomming when enabled.
 * Return: number of frames sent.
 */
static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
	struct sk_buff *pkt;
	struct sk_buff_head pktq;
	u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
	u32 intstatus = 0;
	int ret = 0, prec_out, i;
	uint cnt = 0;
	u8 tx_prec_map, pkt_num;

	brcmf_dbg(TRACE, "Enter\n");

	tx_prec_map = ~bus->flowcontrol;

	/* Send frames until the limit or some other event */
	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
		pkt_num = 1;
		if (bus->txglom)
			pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
					bus->sdiodev->txglomsz);
		pkt_num = min_t(u32, pkt_num,
				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
		__skb_queue_head_init(&pktq);
		spin_lock_bh(&bus->txq_lock);
		for (i = 0; i < pkt_num; i++) {
			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
					      &prec_out);
			if (pkt == NULL)
				break;
			__skb_queue_tail(&pktq, pkt);
		}
		spin_unlock_bh(&bus->txq_lock);
		if (i == 0)
			break;

		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);

		cnt += i;

		/* In poll mode, need to check for other events */
		if (!bus->intr) {
			/* Check device status, signal pending interrupt */
			sdio_claim_host(bus->sdiodev->func1);
			intstatus = brcmf_sdiod_readl(bus->sdiodev,
						      intstat_addr, &ret);
			sdio_release_host(bus->sdiodev->func1);

			/* NOTE(review): this is an F1 register read, yet it
			 * bumps f2txdata -- possibly should be f1regdata;
			 * verify against the counters' intent.
			 */
			bus->sdcnt.f2txdata++;
			if (ret != 0)
				break;
			if (intstatus & bus->hostintmask)
				atomic_set(&bus->ipend, 1);
		}
	}

	/* Deflow-control stack if needed */
	if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
		bus->txoff = false;
		brcmf_proto_bcdc_txflowblock(bus->sdiodev->dev, false);
	}

	return cnt;
}

/* Build the sdpcm header in front of @frame (caller reserved tx_hdrlen
 * bytes of headroom) and send the control frame on F2, retrying up to
 * TXRETRIES times on failure.
 */
static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
	u8 doff;
	u16 pad;
	uint retries = 0;
	struct brcmf_sdio_hdrinfo hd_info = {0};
	int ret;

	brcmf_dbg(SDIO, "Enter\n");

	/* Back the pointer to make room for bus header */
	frame -= bus->tx_hdrlen;
	len += bus->tx_hdrlen;

	/* Add alignment padding (optional for ctl frames) */
	doff = ((unsigned long)frame % bus->head_align);
	if (doff) {
		frame -= doff;
		len += doff;
		memset(frame + bus->tx_hdrlen, 0, doff);
	}

	/* Round send length to next SDIO block */
	pad = 0;
	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
		pad = bus->blocksize - (len % bus->blocksize);
		if ((pad > bus->roundup) || (pad >= bus->blocksize))
			pad = 0;
	} else if (len % bus->head_align) {
		pad = bus->head_align - (len % bus->head_align);
	}
	len += pad;

	hd_info.len = len - pad;
	hd_info.channel = SDPCM_CONTROL_CHANNEL;
	hd_info.dat_offset = doff + bus->tx_hdrlen;
	hd_info.seq_num = bus->tx_seq;
	hd_info.lastfrm = true;
	hd_info.tail_pad = pad;
	brcmf_sdio_hdpack(bus, frame, &hd_info);

	if (bus->txglom)
		brcmf_sdio_update_hwhdr(frame, len);

	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
			   frame, len, "Tx Frame:\n");
	brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
			   BRCMF_HDRS_ON(),
			   frame, min_t(u16, len, 16), "TxHdr:\n");

	do {
		ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);

		if (ret < 0)
			brcmf_sdio_txfail(bus);
		else
			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
	} while (ret < 0 && retries++ < TXRETRIES);

	return ret;
}

/* ULP (ultra-low-power) chips request HT-avail rather than forcing HT */
static bool brcmf_chip_is_ulp(struct brcmf_chip *ci)
{
	if (ci->chip == CY_CC_43012_CHIP_ID)
		return true;
	else
		return false;
}

/* Stop the bus: kill the watchdog thread, mask/clear chip interrupts,
 * disable F2, flush queues and wake any waiters so they see the shutdown.
 */
static void brcmf_sdio_bus_stop(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	struct brcmf_core *core = bus->sdio_core;
	u32 local_hostintmask;
	u8 saveclk, bpreq;
	int err;

	brcmf_dbg(TRACE, "Enter\n");

	if (bus->watchdog_tsk) {
		send_sig(SIGTERM, bus->watchdog_tsk, 1);
		kthread_stop(bus->watchdog_tsk);
		bus->watchdog_tsk = NULL;
	}

	if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
		sdio_claim_host(sdiodev->func1);

		/* Enable clock for device interrupts */
		brcmf_sdio_bus_sleep(bus, false, false);

		/* Disable and clear interrupts at the chip level also */
		brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask),
				   0, NULL);

		local_hostintmask = bus->hostintmask;
		bus->hostintmask = 0;

		/* Force backplane clocks to assure F2 interrupt propagates */
		saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
					    &err);
		if (!err) {
			bpreq = saveclk;
			bpreq |= brcmf_chip_is_ulp(bus->ci) ?
				SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT;
			brcmf_sdiod_writeb(sdiodev,
					   SBSDIO_FUNC1_CHIPCLKCSR,
					   bpreq, &err);
		}
		if (err)
			brcmf_err("Failed to force clock for F2: err %d\n",
				  err);

		/* Turn off the bus (F2), free any pending packets */
		brcmf_dbg(INTR, "disable SDIO interrupts\n");
		sdio_disable_func(sdiodev->func2);

		/* Clear any pending interrupts now that F2 is disabled */
		brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus),
				   local_hostintmask, NULL);

		sdio_release_host(sdiodev->func1);
	}
	/* Clear the data packet queues */
	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);

	/* Clear any held glomming stuff */
	brcmu_pkt_buf_free_skb(bus->glomd);
	brcmf_sdio_free_glom(bus);

	/* Clear rx control and wake any waiters */
	spin_lock_bh(&bus->rxctl_lock);
	bus->rxlen = 0;
	spin_unlock_bh(&bus->rxctl_lock);
	brcmf_sdio_dcmd_resp_wake(bus);

	/* Reset some F2 state stuff */
	bus->rxskip = false;
	bus->tx_seq = bus->rx_seq = 0;
}

/* Re-enable the out-of-band interrupt line once no interrupt is pending */
static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
{
	struct brcmf_sdio_dev *sdiodev;
	unsigned long flags;

	sdiodev = bus->sdiodev;
	if (sdiodev->oob_irq_requested) {
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		if (!sdiodev->irq_en && !atomic_read(&bus->ipend)) {
			enable_irq(sdiodev->settings->bus.sdio.oob_irq_nr);
			sdiodev->irq_en = true;
		}
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
	}
}

/* Read and acknowledge the chip interrupt status; accumulate the masked
 * bits into bus->intstatus for the DPC.  Caller holds the SDIO host.
 */
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
	struct brcmf_core *core = bus->sdio_core;
	u32 addr;
	unsigned long val;
	int ret;

	addr = core->base + SD_REG(intstatus);

	val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
	bus->sdcnt.f1regdata++;
	if (ret != 0)
		return ret;

	val &= bus->hostintmask;
	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));

	/* Clear interrupts */
	if (val) {
		brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
		bus->sdcnt.f1regdata++;
		atomic_or(val, &bus->intstatus);
	}

	return ret;
}

/* Deferred procedure call: the main bus worker.  Restores the backplane
 * clock, collects/acknowledges interrupt status, services mailbox events,
 * reads available rx frames, sends the pending control frame and queued
 * data frames, and re-arms itself if work remains.
 */
static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
	u32 newstatus = 0;
	u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
	unsigned long intstatus;
	uint txlimit = bus->txbound;	/* Tx frames to send before resched */
	uint framecnt;			/* Temporary counter of tx/rx frames */
	int err = 0;

	brcmf_dbg(SDIO, "Enter\n");

	sdio_claim_host(bus->sdiodev->func1);

	/* If waiting for HTAVAIL, check status */
	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
		u8 clkctl, devctl = 0;

#ifdef DEBUG
		/* Check for inconsistent device control */
		devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					   &err);
#endif				/* DEBUG */

		/* Read CSR, if clock on switch to AVAIL, else ignore */
		clkctl = brcmf_sdiod_readb(bus->sdiodev,
					   SBSDIO_FUNC1_CHIPCLKCSR, &err);

		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
			  devctl, clkctl);

		if (SBSDIO_HTAV(clkctl)) {
			devctl = brcmf_sdiod_readb(bus->sdiodev,
						   SBSDIO_DEVICE_CTL, &err);
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdiod_writeb(bus->sdiodev,
					   SBSDIO_DEVICE_CTL, devctl, &err);
			bus->clkstate = CLK_AVAIL;
		}
	}

	/* Make sure backplane clock is on */
	brcmf_sdio_bus_sleep(bus, false, true);

	/* Pending interrupt indicates new device status */
	if (atomic_read(&bus->ipend) > 0) {
		atomic_set(&bus->ipend, 0);
		err = brcmf_sdio_intr_rstatus(bus);
	}

	/* Start with leftover status bits */
	intstatus = atomic_xchg(&bus->intstatus, 0);

	/* Handle flow-control change: read new state in case our ack
	 * crossed another change interrupt. If change still set, assume
	 * FC ON for safety, let next loop through do the debounce.
	 */
	if (intstatus & I_HMB_FC_CHANGE) {
		intstatus &= ~I_HMB_FC_CHANGE;
		brcmf_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err);

		newstatus = brcmf_sdiod_readl(sdiod, intstat_addr, &err);

		bus->sdcnt.f1regdata += 2;
		atomic_set(&bus->fcstate,
			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
		intstatus |= (newstatus & bus->hostintmask);
	}

	/* Handle host mailbox indication */
	if (intstatus & I_HMB_HOST_INT) {
		intstatus &= ~I_HMB_HOST_INT;
		intstatus |= brcmf_sdio_hostmail(bus);
	}

	sdio_release_host(bus->sdiodev->func1);

	/* Generally don't ask for these, can get CRC errors... */
	if (intstatus & I_WR_OOSYNC) {
		brcmf_err("Dongle reports WR_OOSYNC\n");
		intstatus &= ~I_WR_OOSYNC;
	}

	if (intstatus & I_RD_OOSYNC) {
		brcmf_err("Dongle reports RD_OOSYNC\n");
		intstatus &= ~I_RD_OOSYNC;
	}

	if (intstatus & I_SBINT) {
		brcmf_err("Dongle reports SBINT\n");
		intstatus &= ~I_SBINT;
	}

	/* Would be active due to wake-wlan in gSPI */
	if (intstatus & I_CHIPACTIVE) {
		brcmf_dbg(SDIO, "Dongle reports CHIPACTIVE\n");
		intstatus &= ~I_CHIPACTIVE;
	}

	/* Ignore frame indications if rxskip is set */
	if (bus->rxskip)
		intstatus &= ~I_HMB_FRAME_IND;

	/* On frame indication, read available frames */
	if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
		brcmf_sdio_readframes(bus, bus->rxbound);
		if (!bus->rxpending)
			intstatus &= ~I_HMB_FRAME_IND;
	}

	/* Keep still-pending events for next scheduling */
	if (intstatus)
		atomic_or(intstatus, &bus->intstatus);

	brcmf_sdio_clrintr(bus);

	if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
	    data_ok(bus)) {
		sdio_claim_host(bus->sdiodev->func1);
		/* re-check under host claim: txctl may have timed out */
		if (bus->ctrl_frame_stat) {
			err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
						      bus->ctrl_frame_len);
			bus->ctrl_frame_err = err;
			wmb();	/* publish err before clearing the flag */
			bus->ctrl_frame_stat = false;
		}
		sdio_release_host(bus->sdiodev->func1);
		brcmf_sdio_wait_event_wakeup(bus);
	}
	/* Send queued frames (limit 1 if rx may still be pending) */
	if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
	    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
	    data_ok(bus)) {
		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
					    txlimit;
		brcmf_sdio_sendfromq(bus, framecnt);
	}

	if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
		brcmf_err("failed backplane access over SDIO, halting operation\n");
		atomic_set(&bus->intstatus, 0);
		if (bus->ctrl_frame_stat) {
			sdio_claim_host(bus->sdiodev->func1);
			if (bus->ctrl_frame_stat) {
				bus->ctrl_frame_err = -ENODEV;
				wmb();
				bus->ctrl_frame_stat = false;
				brcmf_sdio_wait_event_wakeup(bus);
			}
			sdio_release_host(bus->sdiodev->func1);
		}
	} else if (atomic_read(&bus->intstatus) ||
		   atomic_read(&bus->ipend) > 0 ||
		   (!atomic_read(&bus->fcstate) &&
		    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
		    data_ok(bus))) {
		/* work left over: ask for another DPC run */
		bus->dpc_triggered = true;
	}
}

/* Bus-layer accessor for the driver's tx packet queue */
static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	return &bus->txq;
}

/* Enqueue @pkt at precedence @prec, evicting a lower-precedence tail
 * packet when the queue is full.  Return: true if the packet was queued.
 */
static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
{
	struct sk_buff *p;
	int eprec = -1;		/* precedence to evict from */

	/* Fast case, precedence queue is not full and we are also not
	 * exceeding total queue length
	 */
	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
		brcmu_pktq_penq(q, prec, pkt);
		return true;
	}

	/* Determine precedence from which to evict packet, if any */
	if (pktq_pfull(q, prec)) {
		eprec = prec;
	} else if (pktq_full(q)) {
		p = brcmu_pktq_peek_tail(q, &eprec);
		if (eprec > prec)
			return false;
	}

	/* Evict if needed */
	if (eprec >= 0) {
		/* Detect queueing to unconfigured precedence */
		if (eprec == prec)
			return false;	/* refuse newer (incoming) packet */
		/* Evict packet according to discard policy */
		p = brcmu_pktq_pdeq_tail(q, eprec);
		if (p == NULL)
			brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
		brcmu_pkt_buf_free_skb(p);
	}

	/* Enqueue */
	p = brcmu_pktq_penq(q, prec, pkt);
	if (p == NULL)
		brcmf_err("brcmu_pktq_penq() failed\n");

	return p != NULL;
}

/* Bus tx entry point: queue one data skb for transmission by the DPC.
 * Return: 0 on queueing success, -EIO when the bus is down, -ENOSR when
 * the tx queue is exhausted.
 */
static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
	int ret = -EBADE;
	uint prec;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
	if (sdiodev->state != BRCMF_SDIOD_DATA)
		return -EIO;

	/* Add space for the header */
	skb_push(pkt, bus->tx_hdrlen);
	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */

	/* In WLAN, priority is always set by the AP using WMM parameters
	 * and this need not always follow the standard 802.1d priority.
	 * Based on AP WMM config, map from 802.1d priority to corresponding
	 * precedence level.
	 */
	prec = brcmf_map_prio_to_prec(bus_if->drvr->config,
				      (pkt->priority & PRIOMASK));

	/* Check for existing queue, current flow-control,
	 * pending event, or pending clock
	 */
	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
	bus->sdcnt.fcqueued++;

	/* Priority based enq */
	spin_lock_bh(&bus->txq_lock);
	/* reset bus_flags in packet cb */
	*(u16 *)(pkt->cb) = 0;
	if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
		skb_pull(pkt, bus->tx_hdrlen);
		brcmf_err("out of bus->txq !!!\n");
		ret = -ENOSR;
	} else {
		ret = 0;
	}

	if (pktq_len(&bus->txq) >= TXHI) {
		bus->txoff = true;
		brcmf_proto_bcdc_txflowblock(dev, true);
	}
	spin_unlock_bh(&bus->txq_lock);

#ifdef DEBUG
	if (pktq_plen(&bus->txq, prec) > qcount[prec])
		qcount[prec] = pktq_plen(&bus->txq, prec);
#endif

	brcmf_sdio_trigger_dpc(bus);
	return ret;
}

#ifdef DEBUG
#define CONSOLE_LINE_MAX	192

/* Poll the firmware's in-memory console ring buffer and emit any newly
 * written complete lines through pr_debug().
 */
static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
{
	struct brcmf_console *c = &bus->console;
	u8 line[CONSOLE_LINE_MAX], ch;
	u32 n, idx, addr;
	int rv;

	/* Don't do anything until FWREADY updates console address */
	if (bus->console_addr == 0)
		return 0;

	/* Read console log struct */
	addr = bus->console_addr + offsetof(struct rte_console, log_le);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
			       sizeof(c->log_le));
	if (rv < 0)
		return rv;

	/* Allocate console buffer (one time only) */
	if (c->buf == NULL) {
		c->bufsize = le32_to_cpu(c->log_le.buf_size);
		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
		if (c->buf == NULL)
			return -ENOMEM;
	}

	idx = le32_to_cpu(c->log_le.idx);

	/* Protect against corrupt value */
	/* NOTE(review): idx == c->bufsize passes this check, but the ring
	 * cursor below is always < bufsize, so such an idx could never be
	 * reached -- ">=" looks like the intended bound; verify.
	 */
	if (idx > c->bufsize)
		return -EBADE;

	/* Skip reading the console buffer if the index pointer
	 * has not moved
	 */
	if (idx == c->last)
		return 0;

	/* Read the console buffer */
	addr = le32_to_cpu(c->log_le.buf);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
	if (rv < 0)
		return rv;

	while (c->last != idx) {
		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
			if (c->last == idx) {
				/* This would output a partial line.
				 * Instead, back up
				 * the buffer pointer and output this
				 * line next time around.
				 */
				if (c->last >= n)
					c->last -= n;
				else
					c->last = c->bufsize - n;
				goto break2;
			}
			ch = c->buf[c->last];
			c->last = (c->last + 1) % c->bufsize;
			if (ch == '\n')
				break;
			line[n] = ch;
		}

		if (n > 0) {
			if (line[n - 1] == '\r')
				n--;
			line[n] = 0;
			pr_debug("CONSOLE: %s\n", line);
		}
	}
break2:

	return 0;
}
#endif				/* DEBUG */

/* Bus control-frame tx: hand @msg to the DPC via ctrl_frame_buf and wait
 * (up to CTL_DONE_TIMEOUT) for it to be sent.  Return: 0 on success,
 * -ETIMEDOUT on timeout, or the DPC's send error.
 */
static int
brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	int ret;

	brcmf_dbg(TRACE, "Enter\n");
	if (sdiodev->state != BRCMF_SDIOD_DATA)
		return -EIO;

	/* Send from dpc */
	bus->ctrl_frame_buf = msg;
	bus->ctrl_frame_len = msglen;
	wmb();	/* publish buf/len before raising the flag the DPC tests */
	bus->ctrl_frame_stat = true;

	brcmf_sdio_trigger_dpc(bus);
	wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
					 CTL_DONE_TIMEOUT);
	ret = 0;
	if (bus->ctrl_frame_stat) {
		sdio_claim_host(bus->sdiodev->func1);
		/* re-check under host claim to close the race with the DPC */
		if (bus->ctrl_frame_stat) {
			brcmf_dbg(SDIO, "ctrl_frame timeout\n");
			bus->ctrl_frame_stat = false;
			ret = -ETIMEDOUT;
		}
		sdio_release_host(bus->sdiodev->func1);
	}
	if (!ret) {
		brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
			  bus->ctrl_frame_err);
		rmb();	/* pairs with the DPC's wmb() before clearing stat */
		ret = bus->ctrl_frame_err;
	}

	if (ret)
		bus->sdcnt.tx_ctlerrs++;
	else
		bus->sdcnt.tx_ctlpkts++;

	return ret;
}

#ifdef DEBUG
/* Dump the firmware console ring buffer (oldest data first) into @seq */
static int brcmf_sdio_dump_console(struct seq_file *seq, struct brcmf_sdio *bus,
				   struct sdpcm_shared *sh)
{
	u32 addr, console_ptr, console_size, console_index;
	char *conbuf = NULL;
	__le32 sh_val;
	int rv;

	/* obtain console information from device memory */
	addr = sh->console_addr + offsetof(struct rte_console, log_le);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
			       (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_ptr = le32_to_cpu(sh_val);

	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
			       (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_size = le32_to_cpu(sh_val);

	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
			       (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_index = le32_to_cpu(sh_val);

	/* allocate buffer for console data */
	if (console_size <= CONSOLE_BUFFER_MAX)
		conbuf = vzalloc(console_size+1);

	if (!conbuf)
		return -ENOMEM;

	/* obtain the console data from device */
	conbuf[console_size] = '\0';
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
			       console_size);
	if (rv < 0)
		goto done;

	/* ring buffer: emit [index, end) first, then [0, index) */
	rv = seq_write(seq, conbuf + console_index,
		       console_size - console_index);
	if (rv < 0)
		goto done;

	/* NOTE(review): writing console_index - 1 bytes drops the byte at
	 * index - 1; presumably intentional (trailing NUL/cursor byte) --
	 * verify against the firmware console layout.
	 */
	if (console_index > 0)
		rv = seq_write(seq, conbuf, console_index - 1);

done:
	vfree(conbuf);
	return rv;
}

/* Read the firmware trap record at sh->trap_addr and print it, either
 * into @seq or (when @seq is NULL) via pr_debug().
 */
static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus,
				struct sdpcm_shared *sh)
{
	int error;
	struct brcmf_trap_info tr;

	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
		brcmf_dbg(INFO, "no trap in firmware\n");
		return 0;
	}

	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
				  sizeof(struct brcmf_trap_info));
	if (error < 0)
		return error;

	if (seq)
		seq_printf(seq,
			   "dongle trap info: type 0x%x @ epc 0x%08x\n"
			   " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
			   " lr 0x%08x pc 0x%08x offset 0x%x\n"
			   " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
			   " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
			   le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
			   le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
			   le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
			   le32_to_cpu(tr.pc), sh->trap_addr,
			   le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
			   le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
			   le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
			   le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
	else
		pr_debug("dongle trap info: type 0x%x @ epc 0x%08x\n"
			 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
			 " lr 0x%08x pc 0x%08x offset 0x%x\n"
			 " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
			 " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
			 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
			 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
			 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
			 le32_to_cpu(tr.pc), sh->trap_addr,
			 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
			 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
			 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
			 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
	return 0;
}

/* Read assert file/expression strings from device memory and report them */
static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
				  struct sdpcm_shared *sh)
{
	int error = 0;
	char file[80] = "?";
	char expr[80] = "<???>";

	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
		brcmf_dbg(INFO, "firmware not built with -assert\n");
		return 0;
	} else if
((sh->flags & SDPCM_SHARED_ASSERT) == 0) { 3064 brcmf_dbg(INFO, "no assert in dongle\n"); 3065 return 0; 3066 } 3067 3068 sdio_claim_host(bus->sdiodev->func1); 3069 if (sh->assert_file_addr != 0) { 3070 error = brcmf_sdiod_ramrw(bus->sdiodev, false, 3071 sh->assert_file_addr, (u8 *)file, 80); 3072 if (error < 0) 3073 return error; 3074 } 3075 if (sh->assert_exp_addr != 0) { 3076 error = brcmf_sdiod_ramrw(bus->sdiodev, false, 3077 sh->assert_exp_addr, (u8 *)expr, 80); 3078 if (error < 0) 3079 return error; 3080 } 3081 sdio_release_host(bus->sdiodev->func1); 3082 3083 seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n", 3084 file, sh->assert_line, expr); 3085 return 0; 3086 } 3087 3088 static int brcmf_sdio_checkdied(struct brcmf_sdio *bus) 3089 { 3090 int error; 3091 struct sdpcm_shared sh; 3092 3093 error = brcmf_sdio_readshared(bus, &sh); 3094 3095 if (error < 0) 3096 return error; 3097 3098 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) 3099 brcmf_dbg(INFO, "firmware not built with -assert\n"); 3100 else if (sh.flags & SDPCM_SHARED_ASSERT) 3101 brcmf_err("assertion in dongle\n"); 3102 3103 if (sh.flags & SDPCM_SHARED_TRAP) { 3104 brcmf_err("firmware trap in dongle\n"); 3105 brcmf_sdio_trap_info(NULL, bus, &sh); 3106 } 3107 3108 return 0; 3109 } 3110 3111 static int brcmf_sdio_died_dump(struct seq_file *seq, struct brcmf_sdio *bus) 3112 { 3113 int error = 0; 3114 struct sdpcm_shared sh; 3115 3116 error = brcmf_sdio_readshared(bus, &sh); 3117 if (error < 0) 3118 goto done; 3119 3120 error = brcmf_sdio_assert_info(seq, bus, &sh); 3121 if (error < 0) 3122 goto done; 3123 3124 error = brcmf_sdio_trap_info(seq, bus, &sh); 3125 if (error < 0) 3126 goto done; 3127 3128 error = brcmf_sdio_dump_console(seq, bus, &sh); 3129 3130 done: 3131 return error; 3132 } 3133 3134 static int brcmf_sdio_forensic_read(struct seq_file *seq, void *data) 3135 { 3136 struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); 3137 struct brcmf_sdio *bus = bus_if->bus_priv.sdio->bus; 3138 
3139 return brcmf_sdio_died_dump(seq, bus); 3140 } 3141 3142 static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data) 3143 { 3144 struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); 3145 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3146 struct brcmf_sdio_count *sdcnt = &sdiodev->bus->sdcnt; 3147 3148 seq_printf(seq, 3149 "intrcount: %u\nlastintrs: %u\n" 3150 "pollcnt: %u\nregfails: %u\n" 3151 "tx_sderrs: %u\nfcqueued: %u\n" 3152 "rxrtx: %u\nrx_toolong: %u\n" 3153 "rxc_errors: %u\nrx_hdrfail: %u\n" 3154 "rx_badhdr: %u\nrx_badseq: %u\n" 3155 "fc_rcvd: %u\nfc_xoff: %u\n" 3156 "fc_xon: %u\nrxglomfail: %u\n" 3157 "rxglomframes: %u\nrxglompkts: %u\n" 3158 "f2rxhdrs: %u\nf2rxdata: %u\n" 3159 "f2txdata: %u\nf1regdata: %u\n" 3160 "tickcnt: %u\ntx_ctlerrs: %lu\n" 3161 "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n" 3162 "rx_ctlpkts: %lu\nrx_readahead: %lu\n", 3163 sdcnt->intrcount, sdcnt->lastintrs, 3164 sdcnt->pollcnt, sdcnt->regfails, 3165 sdcnt->tx_sderrs, sdcnt->fcqueued, 3166 sdcnt->rxrtx, sdcnt->rx_toolong, 3167 sdcnt->rxc_errors, sdcnt->rx_hdrfail, 3168 sdcnt->rx_badhdr, sdcnt->rx_badseq, 3169 sdcnt->fc_rcvd, sdcnt->fc_xoff, 3170 sdcnt->fc_xon, sdcnt->rxglomfail, 3171 sdcnt->rxglomframes, sdcnt->rxglompkts, 3172 sdcnt->f2rxhdrs, sdcnt->f2rxdata, 3173 sdcnt->f2txdata, sdcnt->f1regdata, 3174 sdcnt->tickcnt, sdcnt->tx_ctlerrs, 3175 sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs, 3176 sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt); 3177 3178 return 0; 3179 } 3180 3181 static void brcmf_sdio_debugfs_create(struct device *dev) 3182 { 3183 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3184 struct brcmf_pub *drvr = bus_if->drvr; 3185 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3186 struct brcmf_sdio *bus = sdiodev->bus; 3187 struct dentry *dentry = brcmf_debugfs_get_devdir(drvr); 3188 3189 if (IS_ERR_OR_NULL(dentry)) 3190 return; 3191 3192 bus->console_interval = BRCMF_CONSOLE; 3193 3194 brcmf_debugfs_add_entry(drvr, "forensics", 
brcmf_sdio_forensic_read); 3195 brcmf_debugfs_add_entry(drvr, "counters", 3196 brcmf_debugfs_sdio_count_read); 3197 debugfs_create_u32("console_interval", 0644, dentry, 3198 &bus->console_interval); 3199 } 3200 #else 3201 static int brcmf_sdio_checkdied(struct brcmf_sdio *bus) 3202 { 3203 return 0; 3204 } 3205 3206 static void brcmf_sdio_debugfs_create(struct device *dev) 3207 { 3208 } 3209 #endif /* DEBUG */ 3210 3211 static int 3212 brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) 3213 { 3214 int timeleft; 3215 uint rxlen = 0; 3216 bool pending; 3217 u8 *buf; 3218 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3219 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3220 struct brcmf_sdio *bus = sdiodev->bus; 3221 3222 brcmf_dbg(TRACE, "Enter\n"); 3223 if (sdiodev->state != BRCMF_SDIOD_DATA) 3224 return -EIO; 3225 3226 /* Wait until control frame is available */ 3227 timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending); 3228 3229 spin_lock_bh(&bus->rxctl_lock); 3230 rxlen = bus->rxlen; 3231 memcpy(msg, bus->rxctl, min(msglen, rxlen)); 3232 bus->rxctl = NULL; 3233 buf = bus->rxctl_orig; 3234 bus->rxctl_orig = NULL; 3235 bus->rxlen = 0; 3236 spin_unlock_bh(&bus->rxctl_lock); 3237 vfree(buf); 3238 3239 if (rxlen) { 3240 brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n", 3241 rxlen, msglen); 3242 } else if (timeleft == 0) { 3243 brcmf_err("resumed on timeout\n"); 3244 brcmf_sdio_checkdied(bus); 3245 } else if (pending) { 3246 brcmf_dbg(CTL, "cancelled\n"); 3247 return -ERESTARTSYS; 3248 } else { 3249 brcmf_dbg(CTL, "resumed for unknown reason?\n"); 3250 brcmf_sdio_checkdied(bus); 3251 } 3252 3253 if (rxlen) 3254 bus->sdcnt.rx_ctlpkts++; 3255 else 3256 bus->sdcnt.rx_ctlerrs++; 3257 3258 return rxlen ? 
(int)rxlen : -ETIMEDOUT; 3259 } 3260 3261 #ifdef DEBUG 3262 static bool 3263 brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr, 3264 u8 *ram_data, uint ram_sz) 3265 { 3266 char *ram_cmp; 3267 int err; 3268 bool ret = true; 3269 int address; 3270 int offset; 3271 int len; 3272 3273 /* read back and verify */ 3274 brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr, 3275 ram_sz); 3276 ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL); 3277 /* do not proceed while no memory but */ 3278 if (!ram_cmp) 3279 return true; 3280 3281 address = ram_addr; 3282 offset = 0; 3283 while (offset < ram_sz) { 3284 len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK : 3285 ram_sz - offset; 3286 err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len); 3287 if (err) { 3288 brcmf_err("error %d on reading %d membytes at 0x%08x\n", 3289 err, len, address); 3290 ret = false; 3291 break; 3292 } else if (memcmp(ram_cmp, &ram_data[offset], len)) { 3293 brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n", 3294 offset, len); 3295 ret = false; 3296 break; 3297 } 3298 offset += len; 3299 address += len; 3300 } 3301 3302 kfree(ram_cmp); 3303 3304 return ret; 3305 } 3306 #else /* DEBUG */ 3307 static bool 3308 brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr, 3309 u8 *ram_data, uint ram_sz) 3310 { 3311 return true; 3312 } 3313 #endif /* DEBUG */ 3314 3315 static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus, 3316 const struct firmware *fw) 3317 { 3318 int err; 3319 3320 brcmf_dbg(TRACE, "Enter\n"); 3321 3322 err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase, 3323 (u8 *)fw->data, fw->size); 3324 if (err) 3325 brcmf_err("error %d on writing %d membytes at 0x%08x\n", 3326 err, (int)fw->size, bus->ci->rambase); 3327 else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase, 3328 (u8 *)fw->data, fw->size)) 3329 err = -EIO; 3330 3331 return err; 3332 } 3333 3334 static int 
brcmf_sdio_download_nvram(struct brcmf_sdio *bus, 3335 void *vars, u32 varsz) 3336 { 3337 int address; 3338 int err; 3339 3340 brcmf_dbg(TRACE, "Enter\n"); 3341 3342 address = bus->ci->ramsize - varsz + bus->ci->rambase; 3343 err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz); 3344 if (err) 3345 brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n", 3346 err, varsz, address); 3347 else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz)) 3348 err = -EIO; 3349 3350 return err; 3351 } 3352 3353 static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, 3354 const struct firmware *fw, 3355 void *nvram, u32 nvlen) 3356 { 3357 int bcmerror; 3358 u32 rstvec; 3359 3360 sdio_claim_host(bus->sdiodev->func1); 3361 brcmf_sdio_clkctl(bus, CLK_AVAIL, false); 3362 3363 rstvec = get_unaligned_le32(fw->data); 3364 brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec); 3365 3366 bcmerror = brcmf_sdio_download_code_file(bus, fw); 3367 release_firmware(fw); 3368 if (bcmerror) { 3369 brcmf_err("dongle image file download failed\n"); 3370 brcmf_fw_nvram_free(nvram); 3371 goto err; 3372 } 3373 3374 bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen); 3375 brcmf_fw_nvram_free(nvram); 3376 if (bcmerror) { 3377 brcmf_err("dongle nvram file download failed\n"); 3378 goto err; 3379 } 3380 3381 /* Take arm out of reset */ 3382 if (!brcmf_chip_set_active(bus->ci, rstvec)) { 3383 brcmf_err("error getting out of ARM core reset\n"); 3384 goto err; 3385 } 3386 3387 err: 3388 brcmf_sdio_clkctl(bus, CLK_SDONLY, false); 3389 sdio_release_host(bus->sdiodev->func1); 3390 return bcmerror; 3391 } 3392 3393 static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus) 3394 { 3395 if (bus->ci->chip == CY_CC_43012_CHIP_ID) 3396 return true; 3397 else 3398 return false; 3399 } 3400 3401 static void brcmf_sdio_sr_init(struct brcmf_sdio *bus) 3402 { 3403 int err = 0; 3404 u8 val; 3405 u8 wakeupctrl; 3406 u8 cardcap; 3407 u8 chipclkcsr; 3408 3409 brcmf_dbg(TRACE, "Enter\n"); 
3410 3411 if (brcmf_chip_is_ulp(bus->ci)) { 3412 wakeupctrl = SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT; 3413 chipclkcsr = SBSDIO_HT_AVAIL_REQ; 3414 } else { 3415 wakeupctrl = SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; 3416 chipclkcsr = SBSDIO_FORCE_HT; 3417 } 3418 3419 if (brcmf_sdio_aos_no_decode(bus)) { 3420 cardcap = SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC; 3421 } else { 3422 cardcap = (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | 3423 SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT); 3424 } 3425 3426 val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err); 3427 if (err) { 3428 brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n"); 3429 return; 3430 } 3431 val |= 1 << wakeupctrl; 3432 brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err); 3433 if (err) { 3434 brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n"); 3435 return; 3436 } 3437 3438 /* Add CMD14 Support */ 3439 brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP, 3440 cardcap, 3441 &err); 3442 if (err) { 3443 brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n"); 3444 return; 3445 } 3446 3447 brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 3448 chipclkcsr, &err); 3449 if (err) { 3450 brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n"); 3451 return; 3452 } 3453 3454 /* set flag */ 3455 bus->sr_enabled = true; 3456 brcmf_dbg(INFO, "SR enabled\n"); 3457 } 3458 3459 /* enable KSO bit */ 3460 static int brcmf_sdio_kso_init(struct brcmf_sdio *bus) 3461 { 3462 struct brcmf_core *core = bus->sdio_core; 3463 u8 val; 3464 int err = 0; 3465 3466 brcmf_dbg(TRACE, "Enter\n"); 3467 3468 /* KSO bit added in SDIO core rev 12 */ 3469 if (core->rev < 12) 3470 return 0; 3471 3472 val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err); 3473 if (err) { 3474 brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n"); 3475 return err; 3476 } 3477 3478 if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { 3479 val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << 3480 SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); 3481 brcmf_sdiod_writeb(bus->sdiodev, 
SBSDIO_FUNC1_SLEEPCSR, 3482 val, &err); 3483 if (err) { 3484 brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n"); 3485 return err; 3486 } 3487 } 3488 3489 return 0; 3490 } 3491 3492 3493 static int brcmf_sdio_bus_preinit(struct device *dev) 3494 { 3495 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3496 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3497 struct brcmf_sdio *bus = sdiodev->bus; 3498 struct brcmf_core *core = bus->sdio_core; 3499 u32 value; 3500 int err; 3501 3502 /* maxctl provided by common layer */ 3503 if (WARN_ON(!bus_if->maxctl)) 3504 return -EINVAL; 3505 3506 /* Allocate control receive buffer */ 3507 bus_if->maxctl += bus->roundup; 3508 value = roundup((bus_if->maxctl + SDPCM_HDRLEN), ALIGNMENT); 3509 value += bus->head_align; 3510 bus->rxbuf = kmalloc(value, GFP_ATOMIC); 3511 if (bus->rxbuf) 3512 bus->rxblen = value; 3513 3514 /* the commands below use the terms tx and rx from 3515 * a device perspective, ie. bus:txglom affects the 3516 * bus transfers from device to host. 
3517 */ 3518 if (core->rev < 12) { 3519 /* for sdio core rev < 12, disable txgloming */ 3520 value = 0; 3521 err = brcmf_iovar_data_set(dev, "bus:txglom", &value, 3522 sizeof(u32)); 3523 } else { 3524 /* otherwise, set txglomalign */ 3525 value = sdiodev->settings->bus.sdio.sd_sgentry_align; 3526 /* SDIO ADMA requires at least 32 bit alignment */ 3527 value = max_t(u32, value, ALIGNMENT); 3528 err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, 3529 sizeof(u32)); 3530 } 3531 3532 if (err < 0) 3533 goto done; 3534 3535 bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN; 3536 if (sdiodev->sg_support) { 3537 bus->txglom = false; 3538 value = 1; 3539 err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", 3540 &value, sizeof(u32)); 3541 if (err < 0) { 3542 /* bus:rxglom is allowed to fail */ 3543 err = 0; 3544 } else { 3545 bus->txglom = true; 3546 bus->tx_hdrlen += SDPCM_HWEXT_LEN; 3547 } 3548 } 3549 brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen); 3550 3551 done: 3552 return err; 3553 } 3554 3555 static size_t brcmf_sdio_bus_get_ramsize(struct device *dev) 3556 { 3557 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3558 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3559 struct brcmf_sdio *bus = sdiodev->bus; 3560 3561 return bus->ci->ramsize - bus->ci->srsize; 3562 } 3563 3564 static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data, 3565 size_t mem_size) 3566 { 3567 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3568 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3569 struct brcmf_sdio *bus = sdiodev->bus; 3570 int err; 3571 int address; 3572 int offset; 3573 int len; 3574 3575 brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase, 3576 mem_size); 3577 3578 address = bus->ci->rambase; 3579 offset = err = 0; 3580 sdio_claim_host(sdiodev->func1); 3581 while (offset < mem_size) { 3582 len = ((offset + MEMBLOCK) < mem_size) ? 
MEMBLOCK : 3583 mem_size - offset; 3584 err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len); 3585 if (err) { 3586 brcmf_err("error %d on reading %d membytes at 0x%08x\n", 3587 err, len, address); 3588 goto done; 3589 } 3590 data += len; 3591 offset += len; 3592 address += len; 3593 } 3594 3595 done: 3596 sdio_release_host(sdiodev->func1); 3597 return err; 3598 } 3599 3600 void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus) 3601 { 3602 if (!bus->dpc_triggered) { 3603 bus->dpc_triggered = true; 3604 queue_work(bus->brcmf_wq, &bus->datawork); 3605 } 3606 } 3607 3608 void brcmf_sdio_isr(struct brcmf_sdio *bus) 3609 { 3610 brcmf_dbg(TRACE, "Enter\n"); 3611 3612 if (!bus) { 3613 brcmf_err("bus is null pointer, exiting\n"); 3614 return; 3615 } 3616 3617 /* Count the interrupt call */ 3618 bus->sdcnt.intrcount++; 3619 if (in_interrupt()) 3620 atomic_set(&bus->ipend, 1); 3621 else 3622 if (brcmf_sdio_intr_rstatus(bus)) { 3623 brcmf_err("failed backplane access\n"); 3624 } 3625 3626 /* Disable additional interrupts (is this needed now)? */ 3627 if (!bus->intr) 3628 brcmf_err("isr w/o interrupt configured!\n"); 3629 3630 bus->dpc_triggered = true; 3631 queue_work(bus->brcmf_wq, &bus->datawork); 3632 } 3633 3634 static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) 3635 { 3636 brcmf_dbg(TIMER, "Enter\n"); 3637 3638 /* Poll period: check device if appropriate. 
*/ 3639 if (!bus->sr_enabled && 3640 bus->poll && (++bus->polltick >= bus->pollrate)) { 3641 u32 intstatus = 0; 3642 3643 /* Reset poll tick */ 3644 bus->polltick = 0; 3645 3646 /* Check device if no interrupts */ 3647 if (!bus->intr || 3648 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { 3649 3650 if (!bus->dpc_triggered) { 3651 u8 devpend; 3652 3653 sdio_claim_host(bus->sdiodev->func1); 3654 devpend = brcmf_sdiod_func0_rb(bus->sdiodev, 3655 SDIO_CCCR_INTx, NULL); 3656 sdio_release_host(bus->sdiodev->func1); 3657 intstatus = devpend & (INTR_STATUS_FUNC1 | 3658 INTR_STATUS_FUNC2); 3659 } 3660 3661 /* If there is something, make like the ISR and 3662 schedule the DPC */ 3663 if (intstatus) { 3664 bus->sdcnt.pollcnt++; 3665 atomic_set(&bus->ipend, 1); 3666 3667 bus->dpc_triggered = true; 3668 queue_work(bus->brcmf_wq, &bus->datawork); 3669 } 3670 } 3671 3672 /* Update interrupt tracking */ 3673 bus->sdcnt.lastintrs = bus->sdcnt.intrcount; 3674 } 3675 #ifdef DEBUG 3676 /* Poll for console output periodically */ 3677 if (bus->sdiodev->state == BRCMF_SDIOD_DATA && BRCMF_FWCON_ON() && 3678 bus->console_interval != 0) { 3679 bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL); 3680 if (bus->console.count >= bus->console_interval) { 3681 bus->console.count -= bus->console_interval; 3682 sdio_claim_host(bus->sdiodev->func1); 3683 /* Make sure backplane clock is on */ 3684 brcmf_sdio_bus_sleep(bus, false, false); 3685 if (brcmf_sdio_readconsole(bus) < 0) 3686 /* stop on error */ 3687 bus->console_interval = 0; 3688 sdio_release_host(bus->sdiodev->func1); 3689 } 3690 } 3691 #endif /* DEBUG */ 3692 3693 /* On idle timeout clear activity flag and/or turn off clock */ 3694 if (!bus->dpc_triggered) { 3695 rmb(); 3696 if ((!bus->dpc_running) && (bus->idletime > 0) && 3697 (bus->clkstate == CLK_AVAIL)) { 3698 bus->idlecount++; 3699 if (bus->idlecount > bus->idletime) { 3700 brcmf_dbg(SDIO, "idle\n"); 3701 sdio_claim_host(bus->sdiodev->func1); 3702 brcmf_sdio_wd_timer(bus, 
false); 3703 bus->idlecount = 0; 3704 brcmf_sdio_bus_sleep(bus, true, false); 3705 sdio_release_host(bus->sdiodev->func1); 3706 } 3707 } else { 3708 bus->idlecount = 0; 3709 } 3710 } else { 3711 bus->idlecount = 0; 3712 } 3713 } 3714 3715 static void brcmf_sdio_dataworker(struct work_struct *work) 3716 { 3717 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio, 3718 datawork); 3719 3720 bus->dpc_running = true; 3721 wmb(); 3722 while (READ_ONCE(bus->dpc_triggered)) { 3723 bus->dpc_triggered = false; 3724 brcmf_sdio_dpc(bus); 3725 bus->idlecount = 0; 3726 } 3727 bus->dpc_running = false; 3728 if (brcmf_sdiod_freezing(bus->sdiodev)) { 3729 brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN); 3730 brcmf_sdiod_try_freeze(bus->sdiodev); 3731 brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA); 3732 } 3733 } 3734 3735 static void 3736 brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, 3737 struct brcmf_chip *ci, u32 drivestrength) 3738 { 3739 const struct sdiod_drive_str *str_tab = NULL; 3740 u32 str_mask; 3741 u32 str_shift; 3742 u32 i; 3743 u32 drivestrength_sel = 0; 3744 u32 cc_data_temp; 3745 u32 addr; 3746 3747 if (!(ci->cc_caps & CC_CAP_PMU)) 3748 return; 3749 3750 switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) { 3751 case SDIOD_DRVSTR_KEY(BRCM_CC_4330_CHIP_ID, 12): 3752 str_tab = sdiod_drvstr_tab1_1v8; 3753 str_mask = 0x00003800; 3754 str_shift = 11; 3755 break; 3756 case SDIOD_DRVSTR_KEY(BRCM_CC_4334_CHIP_ID, 17): 3757 str_tab = sdiod_drvstr_tab6_1v8; 3758 str_mask = 0x00001800; 3759 str_shift = 11; 3760 break; 3761 case SDIOD_DRVSTR_KEY(BRCM_CC_43143_CHIP_ID, 17): 3762 /* note: 43143 does not support tristate */ 3763 i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1; 3764 if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) { 3765 str_tab = sdiod_drvstr_tab2_3v3; 3766 str_mask = 0x00000007; 3767 str_shift = 0; 3768 } else 3769 brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n", 3770 ci->name, drivestrength); 3771 break; 
3772 case SDIOD_DRVSTR_KEY(BRCM_CC_43362_CHIP_ID, 13): 3773 str_tab = sdiod_drive_strength_tab5_1v8; 3774 str_mask = 0x00003800; 3775 str_shift = 11; 3776 break; 3777 default: 3778 brcmf_dbg(INFO, "No SDIO driver strength init needed for chip %s rev %d pmurev %d\n", 3779 ci->name, ci->chiprev, ci->pmurev); 3780 break; 3781 } 3782 3783 if (str_tab != NULL) { 3784 struct brcmf_core *pmu = brcmf_chip_get_pmu(ci); 3785 3786 for (i = 0; str_tab[i].strength != 0; i++) { 3787 if (drivestrength >= str_tab[i].strength) { 3788 drivestrength_sel = str_tab[i].sel; 3789 break; 3790 } 3791 } 3792 addr = CORE_CC_REG(pmu->base, chipcontrol_addr); 3793 brcmf_sdiod_writel(sdiodev, addr, 1, NULL); 3794 cc_data_temp = brcmf_sdiod_readl(sdiodev, addr, NULL); 3795 cc_data_temp &= ~str_mask; 3796 drivestrength_sel <<= str_shift; 3797 cc_data_temp |= drivestrength_sel; 3798 brcmf_sdiod_writel(sdiodev, addr, cc_data_temp, NULL); 3799 3800 brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n", 3801 str_tab[i].strength, drivestrength, cc_data_temp); 3802 } 3803 } 3804 3805 static int brcmf_sdio_buscoreprep(void *ctx) 3806 { 3807 struct brcmf_sdio_dev *sdiodev = ctx; 3808 int err = 0; 3809 u8 clkval, clkset; 3810 3811 /* Try forcing SDIO core to do ALPAvail request only */ 3812 clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; 3813 brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); 3814 if (err) { 3815 brcmf_err("error writing for HT off\n"); 3816 return err; 3817 } 3818 3819 /* If register supported, wait for ALPAvail and then force ALP */ 3820 /* This may take up to 15 milliseconds */ 3821 clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL); 3822 3823 if ((clkval & ~SBSDIO_AVBITS) != clkset) { 3824 brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n", 3825 clkset, clkval); 3826 return -EACCES; 3827 } 3828 3829 SPINWAIT(((clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 3830 NULL)), 3831 
!SBSDIO_ALPAV(clkval)), 3832 PMU_MAX_TRANSITION_DLY); 3833 3834 if (!SBSDIO_ALPAV(clkval)) { 3835 brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n", 3836 clkval); 3837 return -EBUSY; 3838 } 3839 3840 clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; 3841 brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); 3842 udelay(65); 3843 3844 /* Also, disable the extra SDIO pull-ups */ 3845 brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); 3846 3847 return 0; 3848 } 3849 3850 static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip, 3851 u32 rstvec) 3852 { 3853 struct brcmf_sdio_dev *sdiodev = ctx; 3854 struct brcmf_core *core = sdiodev->bus->sdio_core; 3855 u32 reg_addr; 3856 3857 /* clear all interrupts */ 3858 reg_addr = core->base + SD_REG(intstatus); 3859 brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL); 3860 3861 if (rstvec) 3862 /* Write reset vector to address 0 */ 3863 brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec, 3864 sizeof(rstvec)); 3865 } 3866 3867 static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr) 3868 { 3869 struct brcmf_sdio_dev *sdiodev = ctx; 3870 u32 val, rev; 3871 3872 val = brcmf_sdiod_readl(sdiodev, addr, NULL); 3873 3874 /* 3875 * this is a bit of special handling if reading the chipcommon chipid 3876 * register. The 4339 is a next-gen of the 4335. It uses the same 3877 * SDIO device id as 4335 and the chipid register returns 4335 as well. 3878 * It can be identified as 4339 by looking at the chip revision. It 3879 * is corrected here so the chip.c module has the right info. 
3880 */ 3881 if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) && 3882 (sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 || 3883 sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) { 3884 rev = (val & CID_REV_MASK) >> CID_REV_SHIFT; 3885 if (rev >= 2) { 3886 val &= ~CID_ID_MASK; 3887 val |= BRCM_CC_4339_CHIP_ID; 3888 } 3889 } 3890 3891 return val; 3892 } 3893 3894 static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val) 3895 { 3896 struct brcmf_sdio_dev *sdiodev = ctx; 3897 3898 brcmf_sdiod_writel(sdiodev, addr, val, NULL); 3899 } 3900 3901 static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = { 3902 .prepare = brcmf_sdio_buscoreprep, 3903 .activate = brcmf_sdio_buscore_activate, 3904 .read32 = brcmf_sdio_buscore_read32, 3905 .write32 = brcmf_sdio_buscore_write32, 3906 }; 3907 3908 static bool 3909 brcmf_sdio_probe_attach(struct brcmf_sdio *bus) 3910 { 3911 struct brcmf_sdio_dev *sdiodev; 3912 u8 clkctl = 0; 3913 int err = 0; 3914 int reg_addr; 3915 u32 reg_val; 3916 u32 drivestrength; 3917 3918 sdiodev = bus->sdiodev; 3919 sdio_claim_host(sdiodev->func1); 3920 3921 pr_debug("F1 signature read @0x18000000=0x%4x\n", 3922 brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL)); 3923 3924 /* 3925 * Force PLL off until brcmf_chip_attach() 3926 * programs PLL control regs 3927 */ 3928 3929 brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1, 3930 &err); 3931 if (!err) 3932 clkctl = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 3933 &err); 3934 3935 if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) { 3936 brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", 3937 err, BRCMF_INIT_CLKCTL1, clkctl); 3938 goto fail; 3939 } 3940 3941 bus->ci = brcmf_chip_attach(sdiodev, &brcmf_sdio_buscore_ops); 3942 if (IS_ERR(bus->ci)) { 3943 brcmf_err("brcmf_chip_attach failed!\n"); 3944 bus->ci = NULL; 3945 goto fail; 3946 } 3947 3948 /* Pick up the SDIO core info struct from chip.c */ 3949 bus->sdio_core = 
brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV); 3950 if (!bus->sdio_core) 3951 goto fail; 3952 3953 /* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */ 3954 sdiodev->cc_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_CHIPCOMMON); 3955 if (!sdiodev->cc_core) 3956 goto fail; 3957 3958 sdiodev->settings = brcmf_get_module_param(sdiodev->dev, 3959 BRCMF_BUSTYPE_SDIO, 3960 bus->ci->chip, 3961 bus->ci->chiprev); 3962 if (!sdiodev->settings) { 3963 brcmf_err("Failed to get device parameters\n"); 3964 goto fail; 3965 } 3966 /* platform specific configuration: 3967 * alignments must be at least 4 bytes for ADMA 3968 */ 3969 bus->head_align = ALIGNMENT; 3970 bus->sgentry_align = ALIGNMENT; 3971 if (sdiodev->settings->bus.sdio.sd_head_align > ALIGNMENT) 3972 bus->head_align = sdiodev->settings->bus.sdio.sd_head_align; 3973 if (sdiodev->settings->bus.sdio.sd_sgentry_align > ALIGNMENT) 3974 bus->sgentry_align = 3975 sdiodev->settings->bus.sdio.sd_sgentry_align; 3976 3977 /* allocate scatter-gather table. sg support 3978 * will be disabled upon allocation failure. 3979 */ 3980 brcmf_sdiod_sgtable_alloc(sdiodev); 3981 3982 #ifdef CONFIG_PM_SLEEP 3983 /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ 3984 * is true or when platform data OOB irq is true). 
3985 */ 3986 if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) && 3987 ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) || 3988 (sdiodev->settings->bus.sdio.oob_irq_supported))) 3989 sdiodev->bus_if->wowl_supported = true; 3990 #endif 3991 3992 if (brcmf_sdio_kso_init(bus)) { 3993 brcmf_err("error enabling KSO\n"); 3994 goto fail; 3995 } 3996 3997 if (sdiodev->settings->bus.sdio.drive_strength) 3998 drivestrength = sdiodev->settings->bus.sdio.drive_strength; 3999 else 4000 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH; 4001 brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength); 4002 4003 /* Set card control so an SDIO card reset does a WLAN backplane reset */ 4004 reg_val = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err); 4005 if (err) 4006 goto fail; 4007 4008 reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET; 4009 4010 brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err); 4011 if (err) 4012 goto fail; 4013 4014 /* set PMUControl so a backplane reset does PMU state reload */ 4015 reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol); 4016 reg_val = brcmf_sdiod_readl(sdiodev, reg_addr, &err); 4017 if (err) 4018 goto fail; 4019 4020 reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT); 4021 4022 brcmf_sdiod_writel(sdiodev, reg_addr, reg_val, &err); 4023 if (err) 4024 goto fail; 4025 4026 sdio_release_host(sdiodev->func1); 4027 4028 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN); 4029 4030 /* allocate header buffer */ 4031 bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL); 4032 if (!bus->hdrbuf) 4033 return false; 4034 /* Locate an appropriately-aligned portion of hdrbuf */ 4035 bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0], 4036 bus->head_align); 4037 4038 /* Set the poll and/or interrupt flags */ 4039 bus->intr = true; 4040 bus->poll = false; 4041 if (bus->poll) 4042 bus->pollrate = 1; 4043 4044 return true; 4045 4046 fail: 4047 
sdio_release_host(sdiodev->func1); 4048 return false; 4049 } 4050 4051 static int 4052 brcmf_sdio_watchdog_thread(void *data) 4053 { 4054 struct brcmf_sdio *bus = (struct brcmf_sdio *)data; 4055 int wait; 4056 4057 allow_signal(SIGTERM); 4058 /* Run until signal received */ 4059 brcmf_sdiod_freezer_count(bus->sdiodev); 4060 while (1) { 4061 if (kthread_should_stop()) 4062 break; 4063 brcmf_sdiod_freezer_uncount(bus->sdiodev); 4064 wait = wait_for_completion_interruptible(&bus->watchdog_wait); 4065 brcmf_sdiod_freezer_count(bus->sdiodev); 4066 brcmf_sdiod_try_freeze(bus->sdiodev); 4067 if (!wait) { 4068 brcmf_sdio_bus_watchdog(bus); 4069 /* Count the tick for reference */ 4070 bus->sdcnt.tickcnt++; 4071 reinit_completion(&bus->watchdog_wait); 4072 } else 4073 break; 4074 } 4075 return 0; 4076 } 4077 4078 static void 4079 brcmf_sdio_watchdog(struct timer_list *t) 4080 { 4081 struct brcmf_sdio *bus = from_timer(bus, t, timer); 4082 4083 if (bus->watchdog_tsk) { 4084 complete(&bus->watchdog_wait); 4085 /* Reschedule the watchdog */ 4086 if (bus->wd_active) 4087 mod_timer(&bus->timer, 4088 jiffies + BRCMF_WD_POLL); 4089 } 4090 } 4091 4092 static 4093 int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name) 4094 { 4095 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 4096 struct brcmf_fw_request *fwreq; 4097 struct brcmf_fw_name fwnames[] = { 4098 { ext, fw_name }, 4099 }; 4100 4101 fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev, 4102 brcmf_sdio_fwnames, 4103 ARRAY_SIZE(brcmf_sdio_fwnames), 4104 fwnames, ARRAY_SIZE(fwnames)); 4105 if (!fwreq) 4106 return -ENOMEM; 4107 4108 kfree(fwreq); 4109 return 0; 4110 } 4111 4112 static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { 4113 .stop = brcmf_sdio_bus_stop, 4114 .preinit = brcmf_sdio_bus_preinit, 4115 .txdata = brcmf_sdio_bus_txdata, 4116 .txctl = brcmf_sdio_bus_txctl, 4117 .rxctl = brcmf_sdio_bus_rxctl, 4118 .gettxq = brcmf_sdio_bus_gettxq, 4119 .wowl_config = brcmf_sdio_wowl_config, 
	.get_ramsize = brcmf_sdio_bus_get_ramsize,
	.get_memdump = brcmf_sdio_bus_get_memdump,
	.get_fwname = brcmf_sdio_get_fwname,
	.debugfs_create = brcmf_sdio_debugfs_create
};

/* Indices of the firmware binary and the NVRAM item inside the
 * brcmf_fw_request built by brcmf_sdio_prepare_fw_request().
 */
#define BRCMF_SDIO_FW_CODE	0
#define BRCMF_SDIO_FW_NVRAM	1

/**
 * brcmf_sdio_firmware_callback() - async firmware request completion handler.
 * @dev: device the firmware was requested for.
 * @err: result of the firmware fetch; non-zero aborts the bring-up.
 * @fwreq: completed request carrying the firmware binary and NVRAM data;
 *	consumed and freed here on the success path.
 *
 * Invoked by brcmf_fw_get_firmwares() once the firmware files are available
 * (requested from brcmf_sdio_probe()).  Downloads the image and NVRAM to the
 * dongle, forces the backplane clock so the F2 interrupt can propagate,
 * enables SDIO function 2 for frame transfers, programs per-chip FIFO
 * watermarks, registers the host interrupt handler and finally attaches the
 * common driver layer.  On any failure the driver is unbound from both the
 * F2 function device and @dev.
 */
static void brcmf_sdio_firmware_callback(struct device *dev, int err,
					 struct brcmf_fw_request *fwreq)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiod = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiod->bus;
	struct brcmf_core *core = bus->sdio_core;
	const struct firmware *code;
	void *nvram;
	u32 nvram_len;
	u8 saveclk, bpreq;
	u8 devctl;

	brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);

	if (err)
		goto fail;

	code = fwreq->items[BRCMF_SDIO_FW_CODE].binary;
	nvram = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.data;
	nvram_len = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.len;
	kfree(fwreq);

	/* try to download image and nvram to the dongle */
	bus->alp_only = true;
	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
	if (err)
		goto fail;
	bus->alp_only = false;

	/* Start the watchdog timer */
	bus->sdcnt.tickcnt = 0;
	brcmf_sdio_wd_timer(bus, true);

	sdio_claim_host(sdiod->func1);

	/* Make sure backplane clock is on, needed to generate F2 interrupt */
	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
	if (bus->clkstate != CLK_AVAIL)
		goto release;

	/* Force clocks on backplane to be sure F2 interrupt propagates */
	saveclk = brcmf_sdiod_readb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, &err);
	if (!err) {
		bpreq = saveclk;
		/* ULP chips get an HT-avail request instead of a forced HT */
		bpreq |= brcmf_chip_is_ulp(bus->ci) ?
			SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT;
		brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR,
				   bpreq, &err);
	}
	if (err) {
		brcmf_err("Failed to force clock for F2: err %d\n", err);
		goto release;
	}

	/* Enable function 2 (frame transfers) */
	brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata),
			   SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL);

	err = sdio_enable_func(sdiod->func2);

	brcmf_dbg(INFO, "enable F2: err=%d\n", err);

	/* If F2 successfully enabled, set core and enable interrupts */
	if (!err) {
		/* Set up the interrupt mask and enable interrupts */
		bus->hostintmask = HOSTINTMASK;
		brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
				   bus->hostintmask, NULL);

		/* Per-chip F2 receive FIFO watermark (expressed in words,
		 * see the CY_*_F2_WATERMARK defines) and MES busy control.
		 */
		switch (sdiod->func1->device) {
		case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
				  CY_4373_F2_WATERMARK);
			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
					   CY_4373_F2_WATERMARK, &err);
			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
						   &err);
			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
					   &err);
			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
					   CY_4373_F1_MESBUSYCTRL, &err);
			break;
		case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012:
			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
				  CY_43012_F2_WATERMARK);
			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
					   CY_43012_F2_WATERMARK, &err);
			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
						   &err);
			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
					   &err);
			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
					   CY_43012_MESBUSYCTRL, &err);
			break;
		case SDIO_DEVICE_ID_BROADCOM_4339:
			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 4339\n",
				  CY_4339_F2_WATERMARK);
			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
					   CY_4339_F2_WATERMARK, &err);
			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
						   &err);
			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
					   &err);
			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
					   CY_4339_MESBUSYCTRL, &err);
			break;
		case SDIO_DEVICE_ID_BROADCOM_43455:
			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 43455\n",
				  CY_43455_F2_WATERMARK);
			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
					   CY_43455_F2_WATERMARK, &err);
			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
						   &err);
			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
					   &err);
			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
					   CY_43455_MESBUSYCTRL, &err);
			break;
		case SDIO_DEVICE_ID_BROADCOM_4359:
			/* fallthrough */
		case SDIO_DEVICE_ID_BROADCOM_4354:
			/* fallthrough */
		case SDIO_DEVICE_ID_BROADCOM_4356:
			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
				  CY_435X_F2_WATERMARK);
			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
					   CY_435X_F2_WATERMARK, &err);
			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
						   &err);
			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
					   &err);
			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
					   CY_435X_F1_MESBUSYCTRL, &err);
			break;
		default:
			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
					   DEFAULT_F2_WATERMARK, &err);
			break;
		}
	} else {
		/* Disable F2 again */
		sdio_disable_func(sdiod->func2);
		goto checkdied;
	}

	if (brcmf_chip_sr_capable(bus->ci)) {
		/* Save-restore capable chip: set up sleep/wake handshaking */
		brcmf_sdio_sr_init(bus);
	} else {
		/* Restore previous clock setting */
		brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR,
				   saveclk, &err);
	}

	if (err == 0) {
		/* Assign bus interface call back */
		sdiod->bus_if->dev = sdiod->dev;
		sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
		sdiod->bus_if->chip = bus->ci->chip;
		sdiod->bus_if->chiprev = bus->ci->chiprev;

		/* Allow full data communication using DPC from now on. */
		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);

		err = brcmf_sdiod_intr_register(sdiod);
		if (err != 0)
			brcmf_err("intr register failed:%d\n", err);
	}

	/* If we didn't come up, turn off backplane clock */
	if (err != 0) {
		brcmf_sdio_clkctl(bus, CLK_NONE, false);
		goto checkdied;
	}

	sdio_release_host(sdiod->func1);

	err = brcmf_alloc(sdiod->dev, sdiod->settings);
	if (err) {
		brcmf_err("brcmf_alloc failed\n");
		goto claim;
	}

	/* Attach to the common layer, reserve hdr space */
	err = brcmf_attach(sdiod->dev);
	if (err != 0) {
		brcmf_err("brcmf_attach failed\n");
		goto free;
	}

	/* ready */
	return;

	/* Error unwind ladder: each label undoes progressively less state.
	 * checkdied dumps the firmware console/trap info for diagnosis.
	 */
free:
	brcmf_free(sdiod->dev);
claim:
	sdio_claim_host(sdiod->func1);
checkdied:
	brcmf_sdio_checkdied(bus);
release:
	sdio_release_host(sdiod->func1);
fail:
	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
	/* Unbind both the F2 function device and the probing device */
	device_release_driver(&sdiod->func2->dev);
	device_release_driver(dev);
}

/**
 * brcmf_sdio_prepare_fw_request() - build the firmware loader request.
 * @bus: SDIO bus instance.
 *
 * Creates a two-item request (firmware .bin plus NVRAM .txt) keyed by the
 * chip id/revision and board type.
 *
 * Return: the allocated request, or NULL on allocation failure.  The caller
 * owns the request until it is handed to brcmf_fw_get_firmwares().
 */
static struct brcmf_fw_request *
brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
{
	struct brcmf_fw_request *fwreq;
	struct brcmf_fw_name fwnames[] = {
		{ ".bin", bus->sdiodev->fw_name },
		{ ".txt", bus->sdiodev->nvram_name },
	};

	fwreq = brcmf_fw_alloc_request(bus->ci->chip, bus->ci->chiprev,
				       brcmf_sdio_fwnames,
				       ARRAY_SIZE(brcmf_sdio_fwnames),
				       fwnames, ARRAY_SIZE(fwnames));
	if (!fwreq)
		return NULL;

	fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
	fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
	fwreq->board_type = bus->sdiodev->settings->board_type;

	return fwreq;
}

/**
 * brcmf_sdio_probe() - set up the SDIO bus layer.
 * @sdiodev: SDIO device state from the lower (sdiod) layer.
 *
 * Allocates the bus state, creates the ordered work queue and watchdog
 * thread, attaches to the dongle, quiesces F2 and drops the backplane
 * clock, then kicks off the asynchronous firmware request whose completion
 * (brcmf_sdio_firmware_callback()) finishes the bring-up.
 *
 * Return: the new bus instance, or NULL on failure (all partial state is
 * torn down via brcmf_sdio_remove()).
 */
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret;
	struct brcmf_sdio *bus;
	struct workqueue_struct *wq;
	struct brcmf_fw_request *fwreq;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate private bus interface state */
	/* NOTE(review): GFP_ATOMIC here although probe context usually
	 * permits GFP_KERNEL — confirm whether a sleeping allocation is
	 * actually disallowed on this path.
	 */
	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
	if (!bus)
		goto fail;

	bus->sdiodev = sdiodev;
	sdiodev->bus = bus;
	skb_queue_head_init(&bus->glom);
	bus->txbound = BRCMF_TXBOUND;
	bus->rxbound = BRCMF_RXBOUND;
	bus->txminmax = BRCMF_TXMINMAX;
	bus->tx_seq = SDPCM_SEQ_WRAP - 1;

	/* single-threaded workqueue */
	wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
				     dev_name(&sdiodev->func1->dev));
	if (!wq) {
		brcmf_err("insufficient memory to create txworkqueue\n");
		goto fail;
	}
	brcmf_sdiod_freezer_count(sdiodev);
	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
	bus->brcmf_wq = wq;

	/* attempt to attach to the dongle */
	if (!(brcmf_sdio_probe_attach(bus))) {
		brcmf_err("brcmf_sdio_probe_attach failed\n");
		goto fail;
	}

	spin_lock_init(&bus->rxctl_lock);
	spin_lock_init(&bus->txq_lock);
	init_waitqueue_head(&bus->ctrl_wait);
	init_waitqueue_head(&bus->dcmd_resp_wait);

	/* Set up the watchdog timer */
	timer_setup(&bus->timer, brcmf_sdio_watchdog, 0);
	/* Initialize watchdog thread */
	init_completion(&bus->watchdog_wait);
	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
					bus, "brcmf_wdog/%s",
					dev_name(&sdiodev->func1->dev));
	if (IS_ERR(bus->watchdog_tsk)) {
		/* Non-fatal: continue without the watchdog thread */
		pr_warn("brcmf_watchdog thread failed to start\n");
		bus->watchdog_tsk = NULL;
	}
	/* Initialize DPC thread */
	bus->dpc_triggered = false;
	bus->dpc_running = false;

	/* default sdio bus header length for tx packet */
	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;

	/* Query the F2 block size, set roundup accordingly */
	bus->blocksize = bus->sdiodev->func2->cur_blksize;
	bus->roundup = min(max_roundup, bus->blocksize);

	sdio_claim_host(bus->sdiodev->func1);

	/* Disable F2 to clear any intermediate frame state on the dongle */
	sdio_disable_func(bus->sdiodev->func2);

	bus->rxflow = false;

	/* Done with backplane-dependent accesses, can drop clock... */
	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);

	sdio_release_host(bus->sdiodev->func1);

	/* ...and initialize clock/power states */
	bus->clkstate = CLK_SDONLY;
	bus->idletime = BRCMF_IDLE_INTERVAL;
	bus->idleclock = BRCMF_IDLE_ACTIVE;

	/* SR state */
	bus->sr_enabled = false;

	brcmf_dbg(INFO, "completed!!\n");

	/* Request the firmware asynchronously; the remainder of the
	 * bring-up happens in brcmf_sdio_firmware_callback().
	 */
	fwreq = brcmf_sdio_prepare_fw_request(bus);
	if (!fwreq) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fw_get_firmwares(sdiodev->dev, fwreq,
				     brcmf_sdio_firmware_callback);
	if (ret != 0) {
		brcmf_err("async firmware request failed: %d\n", ret);
		kfree(fwreq);
		goto fail;
	}

	return bus;

fail:
	/* brcmf_sdio_remove() tolerates a NULL/partially-initialized bus */
	brcmf_sdio_remove(bus);
	return NULL;
}

/* Detach and free everything */
void brcmf_sdio_remove(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus) {
		/* Stop watchdog task */
		if (bus->watchdog_tsk) {
			send_sig(SIGTERM, bus->watchdog_tsk, 1);
			kthread_stop(bus->watchdog_tsk);
			bus->watchdog_tsk = NULL;
		}

		/* De-register interrupt handler */
		brcmf_sdiod_intr_unregister(bus->sdiodev);

		brcmf_detach(bus->sdiodev->dev);

		cancel_work_sync(&bus->datawork);
		if (bus->brcmf_wq)
			destroy_workqueue(bus->brcmf_wq);

		if (bus->ci) {
			/* Only touch the hardware if the card is still
			 * reachable (not in NOMEDIUM state).
			 */
			if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
				sdio_claim_host(bus->sdiodev->func1);
				brcmf_sdio_wd_timer(bus, false);
				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
				/* Leave the device in state where it is
				 * 'passive'. This is done by resetting all
				 * necessary cores.
				 */
				msleep(20);
				brcmf_chip_set_passive(bus->ci);
				brcmf_sdio_clkctl(bus, CLK_NONE, false);
				sdio_release_host(bus->sdiodev->func1);
			}
			brcmf_chip_detach(bus->ci);
		}
		if (bus->sdiodev->settings)
			brcmf_release_module_param(bus->sdiodev->settings);

		kfree(bus->rxbuf);
		kfree(bus->hdrbuf);
		kfree(bus);
	}

	brcmf_dbg(TRACE, "Disconnected\n");
}

/**
 * brcmf_sdio_wd_timer() - start, re-arm or stop the watchdog timer.
 * @bus: SDIO bus instance.
 * @active: true to (re)arm the timer for one BRCMF_WD_POLL period,
 *	false to stop it completely.
 *
 * The timer is only armed once the firmware is loaded (bus state is
 * BRCMF_SDIOD_DATA); stopping works regardless of bus state.
 */
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, bool active)
{
	/* Totally stop the timer */
	if (!active && bus->wd_active) {
		del_timer_sync(&bus->timer);
		bus->wd_active = false;
		return;
	}

	/* don't start the wd until fw is loaded */
	if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
		return;

	if (active) {
		if (!bus->wd_active) {
			/* Create timer again when watchdog period is
			   dynamically changed or in the first instance
			 */
			bus->timer.expires = jiffies + BRCMF_WD_POLL;
			add_timer(&bus->timer);
			bus->wd_active = true;
		} else {
			/* Re arm the timer, at last watchdog period */
			mod_timer(&bus->timer, jiffies + BRCMF_WD_POLL);
		}
	}
}

/**
 * brcmf_sdio_sleep() - request bus sleep or wake under host claim.
 * @bus: SDIO bus instance.
 * @sleep: true to enter sleep, false to wake.
 *
 * Thin wrapper that claims func1 around brcmf_sdio_bus_sleep().
 *
 * Return: result of brcmf_sdio_bus_sleep().
 */
int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
{
	int ret;

	sdio_claim_host(bus->sdiodev->func1);
	ret = brcmf_sdio_bus_sleep(bus, sleep, false);
	sdio_release_host(bus->sdiodev->func1);

	return ret;
}