1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers 4 * 5 * Copyright (c) 2010, ST-Ericsson 6 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> 7 * 8 * Based on: 9 * ST-Ericsson UMAC CW1200 driver, which is 10 * Copyright (c) 2010, ST-Ericsson 11 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com> 12 */ 13 14 #include <linux/module.h> 15 #include <net/mac80211.h> 16 #include <linux/kthread.h> 17 #include <linux/timer.h> 18 19 #include "cw1200.h" 20 #include "bh.h" 21 #include "hwio.h" 22 #include "wsm.h" 23 #include "hwbus.h" 24 #include "debug.h" 25 #include "fwio.h" 26 27 static int cw1200_bh(void *arg); 28 29 #define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4) 30 /* an SPI message cannot be bigger than (2"12-1)*2 bytes 31 * "*2" to cvt to bytes 32 */ 33 #define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2) 34 #define PIGGYBACK_CTRL_REG (2) 35 #define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG) 36 37 /* Suspend state privates */ 38 enum cw1200_bh_pm_state { 39 CW1200_BH_RESUMED = 0, 40 CW1200_BH_SUSPEND, 41 CW1200_BH_SUSPENDED, 42 CW1200_BH_RESUME, 43 }; 44 45 typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv, 46 u8 *data, size_t size); 47 48 static void cw1200_bh_work(struct work_struct *work) 49 { 50 struct cw1200_common *priv = 51 container_of(work, struct cw1200_common, bh_work); 52 cw1200_bh(priv); 53 } 54 55 int cw1200_register_bh(struct cw1200_common *priv) 56 { 57 int err = 0; 58 /* Realtime workqueue */ 59 priv->bh_workqueue = alloc_workqueue("cw1200_bh", 60 WQ_MEM_RECLAIM | WQ_HIGHPRI 61 | WQ_CPU_INTENSIVE, 1); 62 63 if (!priv->bh_workqueue) 64 return -ENOMEM; 65 66 INIT_WORK(&priv->bh_work, cw1200_bh_work); 67 68 pr_debug("[BH] register.\n"); 69 70 atomic_set(&priv->bh_rx, 0); 71 atomic_set(&priv->bh_tx, 0); 72 atomic_set(&priv->bh_term, 0); 73 atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); 74 priv->bh_error = 0; 75 priv->hw_bufs_used = 0; 76 priv->buf_id_tx = 0; 77 priv->buf_id_rx = 0; 78 init_waitqueue_head(&priv->bh_wq); 79 init_waitqueue_head(&priv->bh_evt_wq); 80 81 err = !queue_work(priv->bh_workqueue, &priv->bh_work); 82 WARN_ON(err); 83 return err; 84 } 85 86 void cw1200_unregister_bh(struct cw1200_common *priv) 87 { 88 atomic_add(1, &priv->bh_term); 89 wake_up(&priv->bh_wq); 90 91 flush_workqueue(priv->bh_workqueue); 92 93 destroy_workqueue(priv->bh_workqueue); 94 priv->bh_workqueue = NULL; 95 96 pr_debug("[BH] unregistered.\n"); 97 } 98 99 void cw1200_irq_handler(struct cw1200_common *priv) 100 { 101 pr_debug("[BH] irq.\n"); 102 103 /* Disable Interrupts! */ 104 /* NOTE: hwbus_ops->lock already held */ 105 __cw1200_irq_enable(priv, 0); 106 107 if (/* WARN_ON */(priv->bh_error)) 108 return; 109 110 if (atomic_add_return(1, &priv->bh_rx) == 1) 111 wake_up(&priv->bh_wq); 112 } 113 EXPORT_SYMBOL_GPL(cw1200_irq_handler); 114 115 void cw1200_bh_wakeup(struct cw1200_common *priv) 116 { 117 pr_debug("[BH] wakeup.\n"); 118 if (priv->bh_error) { 119 pr_err("[BH] wakeup failed (BH error)\n"); 120 return; 121 } 122 123 if (atomic_add_return(1, &priv->bh_tx) == 1) 124 wake_up(&priv->bh_wq); 125 } 126 127 int cw1200_bh_suspend(struct cw1200_common *priv) 128 { 129 pr_debug("[BH] suspend.\n"); 130 if (priv->bh_error) { 131 wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n"); 132 return -EINVAL; 133 } 134 135 atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND); 136 wake_up(&priv->bh_wq); 137 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || 138 (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)), 139 1 * HZ) ? 0 : -ETIMEDOUT; 140 } 141 142 int cw1200_bh_resume(struct cw1200_common *priv) 143 { 144 pr_debug("[BH] resume.\n"); 145 if (priv->bh_error) { 146 wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n"); 147 return -EINVAL; 148 } 149 150 atomic_set(&priv->bh_suspend, CW1200_BH_RESUME); 151 wake_up(&priv->bh_wq); 152 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || 153 (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)), 154 1 * HZ) ? 0 : -ETIMEDOUT; 155 } 156 157 static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv) 158 { 159 ++priv->hw_bufs_used; 160 } 161 162 int wsm_release_tx_buffer(struct cw1200_common *priv, int count) 163 { 164 int ret = 0; 165 int hw_bufs_used = priv->hw_bufs_used; 166 167 priv->hw_bufs_used -= count; 168 if (WARN_ON(priv->hw_bufs_used < 0)) 169 ret = -1; 170 else if (hw_bufs_used >= priv->wsm_caps.input_buffers) 171 ret = 1; 172 if (!priv->hw_bufs_used) 173 wake_up(&priv->bh_evt_wq); 174 return ret; 175 } 176 177 static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv, 178 u16 *ctrl_reg) 179 { 180 int ret; 181 182 ret = cw1200_reg_read_16(priv, 183 ST90TDS_CONTROL_REG_ID, ctrl_reg); 184 if (ret) { 185 ret = cw1200_reg_read_16(priv, 186 ST90TDS_CONTROL_REG_ID, ctrl_reg); 187 if (ret) 188 pr_err("[BH] Failed to read control register.\n"); 189 } 190 191 return ret; 192 } 193 194 static int cw1200_device_wakeup(struct cw1200_common *priv) 195 { 196 u16 ctrl_reg; 197 int ret; 198 199 pr_debug("[BH] Device wakeup.\n"); 200 201 /* First, set the dpll register */ 202 ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, 203 cw1200_dpll_from_clk(priv->hw_refclk)); 204 if (WARN_ON(ret)) 205 return ret; 206 207 /* To force the device to be always-on, the host sets WLAN_UP to 1 */ 208 ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 209 ST90TDS_CONT_WUP_BIT); 210 if (WARN_ON(ret)) 211 return ret; 212 213 ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg); 214 if (WARN_ON(ret)) 215 return ret; 216 217 /* If the device returns WLAN_RDY as 1, the device is active and will 218 * remain active. 219 */ 220 if (ctrl_reg & ST90TDS_CONT_RDY_BIT) { 221 pr_debug("[BH] Device awake.\n"); 222 return 1; 223 } 224 225 return 0; 226 } 227 228 /* Must be called from BH thraed. */ 229 void cw1200_enable_powersave(struct cw1200_common *priv, 230 bool enable) 231 { 232 pr_debug("[BH] Powerave is %s.\n", 233 enable ? "enabled" : "disabled"); 234 priv->powersave_enabled = enable; 235 } 236 237 static int cw1200_bh_rx_helper(struct cw1200_common *priv, 238 uint16_t *ctrl_reg, 239 int *tx) 240 { 241 size_t read_len = 0; 242 struct sk_buff *skb_rx = NULL; 243 struct wsm_hdr *wsm; 244 size_t wsm_len; 245 u16 wsm_id; 246 u8 wsm_seq; 247 int rx_resync = 1; 248 249 size_t alloc_len; 250 u8 *data; 251 252 read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2; 253 if (!read_len) 254 return 0; /* No more work */ 255 256 if (WARN_ON((read_len < sizeof(struct wsm_hdr)) || 257 (read_len > EFFECTIVE_BUF_SIZE))) { 258 pr_debug("Invalid read len: %zu (%04x)", 259 read_len, *ctrl_reg); 260 goto err; 261 } 262 263 /* Add SIZE of PIGGYBACK reg (CONTROL Reg) 264 * to the NEXT Message length + 2 Bytes for SKB 265 */ 266 read_len = read_len + 2; 267 268 alloc_len = priv->hwbus_ops->align_size( 269 priv->hwbus_priv, read_len); 270 271 /* Check if not exceeding CW1200 capabilities */ 272 if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) { 273 pr_debug("Read aligned len: %zu\n", 274 alloc_len); 275 } 276 277 skb_rx = dev_alloc_skb(alloc_len); 278 if (WARN_ON(!skb_rx)) 279 goto err; 280 281 skb_trim(skb_rx, 0); 282 skb_put(skb_rx, read_len); 283 data = skb_rx->data; 284 if (WARN_ON(!data)) 285 goto err; 286 287 if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) { 288 pr_err("rx blew up, len %zu\n", alloc_len); 289 goto err; 290 } 291 292 /* Piggyback */ 293 *ctrl_reg = __le16_to_cpu( 294 ((__le16 *)data)[alloc_len / 2 - 1]); 295 296 wsm = (struct wsm_hdr *)data; 297 wsm_len = __le16_to_cpu(wsm->len); 298 if (WARN_ON(wsm_len > read_len)) 299 goto err; 300 301 if (priv->wsm_enable_wsm_dumps) 302 print_hex_dump_bytes("<-- ", 303 DUMP_PREFIX_NONE, 304 data, wsm_len); 305 306 wsm_id = __le16_to_cpu(wsm->id) & 0xFFF; 307 wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7; 308 309 skb_trim(skb_rx, wsm_len); 310 311 if (wsm_id == 0x0800) { 312 wsm_handle_exception(priv, 313 &data[sizeof(*wsm)], 314 wsm_len - sizeof(*wsm)); 315 goto err; 316 } else if (!rx_resync) { 317 if (WARN_ON(wsm_seq != priv->wsm_rx_seq)) 318 goto err; 319 } 320 priv->wsm_rx_seq = (wsm_seq + 1) & 7; 321 rx_resync = 0; 322 323 if (wsm_id & 0x0400) { 324 int rc = wsm_release_tx_buffer(priv, 1); 325 if (WARN_ON(rc < 0)) 326 return rc; 327 else if (rc > 0) 328 *tx = 1; 329 } 330 331 /* cw1200_wsm_rx takes care on SKB livetime */ 332 if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx))) 333 goto err; 334 335 if (skb_rx) { 336 dev_kfree_skb(skb_rx); 337 skb_rx = NULL; 338 } 339 340 return 0; 341 342 err: 343 if (skb_rx) { 344 dev_kfree_skb(skb_rx); 345 skb_rx = NULL; 346 } 347 return -1; 348 } 349 350 static int cw1200_bh_tx_helper(struct cw1200_common *priv, 351 int *pending_tx, 352 int *tx_burst) 353 { 354 size_t tx_len; 355 u8 *data; 356 int ret; 357 struct wsm_hdr *wsm; 358 359 if (priv->device_can_sleep) { 360 ret = cw1200_device_wakeup(priv); 361 if (WARN_ON(ret < 0)) { /* Error in wakeup */ 362 *pending_tx = 1; 363 return 0; 364 } else if (ret) { /* Woke up */ 365 priv->device_can_sleep = false; 366 } else { /* Did not awake */ 367 *pending_tx = 1; 368 return 0; 369 } 370 } 371 372 wsm_alloc_tx_buffer(priv); 373 ret = wsm_get_tx(priv, &data, &tx_len, tx_burst); 374 if (ret <= 0) { 375 wsm_release_tx_buffer(priv, 1); 376 if (WARN_ON(ret < 0)) 377 return ret; /* Error */ 378 return 0; /* No work */ 379 } 380 381 wsm = (struct wsm_hdr *)data; 382 BUG_ON(tx_len < sizeof(*wsm)); 383 BUG_ON(__le16_to_cpu(wsm->len) != tx_len); 384 385 atomic_add(1, &priv->bh_tx); 386 387 tx_len = priv->hwbus_ops->align_size( 388 priv->hwbus_priv, tx_len); 389 390 /* Check if not exceeding CW1200 capabilities */ 391 if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE)) 392 pr_debug("Write aligned len: %zu\n", tx_len); 393 394 wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX)); 395 wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq)); 396 397 if (WARN_ON(cw1200_data_write(priv, data, tx_len))) { 398 pr_err("tx blew up, len %zu\n", tx_len); 399 wsm_release_tx_buffer(priv, 1); 400 return -1; /* Error */ 401 } 402 403 if (priv->wsm_enable_wsm_dumps) 404 print_hex_dump_bytes("--> ", 405 DUMP_PREFIX_NONE, 406 data, 407 __le16_to_cpu(wsm->len)); 408 409 wsm_txed(priv, data); 410 priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX; 411 412 if (*tx_burst > 1) { 413 cw1200_debug_tx_burst(priv); 414 return 1; /* Work remains */ 415 } 416 417 return 0; 418 } 419 420 static int cw1200_bh(void *arg) 421 { 422 struct cw1200_common *priv = arg; 423 int rx, tx, term, suspend; 424 u16 ctrl_reg = 0; 425 int tx_allowed; 426 int pending_tx = 0; 427 int tx_burst; 428 long status; 429 u32 dummy; 430 int ret; 431 432 for (;;) { 433 if (!priv->hw_bufs_used && 434 priv->powersave_enabled && 435 !priv->device_can_sleep && 436 !atomic_read(&priv->recent_scan)) { 437 status = 1 * HZ; 438 pr_debug("[BH] Device wakedown. No data.\n"); 439 cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0); 440 priv->device_can_sleep = true; 441 } else if (priv->hw_bufs_used) { 442 /* Interrupt loss detection */ 443 status = 1 * HZ; 444 } else { 445 status = MAX_SCHEDULE_TIMEOUT; 446 } 447 448 /* Dummy Read for SDIO retry mechanism*/ 449 if ((priv->hw_type != -1) && 450 (atomic_read(&priv->bh_rx) == 0) && 451 (atomic_read(&priv->bh_tx) == 0)) 452 cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID, 453 &dummy, sizeof(dummy)); 454 455 pr_debug("[BH] waiting ...\n"); 456 status = wait_event_interruptible_timeout(priv->bh_wq, ({ 457 rx = atomic_xchg(&priv->bh_rx, 0); 458 tx = atomic_xchg(&priv->bh_tx, 0); 459 term = atomic_xchg(&priv->bh_term, 0); 460 suspend = pending_tx ? 461 0 : atomic_read(&priv->bh_suspend); 462 (rx || tx || term || suspend || priv->bh_error); 463 }), status); 464 465 pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n", 466 rx, tx, term, suspend, priv->bh_error, status); 467 468 /* Did an error occur? */ 469 if ((status < 0 && status != -ERESTARTSYS) || 470 term || priv->bh_error) { 471 break; 472 } 473 if (!status) { /* wait_event timed out */ 474 unsigned long timestamp = jiffies; 475 long timeout; 476 int pending = 0; 477 int i; 478 479 /* Check to see if we have any outstanding frames */ 480 if (priv->hw_bufs_used && (!rx || !tx)) { 481 wiphy_warn(priv->hw->wiphy, 482 "Missed interrupt? (%d frames outstanding)\n", 483 priv->hw_bufs_used); 484 rx = 1; 485 486 /* Get a timestamp of "oldest" frame */ 487 for (i = 0; i < 4; ++i) 488 pending += cw1200_queue_get_xmit_timestamp( 489 &priv->tx_queue[i], 490 ×tamp, 491 priv->pending_frame_id); 492 493 /* Check if frame transmission is timed out. 494 * Add an extra second with respect to possible 495 * interrupt loss. 496 */ 497 timeout = timestamp + 498 WSM_CMD_LAST_CHANCE_TIMEOUT + 499 1 * HZ - 500 jiffies; 501 502 /* And terminate BH thread if the frame is "stuck" */ 503 if (pending && timeout < 0) { 504 wiphy_warn(priv->hw->wiphy, 505 "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n", 506 priv->hw_bufs_used, pending, 507 timestamp, jiffies); 508 break; 509 } 510 } else if (!priv->device_can_sleep && 511 !atomic_read(&priv->recent_scan)) { 512 pr_debug("[BH] Device wakedown. Timeout.\n"); 513 cw1200_reg_write_16(priv, 514 ST90TDS_CONTROL_REG_ID, 0); 515 priv->device_can_sleep = true; 516 } 517 goto done; 518 } else if (suspend) { 519 pr_debug("[BH] Device suspend.\n"); 520 if (priv->powersave_enabled) { 521 pr_debug("[BH] Device wakedown. Suspend.\n"); 522 cw1200_reg_write_16(priv, 523 ST90TDS_CONTROL_REG_ID, 0); 524 priv->device_can_sleep = true; 525 } 526 527 atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED); 528 wake_up(&priv->bh_evt_wq); 529 status = wait_event_interruptible(priv->bh_wq, 530 CW1200_BH_RESUME == atomic_read(&priv->bh_suspend)); 531 if (status < 0) { 532 wiphy_err(priv->hw->wiphy, 533 "Failed to wait for resume: %ld.\n", 534 status); 535 break; 536 } 537 pr_debug("[BH] Device resume.\n"); 538 atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); 539 wake_up(&priv->bh_evt_wq); 540 atomic_add(1, &priv->bh_rx); 541 goto done; 542 } 543 544 rx: 545 tx += pending_tx; 546 pending_tx = 0; 547 548 if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) 549 break; 550 551 /* Don't bother trying to rx unless we have data to read */ 552 if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { 553 ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); 554 if (ret < 0) 555 break; 556 /* Double up here if there's more data.. */ 557 if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { 558 ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); 559 if (ret < 0) 560 break; 561 } 562 } 563 564 tx: 565 if (tx) { 566 tx = 0; 567 568 BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers); 569 tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used; 570 tx_allowed = tx_burst > 0; 571 572 if (!tx_allowed) { 573 /* Buffers full. Ensure we process tx 574 * after we handle rx.. 575 */ 576 pending_tx = tx; 577 goto done_rx; 578 } 579 ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst); 580 if (ret < 0) 581 break; 582 if (ret > 0) /* More to transmit */ 583 tx = ret; 584 585 /* Re-read ctrl reg */ 586 if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) 587 break; 588 } 589 590 done_rx: 591 if (priv->bh_error) 592 break; 593 if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) 594 goto rx; 595 if (tx) 596 goto tx; 597 598 done: 599 /* Re-enable device interrupts */ 600 priv->hwbus_ops->lock(priv->hwbus_priv); 601 __cw1200_irq_enable(priv, 1); 602 priv->hwbus_ops->unlock(priv->hwbus_priv); 603 } 604 605 /* Explicitly disable device interrupts */ 606 priv->hwbus_ops->lock(priv->hwbus_priv); 607 __cw1200_irq_enable(priv, 0); 608 priv->hwbus_ops->unlock(priv->hwbus_priv); 609 610 if (!term) { 611 pr_err("[BH] Fatal error, exiting.\n"); 612 priv->bh_error = 1; 613 /* TODO: schedule_work(recovery) */ 614 } 615 return 0; 616 } 617