1 /****************************************************************************** 2 * 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 * Copyright(c) 2016 Intel Deutschland GmbH 6 * 7 * Portions of this file are derived from the ipw3945 project, as well 8 * as portions of the ieee80211 subsystem header files. 9 * 10 * This program is free software; you can redistribute it and/or modify it 11 * under the terms of version 2 of the GNU General Public License as 12 * published by the Free Software Foundation. 13 * 14 * This program is distributed in the hope that it will be useful, but WITHOUT 15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 17 * more details. 18 * 19 * You should have received a copy of the GNU General Public License along with 20 * this program; if not, write to the Free Software Foundation, Inc., 21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 22 * 23 * The full GNU General Public License is included in this distribution in the 24 * file called LICENSE. 25 * 26 * Contact Information: 27 * Intel Linux Wireless <linuxwifi@intel.com> 28 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * 30 *****************************************************************************/ 31 #include <linux/sched.h> 32 #include <linux/wait.h> 33 #include <linux/gfp.h> 34 35 #include "iwl-prph.h" 36 #include "iwl-io.h" 37 #include "internal.h" 38 #include "iwl-op-mode.h" 39 40 /****************************************************************************** 41 * 42 * RX path functions 43 * 44 ******************************************************************************/ 45 46 /* 47 * Rx theory of operation 48 * 49 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 50 * each of which point to Receive Buffers to be filled by the NIC. These get 51 * used not only for Rx frames, but for any command response or notification 52 * from the NIC. The driver and NIC manage the Rx buffers by means 53 * of indexes into the circular buffer. 54 * 55 * Rx Queue Indexes 56 * The host/firmware share two index registers for managing the Rx buffers. 57 * 58 * The READ index maps to the first position that the firmware may be writing 59 * to -- the driver can read up to (but not including) this position and get 60 * good data. 61 * The READ index is managed by the firmware once the card is enabled. 62 * 63 * The WRITE index maps to the last position the driver has read from -- the 64 * position preceding WRITE is the last slot the firmware can place a packet. 65 * 66 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 67 * WRITE = READ. 68 * 69 * During initialization, the host sets up the READ queue position to the first 70 * INDEX position, and WRITE to the last (READ - 1 wrapped) 71 * 72 * When the firmware places a packet in a buffer, it will advance the READ index 73 * and fire the RX interrupt. The driver can then query the READ index and 74 * process as many packets as possible, moving the WRITE index forward as it 75 * resets the Rx queue buffers with new memory. 76 * 77 * The management in the driver is as follows: 78 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 79 * When the interrupt handler is called, the request is processed. 
80 * The page is either stolen - transferred to the upper layer 81 * or reused - added immediately to the iwl->rxq->rx_free list. 82 * + When the page is stolen - the driver updates the matching queue's used 83 * count, detaches the RBD and transfers it to the queue used list. 84 * When there are two used RBDs - they are transferred to the allocator empty 85 * list. Work is then scheduled for the allocator to start allocating 86 * eight buffers. 87 * When there are another 6 used RBDs - they are transferred to the allocator 88 * empty list and the driver tries to claim the pre-allocated buffers and 89 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them 90 * until ready. 91 * When there are 8+ buffers in the free list - either from allocation or from 92 * 8 reused unstolen pages - restock is called to update the FW and indexes. 93 * + In order to make sure the allocator always has RBDs to use for allocation 94 * the allocator has initial pool in the size of num_queues*(8-2) - the 95 * maximum missing RBDs per allocation request (request posted with 2 96 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). 97 * The queues supplies the recycle of the rest of the RBDs. 98 * + A received packet is processed and handed to the kernel network stack, 99 * detached from the iwl->rxq. The driver 'processed' index is updated. 100 * + If there are no allocated buffers in iwl->rxq->rx_free, 101 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 102 * If there were enough free buffers and RX_STALLED is set it is cleared. 103 * 104 * 105 * Driver sequence: 106 * 107 * iwl_rxq_alloc() Allocates rx_free 108 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 109 * iwl_pcie_rxq_restock. 110 * Used only during initialization. 111 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 112 * queue, updates firmware pointers, and updates 113 * the WRITE index. 114 * iwl_pcie_rx_allocator() Background work for allocating pages. 115 * 116 * -- enable interrupts -- 117 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 118 * READ INDEX, detaching the SKB from the pool. 119 * Moves the packet buffer from queue to rx_used. 120 * Posts and claims requests to the allocator. 121 * Calls iwl_pcie_rxq_restock to refill any empty 122 * slots. 123 * 124 * RBD life-cycle: 125 * 126 * Init: 127 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue 128 * 129 * Regular Receive interrupt: 130 * Page Stolen: 131 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> 132 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue 133 * Page not Stolen: 134 * rxq.queue -> rxq.rx_free -> rxq.queue 135 * ... 136 * 137 */ 138 139 /* 140 * iwl_rxq_space - Return number of free slots available in queue. 141 */ 142 static int iwl_rxq_space(const struct iwl_rxq *rxq) 143 { 144 /* Make sure rx queue size is a power of 2 */ 145 WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); 146 147 /* 148 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity 149 * between empty and completely full queues. 150 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well 151 * defined for negative dividends. 
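 * E.g. with queue_size == 256 and read == write, (read - write - 1) & 255 is 255, so at most queue_size - 1 slots are ever reported free.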
152 */ 153 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); 154 } 155 156 /* 157 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 158 */ 159 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) 160 { 161 return cpu_to_le32((u32)(dma_addr >> 8)); 162 } 163 164 /* 165 * iwl_pcie_rx_stop - stops the Rx DMA 166 */ 167 int iwl_pcie_rx_stop(struct iwl_trans *trans) 168 { 169 if (trans->cfg->mq_rx_supported) { 170 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); 171 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, 172 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); 173 } else { 174 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 175 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, 176 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 177 1000); 178 } 179 } 180 181 /* 182 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 183 */ 184 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, 185 struct iwl_rxq *rxq) 186 { 187 u32 reg; 188 189 lockdep_assert_held(&rxq->lock); 190 191 /* 192 * explicitly wake up the NIC if: 193 * 1. shadow registers aren't enabled 194 * 2. there is a chance that the NIC is asleep 195 */ 196 if (!trans->cfg->base_params->shadow_reg_enable && 197 test_bit(STATUS_TPOWER_PMI, &trans->status)) { 198 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 199 200 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 201 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", 202 reg); 203 iwl_set_bit(trans, CSR_GP_CNTRL, 204 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 205 rxq->need_update = true; 206 return; 207 } 208 } 209 210 rxq->write_actual = round_down(rxq->write, 8); 211 if (trans->cfg->mq_rx_supported) 212 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), 213 rxq->write_actual); 214 else 215 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 216 } 217 218 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) 219 { 220 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 221 int i; 222 223 for (i = 0; i < trans->num_rx_queues; i++) { 224 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 225 226 if (!rxq->need_update) 227 continue; 228 spin_lock(&rxq->lock); 229 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 230 rxq->need_update = false; 231 spin_unlock(&rxq->lock); 232 } 233 } 234 235 /* 236 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx 237 */ 238 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, 239 struct iwl_rxq *rxq) 240 { 241 struct iwl_rx_mem_buffer *rxb; 242 243 /* 244 * If the device isn't enabled - no need to try to add buffers... 245 * This can happen when we stop the device and still have an interrupt 246 * pending. We stop the APM before we sync the interrupts because we 247 * have to (see comment there). On the other hand, since the APM is 248 * stopped, we cannot access the HW (in particular not prph). 249 * So don't try to restock if the APM has been already stopped. 
250 */ 251 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 252 return; 253 254 spin_lock(&rxq->lock); 255 while (rxq->free_count) { 256 __le64 *bd = (__le64 *)rxq->bd; 257 258 /* Get next free Rx buffer, remove from free list */ 259 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 260 list); 261 list_del(&rxb->list); 262 rxb->invalid = false; 263 /* 12 first bits are expected to be empty */ 264 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); 265 /* Point to Rx buffer via next RBD in circular buffer */ 266 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); 267 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; 268 rxq->free_count--; 269 } 270 spin_unlock(&rxq->lock); 271 272 /* 273 * If we've added more space for the firmware to place data, tell it. 274 * Increment device's write pointer in multiples of 8. 275 */ 276 if (rxq->write_actual != (rxq->write & ~0x7)) { 277 spin_lock(&rxq->lock); 278 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 279 spin_unlock(&rxq->lock); 280 } 281 } 282 283 /* 284 * iwl_pcie_rxsq_restock - restock implementation for single queue rx 285 */ 286 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, 287 struct iwl_rxq *rxq) 288 { 289 struct iwl_rx_mem_buffer *rxb; 290 291 /* 292 * If the device isn't enabled - not need to try to add buffers... 293 * This can happen when we stop the device and still have an interrupt 294 * pending. We stop the APM before we sync the interrupts because we 295 * have to (see comment there). On the other hand, since the APM is 296 * stopped, we cannot access the HW (in particular not prph). 297 * So don't try to restock if the APM has been already stopped. 298 */ 299 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 300 return; 301 302 spin_lock(&rxq->lock); 303 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { 304 __le32 *bd = (__le32 *)rxq->bd; 305 /* The overwritten rxb must be a used one */ 306 rxb = rxq->queue[rxq->write]; 307 BUG_ON(rxb && rxb->page); 308 309 /* Get next free Rx buffer, remove from free list */ 310 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 311 list); 312 list_del(&rxb->list); 313 rxb->invalid = false; 314 315 /* Point to Rx buffer via next RBD in circular buffer */ 316 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); 317 rxq->queue[rxq->write] = rxb; 318 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 319 rxq->free_count--; 320 } 321 spin_unlock(&rxq->lock); 322 323 /* If we've added more space for the firmware to place data, tell it. 324 * Increment device's write pointer in multiples of 8. */ 325 if (rxq->write_actual != (rxq->write & ~0x7)) { 326 spin_lock(&rxq->lock); 327 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 328 spin_unlock(&rxq->lock); 329 } 330 } 331 332 /* 333 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool 334 * 335 * If there are slots in the RX queue that need to be restocked, 336 * and we have free pre-allocated buffers, fill the ranks as much 337 * as we can, pulling from rx_free. 338 * 339 * This moves the 'write' index forward to catch up with 'processed', and 340 * also updates the memory address in the firmware to reference the new 341 * target buffer. 342 */ 343 static 344 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) 345 { 346 if (trans->cfg->mq_rx_supported) 347 iwl_pcie_rxmq_restock(trans, rxq); 348 else 349 iwl_pcie_rxsq_restock(trans, rxq); 350 } 351 352 /* 353 * iwl_pcie_rx_alloc_page - allocates and returns a page. 
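 * The priority argument is the GFP mask used for the allocation: GFP_KERNEL from the init and background-allocator paths, GFP_ATOMIC from the emergency refill in the RX path.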
354 * 355 */ 356 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, 357 gfp_t priority) 358 { 359 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 360 struct page *page; 361 gfp_t gfp_mask = priority; 362 363 if (trans_pcie->rx_page_order > 0) 364 gfp_mask |= __GFP_COMP; 365 366 /* Alloc a new receive buffer */ 367 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 368 if (!page) { 369 if (net_ratelimit()) 370 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", 371 trans_pcie->rx_page_order); 372 /* 373 * Issue an error if we don't have enough pre-allocated 374 * buffers. 375 ` */ 376 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) 377 IWL_CRIT(trans, 378 "Failed to alloc_pages\n"); 379 return NULL; 380 } 381 return page; 382 } 383 384 /* 385 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 386 * 387 * A used RBD is an Rx buffer that has been given to the stack. To use it again 388 * a page must be allocated and the RBD must point to the page. This function 389 * doesn't change the HW pointer but handles the list of pages that is used by 390 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 391 * allocated buffers. 392 */ 393 static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, 394 struct iwl_rxq *rxq) 395 { 396 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 397 struct iwl_rx_mem_buffer *rxb; 398 struct page *page; 399 400 while (1) { 401 spin_lock(&rxq->lock); 402 if (list_empty(&rxq->rx_used)) { 403 spin_unlock(&rxq->lock); 404 return; 405 } 406 spin_unlock(&rxq->lock); 407 408 /* Alloc a new receive buffer */ 409 page = iwl_pcie_rx_alloc_page(trans, priority); 410 if (!page) 411 return; 412 413 spin_lock(&rxq->lock); 414 415 if (list_empty(&rxq->rx_used)) { 416 spin_unlock(&rxq->lock); 417 __free_pages(page, trans_pcie->rx_page_order); 418 return; 419 } 420 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, 421 list); 422 list_del(&rxb->list); 423 spin_unlock(&rxq->lock); 424 425 BUG_ON(rxb->page); 426 rxb->page = page; 427 /* Get physical address of the RB */ 428 rxb->page_dma = 429 dma_map_page(trans->dev, page, 0, 430 PAGE_SIZE << trans_pcie->rx_page_order, 431 DMA_FROM_DEVICE); 432 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 433 rxb->page = NULL; 434 spin_lock(&rxq->lock); 435 list_add(&rxb->list, &rxq->rx_used); 436 spin_unlock(&rxq->lock); 437 __free_pages(page, trans_pcie->rx_page_order); 438 return; 439 } 440 441 spin_lock(&rxq->lock); 442 443 list_add_tail(&rxb->list, &rxq->rx_free); 444 rxq->free_count++; 445 446 spin_unlock(&rxq->lock); 447 } 448 } 449 450 static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) 451 { 452 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 453 int i; 454 455 for (i = 0; i < RX_POOL_SIZE; i++) { 456 if (!trans_pcie->rx_pool[i].page) 457 continue; 458 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, 459 PAGE_SIZE << trans_pcie->rx_page_order, 460 DMA_FROM_DEVICE); 461 __free_pages(trans_pcie->rx_pool[i].page, 462 trans_pcie->rx_page_order); 463 trans_pcie->rx_pool[i].page = NULL; 464 } 465 } 466 467 /* 468 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues 469 * 470 * Allocates for each received request 8 pages 471 * Called as a scheduled work item. 
472 */ 473 static void iwl_pcie_rx_allocator(struct iwl_trans *trans) 474 { 475 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 476 struct iwl_rb_allocator *rba = &trans_pcie->rba; 477 struct list_head local_empty; 478 int pending = atomic_xchg(&rba->req_pending, 0); 479 480 IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); 481 482 /* If we were scheduled - there is at least one request */ 483 spin_lock(&rba->lock); 484 /* swap out the rba->rbd_empty to a local list */ 485 list_replace_init(&rba->rbd_empty, &local_empty); 486 spin_unlock(&rba->lock); 487 488 while (pending) { 489 int i; 490 struct list_head local_allocated; 491 gfp_t gfp_mask = GFP_KERNEL; 492 493 /* Do not post a warning if there are only a few requests */ 494 if (pending < RX_PENDING_WATERMARK) 495 gfp_mask |= __GFP_NOWARN; 496 497 INIT_LIST_HEAD(&local_allocated); 498 499 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { 500 struct iwl_rx_mem_buffer *rxb; 501 struct page *page; 502 503 /* List should never be empty - each reused RBD is 504 * returned to the list, and initial pool covers any 505 * possible gap between the time the page is allocated 506 * to the time the RBD is added. 507 */ 508 BUG_ON(list_empty(&local_empty)); 509 /* Get the first rxb from the rbd list */ 510 rxb = list_first_entry(&local_empty, 511 struct iwl_rx_mem_buffer, list); 512 BUG_ON(rxb->page); 513 514 /* Alloc a new receive buffer */ 515 page = iwl_pcie_rx_alloc_page(trans, gfp_mask); 516 if (!page) 517 continue; 518 rxb->page = page; 519 520 /* Get physical address of the RB */ 521 rxb->page_dma = dma_map_page(trans->dev, page, 0, 522 PAGE_SIZE << trans_pcie->rx_page_order, 523 DMA_FROM_DEVICE); 524 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 525 rxb->page = NULL; 526 __free_pages(page, trans_pcie->rx_page_order); 527 continue; 528 } 529 530 /* move the allocated entry to the out list */ 531 list_move(&rxb->list, &local_allocated); 532 i++; 533 } 534 535 pending--; 536 if (!pending) { 537 pending = atomic_xchg(&rba->req_pending, 0); 538 IWL_DEBUG_RX(trans, 539 "Pending allocation requests = %d\n", 540 pending); 541 } 542 543 spin_lock(&rba->lock); 544 /* add the allocated rbds to the allocator allocated list */ 545 list_splice_tail(&local_allocated, &rba->rbd_allocated); 546 /* get more empty RBDs for current pending requests */ 547 list_splice_tail_init(&rba->rbd_empty, &local_empty); 548 spin_unlock(&rba->lock); 549 550 atomic_inc(&rba->req_ready); 551 } 552 553 spin_lock(&rba->lock); 554 /* return unused rbds to the allocator empty list */ 555 list_splice_tail(&local_empty, &rba->rbd_empty); 556 spin_unlock(&rba->lock); 557 } 558 559 /* 560 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages 561 .* 562 .* Called by queue when the queue posted allocation request and 563 * has freed 8 RBDs in order to restock itself. 564 * This function directly moves the allocated RBs to the queue's ownership 565 * and updates the relevant counters. 566 */ 567 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, 568 struct iwl_rxq *rxq) 569 { 570 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 571 struct iwl_rb_allocator *rba = &trans_pcie->rba; 572 int i; 573 574 lockdep_assert_held(&rxq->lock); 575 576 /* 577 * atomic_dec_if_positive returns req_ready - 1 for any scenario. 578 * If req_ready is 0 atomic_dec_if_positive will return -1 and this 579 * function will return early, as there are no ready requests. 
580 * atomic_dec_if_positive will perform the *actual* decrement only if 581 * req_ready > 0, i.e. - there are ready requests and the function 582 * hands one request to the caller. 583 */ 584 if (atomic_dec_if_positive(&rba->req_ready) < 0) 585 return; 586 587 spin_lock(&rba->lock); 588 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { 589 /* Get next free Rx buffer, remove it from free list */ 590 struct iwl_rx_mem_buffer *rxb = 591 list_first_entry(&rba->rbd_allocated, 592 struct iwl_rx_mem_buffer, list); 593 594 list_move(&rxb->list, &rxq->rx_free); 595 } 596 spin_unlock(&rba->lock); 597 598 rxq->used_count -= RX_CLAIM_REQ_ALLOC; 599 rxq->free_count += RX_CLAIM_REQ_ALLOC; 600 } 601 602 static void iwl_pcie_rx_allocator_work(struct work_struct *data) 603 { 604 struct iwl_rb_allocator *rba_p = 605 container_of(data, struct iwl_rb_allocator, rx_alloc); 606 struct iwl_trans_pcie *trans_pcie = 607 container_of(rba_p, struct iwl_trans_pcie, rba); 608 609 iwl_pcie_rx_allocator(trans_pcie->trans); 610 } 611 612 static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 613 { 614 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 615 struct iwl_rb_allocator *rba = &trans_pcie->rba; 616 struct device *dev = trans->dev; 617 int i; 618 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : 619 sizeof(__le32); 620 621 if (WARN_ON(trans_pcie->rxq)) 622 return -EINVAL; 623 624 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), 625 GFP_KERNEL); 626 if (!trans_pcie->rxq) 627 return -EINVAL; 628 629 spin_lock_init(&rba->lock); 630 631 for (i = 0; i < trans->num_rx_queues; i++) { 632 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 633 634 spin_lock_init(&rxq->lock); 635 if (trans->cfg->mq_rx_supported) 636 rxq->queue_size = MQ_RX_TABLE_SIZE; 637 else 638 rxq->queue_size = RX_QUEUE_SIZE; 639 640 /* 641 * Allocate the circular buffer of Read Buffer Descriptors 642 * (RBDs) 643 */ 644 rxq->bd = dma_zalloc_coherent(dev, 645 free_size * rxq->queue_size, 646 &rxq->bd_dma, GFP_KERNEL); 647 if (!rxq->bd) 648 goto err; 649 650 if (trans->cfg->mq_rx_supported) { 651 rxq->used_bd = dma_zalloc_coherent(dev, 652 sizeof(__le32) * 653 rxq->queue_size, 654 &rxq->used_bd_dma, 655 GFP_KERNEL); 656 if (!rxq->used_bd) 657 goto err; 658 } 659 660 /* Allocate the driver's pointer to receive buffer status */ 661 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), 662 &rxq->rb_stts_dma, 663 GFP_KERNEL); 664 if (!rxq->rb_stts) 665 goto err; 666 } 667 return 0; 668 669 err: 670 for (i = 0; i < trans->num_rx_queues; i++) { 671 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 672 673 if (rxq->bd) 674 dma_free_coherent(dev, free_size * rxq->queue_size, 675 rxq->bd, rxq->bd_dma); 676 rxq->bd_dma = 0; 677 rxq->bd = NULL; 678 679 if (rxq->rb_stts) 680 dma_free_coherent(trans->dev, 681 sizeof(struct iwl_rb_status), 682 rxq->rb_stts, rxq->rb_stts_dma); 683 684 if (rxq->used_bd) 685 dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, 686 rxq->used_bd, rxq->used_bd_dma); 687 rxq->used_bd_dma = 0; 688 rxq->used_bd = NULL; 689 } 690 kfree(trans_pcie->rxq); 691 692 return -ENOMEM; 693 } 694 695 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) 696 { 697 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 698 u32 rb_size; 699 unsigned long flags; 700 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 701 702 switch (trans_pcie->rx_buf_size) { 703 case IWL_AMSDU_4K: 704 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 705 break; 706 case IWL_AMSDU_8K: 707
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 708 break; 709 case IWL_AMSDU_12K: 710 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; 711 break; 712 default: 713 WARN_ON(1); 714 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 715 } 716 717 if (!iwl_trans_grab_nic_access(trans, &flags)) 718 return; 719 720 /* Stop Rx DMA */ 721 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 722 /* reset and flush pointers */ 723 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); 724 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); 725 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0); 726 727 /* Reset driver's Rx queue write index */ 728 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 729 730 /* Tell device where to find RBD circular buffer in DRAM */ 731 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 732 (u32)(rxq->bd_dma >> 8)); 733 734 /* Tell device where in DRAM to update its Rx status */ 735 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, 736 rxq->rb_stts_dma >> 4); 737 738 /* Enable Rx DMA 739 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in 740 * the credit mechanism in 5000 HW RX FIFO 741 * Direct rx interrupts to hosts 742 * Rx buffer size 4 or 8k or 12k 743 * RB timeout 0x10 744 * 256 RBDs 745 */ 746 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 747 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 748 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 749 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 750 rb_size | 751 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | 752 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 753 754 iwl_trans_release_nic_access(trans, &flags); 755 756 /* Set interrupt coalescing timer to default (2048 usecs) */ 757 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 758 759 /* W/A for interrupt coalescing bug in 7260 and 3160 */ 760 if (trans->cfg->host_interrupt_operation_mode) 761 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); 762 } 763 764 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable) 765 { 766 /* 767 * Turn on the chicken-bits that cause MAC wakeup for RX-related 768 * values. 
769 * This costs some power, but is needed for the W/A of a 9000 integrated A-step 770 * bug where shadow registers are not in the retention list and their 771 * value is lost when the NIC powers down 772 */ 773 if (trans->cfg->integrated) { 774 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 775 CSR_MAC_SHADOW_REG_CTRL_RX_WAKE); 776 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2, 777 CSR_MAC_SHADOW_REG_CTL2_RX_WAKE); 778 } 779 } 780 781 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) 782 { 783 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 784 u32 rb_size, enabled = 0; 785 unsigned long flags; 786 int i; 787 788 switch (trans_pcie->rx_buf_size) { 789 case IWL_AMSDU_4K: 790 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 791 break; 792 case IWL_AMSDU_8K: 793 rb_size = RFH_RXF_DMA_RB_SIZE_8K; 794 break; 795 case IWL_AMSDU_12K: 796 rb_size = RFH_RXF_DMA_RB_SIZE_12K; 797 break; 798 default: 799 WARN_ON(1); 800 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 801 } 802 803 if (!iwl_trans_grab_nic_access(trans, &flags)) 804 return; 805 806 /* Stop Rx DMA */ 807 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0); 808 /* disable free and used rx queue operation */ 809 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); 810 811 for (i = 0; i < trans->num_rx_queues; i++) { 812 /* Tell device where to find RBD free table in DRAM */ 813 iwl_write_prph64_no_grab(trans, 814 RFH_Q_FRBDCB_BA_LSB(i), 815 trans_pcie->rxq[i].bd_dma); 816 /* Tell device where to find RBD used table in DRAM */ 817 iwl_write_prph64_no_grab(trans, 818 RFH_Q_URBDCB_BA_LSB(i), 819 trans_pcie->rxq[i].used_bd_dma); 820 /* Tell device where in DRAM to update its Rx status */ 821 iwl_write_prph64_no_grab(trans, 822 RFH_Q_URBD_STTS_WPTR_LSB(i), 823 trans_pcie->rxq[i].rb_stts_dma); 824 /* Reset device index tables */ 825 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); 826 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); 827 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0); 828 829 enabled |= BIT(i) | BIT(i + 16); 830 } 831 832 /* 833 * Enable Rx DMA 834 * Rx buffer size 4 or 8k or 12k 835 * Min RB size 4 or 8 836 * Drop frames that exceed RB size 837 * 512 RBDs 838 */ 839 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 840 RFH_DMA_EN_ENABLE_VAL | rb_size | 841 RFH_RXF_DMA_MIN_RB_4_8 | 842 RFH_RXF_DMA_DROP_TOO_LARGE_MASK | 843 RFH_RXF_DMA_RBDCB_SIZE_512); 844 845 /* 846 * Activate DMA snooping. 847 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe 848 * Default queue is 0 849 */ 850 iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | 851 (DEFAULT_RXQ_NUM << 852 RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | 853 RFH_GEN_CFG_SERVICE_DMA_SNOOP | 854 (trans->cfg->integrated ?
855 RFH_GEN_CFG_RB_CHUNK_SIZE_64 : 856 RFH_GEN_CFG_RB_CHUNK_SIZE_128) << 857 RFH_GEN_CFG_RB_CHUNK_SIZE_POS); 858 /* Enable the relevant rx queues */ 859 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); 860 861 iwl_trans_release_nic_access(trans, &flags); 862 863 /* Set interrupt coalescing timer to default (2048 usecs) */ 864 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 865 866 iwl_pcie_enable_rx_wake(trans, true); 867 } 868 869 static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 870 { 871 lockdep_assert_held(&rxq->lock); 872 873 INIT_LIST_HEAD(&rxq->rx_free); 874 INIT_LIST_HEAD(&rxq->rx_used); 875 rxq->free_count = 0; 876 rxq->used_count = 0; 877 } 878 879 static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) 880 { 881 WARN_ON(1); 882 return 0; 883 } 884 885 int iwl_pcie_rx_init(struct iwl_trans *trans) 886 { 887 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 888 struct iwl_rxq *def_rxq; 889 struct iwl_rb_allocator *rba = &trans_pcie->rba; 890 int i, err, queue_size, allocator_pool_size, num_alloc; 891 892 if (!trans_pcie->rxq) { 893 err = iwl_pcie_rx_alloc(trans); 894 if (err) 895 return err; 896 } 897 def_rxq = trans_pcie->rxq; 898 if (!rba->alloc_wq) 899 rba->alloc_wq = alloc_workqueue("rb_allocator", 900 WQ_HIGHPRI | WQ_UNBOUND, 1); 901 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); 902 903 spin_lock(&rba->lock); 904 atomic_set(&rba->req_pending, 0); 905 atomic_set(&rba->req_ready, 0); 906 INIT_LIST_HEAD(&rba->rbd_allocated); 907 INIT_LIST_HEAD(&rba->rbd_empty); 908 spin_unlock(&rba->lock); 909 910 /* free all first - we might be reconfigured for a different size */ 911 iwl_pcie_free_rbs_pool(trans); 912 913 for (i = 0; i < RX_QUEUE_SIZE; i++) 914 def_rxq->queue[i] = NULL; 915 916 for (i = 0; i < trans->num_rx_queues; i++) { 917 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 918 919 rxq->id = i; 920 921 spin_lock(&rxq->lock); 922 /* 923 * Set read write pointer to reflect that we have processed 924 * and used all buffers, but have not restocked the Rx queue 925 * with fresh buffers 926 */ 927 rxq->read = 0; 928 rxq->write = 0; 929 rxq->write_actual = 0; 930 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 931 932 iwl_pcie_rx_init_rxb_lists(rxq); 933 934 if (!rxq->napi.poll) 935 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, 936 iwl_pcie_dummy_napi_poll, 64); 937 938 spin_unlock(&rxq->lock); 939 } 940 941 /* move the pool to the default queue and allocator ownerships */ 942 queue_size = trans->cfg->mq_rx_supported ? 
943 MQ_RX_NUM_RBDS : RX_QUEUE_SIZE; 944 allocator_pool_size = trans->num_rx_queues * 945 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); 946 num_alloc = queue_size + allocator_pool_size; 947 BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) != 948 ARRAY_SIZE(trans_pcie->rx_pool)); 949 for (i = 0; i < num_alloc; i++) { 950 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; 951 952 if (i < allocator_pool_size) 953 list_add(&rxb->list, &rba->rbd_empty); 954 else 955 list_add(&rxb->list, &def_rxq->rx_used); 956 trans_pcie->global_table[i] = rxb; 957 rxb->vid = (u16)(i + 1); 958 rxb->invalid = true; 959 } 960 961 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); 962 963 if (trans->cfg->mq_rx_supported) 964 iwl_pcie_rx_mq_hw_init(trans); 965 else 966 iwl_pcie_rx_hw_init(trans, def_rxq); 967 968 iwl_pcie_rxq_restock(trans, def_rxq); 969 970 spin_lock(&def_rxq->lock); 971 iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); 972 spin_unlock(&def_rxq->lock); 973 974 return 0; 975 } 976 977 void iwl_pcie_rx_free(struct iwl_trans *trans) 978 { 979 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 980 struct iwl_rb_allocator *rba = &trans_pcie->rba; 981 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : 982 sizeof(__le32); 983 int i; 984 985 /* 986 * if rxq is NULL, it means that nothing has been allocated, 987 * exit now 988 */ 989 if (!trans_pcie->rxq) { 990 IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); 991 return; 992 } 993 994 cancel_work_sync(&rba->rx_alloc); 995 if (rba->alloc_wq) { 996 destroy_workqueue(rba->alloc_wq); 997 rba->alloc_wq = NULL; 998 } 999 1000 iwl_pcie_free_rbs_pool(trans); 1001 1002 for (i = 0; i < trans->num_rx_queues; i++) { 1003 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 1004 1005 if (rxq->bd) 1006 dma_free_coherent(trans->dev, 1007 free_size * rxq->queue_size, 1008 rxq->bd, rxq->bd_dma); 1009 rxq->bd_dma = 0; 1010 rxq->bd = NULL; 1011 1012 if (rxq->rb_stts) 1013 dma_free_coherent(trans->dev, 1014 sizeof(struct iwl_rb_status), 1015 rxq->rb_stts, rxq->rb_stts_dma); 1016 else 1017 IWL_DEBUG_INFO(trans, 1018 "Free rxq->rb_stts which is NULL\n"); 1019 1020 if (rxq->used_bd) 1021 dma_free_coherent(trans->dev, 1022 sizeof(__le32) * rxq->queue_size, 1023 rxq->used_bd, rxq->used_bd_dma); 1024 rxq->used_bd_dma = 0; 1025 rxq->used_bd = NULL; 1026 1027 if (rxq->napi.poll) 1028 netif_napi_del(&rxq->napi); 1029 } 1030 kfree(trans_pcie->rxq); 1031 } 1032 1033 /* 1034 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs 1035 * 1036 * Called when a RBD can be reused. The RBD is transferred to the allocator. 1037 * When there are 2 empty RBDs - a request for allocation is posted 1038 */ 1039 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, 1040 struct iwl_rx_mem_buffer *rxb, 1041 struct iwl_rxq *rxq, bool emergency) 1042 { 1043 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1044 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1045 1046 /* Move the RBD to the used list, will be moved to allocator in batches 1047 * before claiming or posting a request*/ 1048 list_add_tail(&rxb->list, &rxq->rx_used); 1049 1050 if (unlikely(emergency)) 1051 return; 1052 1053 /* Count the allocator owned RBDs */ 1054 rxq->used_count++; 1055 1056 /* If we have RX_POST_REQ_ALLOC new released rx buffers - 1057 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is 1058 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, 1059 * after but we still need to post another request. 
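 * E.g. with RX_POST_REQ_ALLOC == 2 and RX_CLAIM_REQ_ALLOC == 8, a request is posted when used_count reaches 2, 10, 18, and so on.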
1060 */ 1061 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { 1062 /* Move the 2 RBDs to the allocator ownership. 1063 Allocator has another 6 from pool for the request completion*/ 1064 spin_lock(&rba->lock); 1065 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1066 spin_unlock(&rba->lock); 1067 1068 atomic_inc(&rba->req_pending); 1069 queue_work(rba->alloc_wq, &rba->rx_alloc); 1070 } 1071 } 1072 1073 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 1074 struct iwl_rxq *rxq, 1075 struct iwl_rx_mem_buffer *rxb, 1076 bool emergency) 1077 { 1078 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1079 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1080 bool page_stolen = false; 1081 int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 1082 u32 offset = 0; 1083 1084 if (WARN_ON(!rxb)) 1085 return; 1086 1087 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); 1088 1089 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { 1090 struct iwl_rx_packet *pkt; 1091 u16 sequence; 1092 bool reclaim; 1093 int index, cmd_index, len; 1094 struct iwl_rx_cmd_buffer rxcb = { 1095 ._offset = offset, 1096 ._rx_page_order = trans_pcie->rx_page_order, 1097 ._page = rxb->page, 1098 ._page_stolen = false, 1099 .truesize = max_len, 1100 }; 1101 1102 pkt = rxb_addr(&rxcb); 1103 1104 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) 1105 break; 1106 1107 WARN_ON((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1108 FH_RSCSR_RXQ_POS != rxq->id); 1109 1110 IWL_DEBUG_RX(trans, 1111 "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", 1112 rxcb._offset, 1113 iwl_get_cmd_string(trans, 1114 iwl_cmd_id(pkt->hdr.cmd, 1115 pkt->hdr.group_id, 1116 0)), 1117 pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); 1118 1119 len = iwl_rx_packet_len(pkt); 1120 len += sizeof(u32); /* account for status word */ 1121 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); 1122 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); 1123 1124 /* Reclaim a command buffer only if this packet is a response 1125 * to a (driver-originated) command. 1126 * If the packet (e.g. Rx frame) originated from uCode, 1127 * there is no command buffer to reclaim. 1128 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 1129 * but apparently a few don't get set; catch them here. */ 1130 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); 1131 if (reclaim) { 1132 int i; 1133 1134 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 1135 if (trans_pcie->no_reclaim_cmds[i] == 1136 pkt->hdr.cmd) { 1137 reclaim = false; 1138 break; 1139 } 1140 } 1141 } 1142 1143 sequence = le16_to_cpu(pkt->hdr.sequence); 1144 index = SEQ_TO_INDEX(sequence); 1145 cmd_index = get_cmd_index(&txq->q, index); 1146 1147 if (rxq->id == 0) 1148 iwl_op_mode_rx(trans->op_mode, &rxq->napi, 1149 &rxcb); 1150 else 1151 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, 1152 &rxcb, rxq->id); 1153 1154 if (reclaim) { 1155 kzfree(txq->entries[cmd_index].free_buf); 1156 txq->entries[cmd_index].free_buf = NULL; 1157 } 1158 1159 /* 1160 * After here, we should always check rxcb._page_stolen, 1161 * if it is true then one of the handlers took the page. 
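 * A stolen page is freed below and its RBD is recycled through iwl_pcie_rx_reuse_rbd(); an unstolen page is re-mapped and returned to rx_free.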
1162 */ 1163 1164 if (reclaim) { 1165 /* Invoke any callbacks, transfer the buffer to caller, 1166 * and fire off the (possibly) blocking 1167 * iwl_trans_send_cmd() 1168 * as we reclaim the driver command queue */ 1169 if (!rxcb._page_stolen) 1170 iwl_pcie_hcmd_complete(trans, &rxcb); 1171 else 1172 IWL_WARN(trans, "Claim null rxb?\n"); 1173 } 1174 1175 page_stolen |= rxcb._page_stolen; 1176 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 1177 } 1178 1179 /* page was stolen from us -- free our reference */ 1180 if (page_stolen) { 1181 __free_pages(rxb->page, trans_pcie->rx_page_order); 1182 rxb->page = NULL; 1183 } 1184 1185 /* Reuse the page if possible. For notification packets and 1186 * SKBs that fail to Rx correctly, add them back into the 1187 * rx_free list for reuse later. */ 1188 if (rxb->page != NULL) { 1189 rxb->page_dma = 1190 dma_map_page(trans->dev, rxb->page, 0, 1191 PAGE_SIZE << trans_pcie->rx_page_order, 1192 DMA_FROM_DEVICE); 1193 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 1194 /* 1195 * free the page(s) as well to not break 1196 * the invariant that the items on the used 1197 * list have no page(s) 1198 */ 1199 __free_pages(rxb->page, trans_pcie->rx_page_order); 1200 rxb->page = NULL; 1201 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1202 } else { 1203 list_add_tail(&rxb->list, &rxq->rx_free); 1204 rxq->free_count++; 1205 } 1206 } else 1207 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1208 } 1209 1210 /* 1211 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1212 */ 1213 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) 1214 { 1215 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1216 struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; 1217 u32 r, i, count = 0; 1218 bool emergency = false; 1219 1220 restart: 1221 spin_lock(&rxq->lock); 1222 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1223 * buffer that the driver may process (last buffer filled by ucode). */ 1224 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 1225 i = rxq->read; 1226 1227 /* W/A 9000 device step A0 wrap-around bug */ 1228 r &= (rxq->queue_size - 1); 1229 1230 /* Rx interrupt, but nothing sent from uCode */ 1231 if (i == r) 1232 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); 1233 1234 while (i != r) { 1235 struct iwl_rx_mem_buffer *rxb; 1236 1237 if (unlikely(rxq->used_count == rxq->queue_size / 2)) 1238 emergency = true; 1239 1240 if (trans->cfg->mq_rx_supported) { 1241 /* 1242 * used_bd is a 32 bit but only 12 are used to retrieve 1243 * the vid 1244 */ 1245 u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF; 1246 1247 if (WARN(!vid || 1248 vid > ARRAY_SIZE(trans_pcie->global_table), 1249 "Invalid rxb index from HW %u\n", (u32)vid)) { 1250 iwl_force_nmi(trans); 1251 goto out; 1252 } 1253 rxb = trans_pcie->global_table[vid - 1]; 1254 if (WARN(rxb->invalid, 1255 "Invalid rxb from HW %u\n", (u32)vid)) { 1256 iwl_force_nmi(trans); 1257 goto out; 1258 } 1259 rxb->invalid = true; 1260 } else { 1261 rxb = rxq->queue[i]; 1262 rxq->queue[i] = NULL; 1263 } 1264 1265 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); 1266 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); 1267 1268 i = (i + 1) & (rxq->queue_size - 1); 1269 1270 /* 1271 * If we have RX_CLAIM_REQ_ALLOC released rx buffers - 1272 * try to claim the pre-allocated buffers from the allocator. 1273 * If not ready - will try to reclaim next time. 
1274 * There is no need to reschedule work - allocator exits only 1275 * on success 1276 */ 1277 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) 1278 iwl_pcie_rx_allocator_get(trans, rxq); 1279 1280 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { 1281 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1282 1283 /* Add the remaining empty RBDs for allocator use */ 1284 spin_lock(&rba->lock); 1285 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1286 spin_unlock(&rba->lock); 1287 } else if (emergency) { 1288 count++; 1289 if (count == 8) { 1290 count = 0; 1291 if (rxq->used_count < rxq->queue_size / 3) 1292 emergency = false; 1293 1294 rxq->read = i; 1295 spin_unlock(&rxq->lock); 1296 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1297 iwl_pcie_rxq_restock(trans, rxq); 1298 goto restart; 1299 } 1300 } 1301 } 1302 out: 1303 /* Backtrack one entry */ 1304 rxq->read = i; 1305 spin_unlock(&rxq->lock); 1306 1307 /* 1308 * handle a case where in emergency there are some unallocated RBDs. 1309 * those RBDs are in the used list, but are not tracked by the queue's 1310 * used_count which counts allocator owned RBDs. 1311 * unallocated emergency RBDs must be allocated on exit, otherwise 1312 * when called again the function may not be in emergency mode and 1313 * they will be handed to the allocator with no tracking in the RBD 1314 * allocator counters, which will lead to them never being claimed back 1315 * by the queue. 1316 * by allocating them here, they are now in the queue free list, and 1317 * will be restocked by the next call of iwl_pcie_rxq_restock. 1318 */ 1319 if (unlikely(emergency && count)) 1320 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1321 1322 if (rxq->napi.poll) 1323 napi_gro_flush(&rxq->napi, false); 1324 1325 iwl_pcie_rxq_restock(trans, rxq); 1326 } 1327 1328 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) 1329 { 1330 u8 queue = entry->entry; 1331 struct msix_entry *entries = entry - queue; 1332 1333 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); 1334 } 1335 1336 static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, 1337 struct msix_entry *entry) 1338 { 1339 /* 1340 * Before sending the interrupt the HW disables it to prevent 1341 * a nested interrupt. This is done by writing 1 to the corresponding 1342 * bit in the mask register. After handling the interrupt, it should be 1343 * re-enabled by clearing this bit. This register is defined as a 1344 * write 1 clear (W1C) register, meaning that it's cleared 1345 * by writing 1 to the bit. 1346 */ 1347 iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); 1348 } 1349 1350 /* 1351 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw 1352 * This interrupt handler should be used with RSS queue only.
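 * The MSI-X entry number is used directly as the RX queue index, so entries beyond trans->num_rx_queues are rejected with IRQ_NONE.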
1353 */ 1354 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) 1355 { 1356 struct msix_entry *entry = dev_id; 1357 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 1358 struct iwl_trans *trans = trans_pcie->trans; 1359 1360 if (WARN_ON(entry->entry >= trans->num_rx_queues)) 1361 return IRQ_NONE; 1362 1363 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1364 1365 local_bh_disable(); 1366 iwl_pcie_rx_handle(trans, entry->entry); 1367 local_bh_enable(); 1368 1369 iwl_pcie_clear_irq(trans, entry); 1370 1371 lock_map_release(&trans->sync_cmd_lockdep_map); 1372 1373 return IRQ_HANDLED; 1374 } 1375 1376 /* 1377 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1378 */ 1379 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) 1380 { 1381 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1382 int i; 1383 1384 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 1385 if (trans->cfg->internal_wimax_coex && 1386 !trans->cfg->apmg_not_supported && 1387 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 1388 APMS_CLK_VAL_MRB_FUNC_MODE) || 1389 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 1390 APMG_PS_CTRL_VAL_RESET_REQ))) { 1391 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1392 iwl_op_mode_wimax_active(trans->op_mode); 1393 wake_up(&trans_pcie->wait_command_queue); 1394 return; 1395 } 1396 1397 iwl_pcie_dump_csr(trans); 1398 iwl_dump_fh(trans, NULL); 1399 1400 local_bh_disable(); 1401 /* The STATUS_FW_ERROR bit is set in this function. This must happen 1402 * before we wake up the command caller, to ensure a proper cleanup. */ 1403 iwl_trans_fw_error(trans); 1404 local_bh_enable(); 1405 1406 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) 1407 del_timer(&trans_pcie->txq[i].stuck_timer); 1408 1409 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1410 wake_up(&trans_pcie->wait_command_queue); 1411 } 1412 1413 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) 1414 { 1415 u32 inta; 1416 1417 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); 1418 1419 trace_iwlwifi_dev_irq(trans->dev); 1420 1421 /* Discover which interrupts are active/pending */ 1422 inta = iwl_read32(trans, CSR_INT); 1423 1424 /* the thread will service interrupts and re-enable them */ 1425 return inta; 1426 } 1427 1428 /* a device (PCI-E) page is 4096 bytes long */ 1429 #define ICT_SHIFT 12 1430 #define ICT_SIZE (1 << ICT_SHIFT) 1431 #define ICT_COUNT (ICT_SIZE / sizeof(u32)) 1432 1433 /* interrupt handler using ict table, with this interrupt driver will 1434 * stop using INTA register to get device's interrupt, reading this register 1435 * is expensive, device will write interrupts in ICT dram table, increment 1436 * index then will fire interrupt to driver, driver will OR all ICT table 1437 * entries from current index up to table entry with 0 value. the result is 1438 * the interrupt we need to service, driver will set the entries back to 0 and 1439 * set index. 1440 */ 1441 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) 1442 { 1443 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1444 u32 inta; 1445 u32 val = 0; 1446 u32 read; 1447 1448 trace_iwlwifi_dev_irq(trans->dev); 1449 1450 /* Ignore interrupt if there's nothing in NIC to service. 1451 * This may be due to IRQ shared with another device, 1452 * or due to sporadic interrupts thrown from our NIC. 
*/ 1453 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 1454 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); 1455 if (!read) 1456 return 0; 1457 1458 /* 1459 * Collect all entries up to the first 0, starting from ict_index; 1460 * note we already read at ict_index. 1461 */ 1462 do { 1463 val |= read; 1464 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", 1465 trans_pcie->ict_index, read); 1466 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; 1467 trans_pcie->ict_index = 1468 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); 1469 1470 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 1471 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, 1472 read); 1473 } while (read); 1474 1475 /* We should not get this value, just ignore it. */ 1476 if (val == 0xffffffff) 1477 val = 0; 1478 1479 /* 1480 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit 1481 * (bit 15 before shifting it to 31) to clear when using interrupt 1482 * coalescing. fortunately, bits 18 and 19 stay set when this happens 1483 * so we use them to decide on the real state of the Rx bit. 1484 * In order words, bit 15 is set if bit 18 or bit 19 are set. 1485 */ 1486 if (val & 0xC0000) 1487 val |= 0x8000; 1488 1489 inta = (0xff & val) | ((0xff00 & val) << 16); 1490 return inta; 1491 } 1492 1493 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) 1494 { 1495 struct iwl_trans *trans = dev_id; 1496 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1497 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1498 u32 inta = 0; 1499 u32 handled = 0; 1500 1501 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1502 1503 spin_lock(&trans_pcie->irq_lock); 1504 1505 /* dram interrupt table not set yet, 1506 * use legacy interrupt. 1507 */ 1508 if (likely(trans_pcie->use_ict)) 1509 inta = iwl_pcie_int_cause_ict(trans); 1510 else 1511 inta = iwl_pcie_int_cause_non_ict(trans); 1512 1513 if (iwl_have_debug_level(IWL_DL_ISR)) { 1514 IWL_DEBUG_ISR(trans, 1515 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", 1516 inta, trans_pcie->inta_mask, 1517 iwl_read32(trans, CSR_INT_MASK), 1518 iwl_read32(trans, CSR_FH_INT_STATUS)); 1519 if (inta & (~trans_pcie->inta_mask)) 1520 IWL_DEBUG_ISR(trans, 1521 "We got a masked interrupt (0x%08x)\n", 1522 inta & (~trans_pcie->inta_mask)); 1523 } 1524 1525 inta &= trans_pcie->inta_mask; 1526 1527 /* 1528 * Ignore interrupt if there's nothing in NIC to service. 1529 * This may be due to IRQ shared with another device, 1530 * or due to sporadic interrupts thrown from our NIC. 1531 */ 1532 if (unlikely(!inta)) { 1533 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 1534 /* 1535 * Re-enable interrupts here since we don't 1536 * have anything to service 1537 */ 1538 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1539 _iwl_enable_interrupts(trans); 1540 spin_unlock(&trans_pcie->irq_lock); 1541 lock_map_release(&trans->sync_cmd_lockdep_map); 1542 return IRQ_NONE; 1543 } 1544 1545 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1546 /* 1547 * Hardware disappeared. It might have 1548 * already raised an interrupt. 1549 */ 1550 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 1551 spin_unlock(&trans_pcie->irq_lock); 1552 goto out; 1553 } 1554 1555 /* Ack/clear/reset pending uCode interrupts. 1556 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1557 */ 1558 /* There is a hardware bug in the interrupt mask function that some 1559 * interrupts (i.e. 
CSR_INT_BIT_SCD) can still be generated even if 1560 * they are disabled in the CSR_INT_MASK register. Furthermore the 1561 * ICT interrupt handling mechanism has another bug that might cause 1562 * these unmasked interrupts to fail to be detected. We work around the 1563 * hardware bugs here by ACKing all the possible interrupts so that 1564 * interrupt coalescing can still be achieved. 1565 */ 1566 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); 1567 1568 if (iwl_have_debug_level(IWL_DL_ISR)) 1569 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 1570 inta, iwl_read32(trans, CSR_INT_MASK)); 1571 1572 spin_unlock(&trans_pcie->irq_lock); 1573 1574 /* Now service all interrupt bits discovered above. */ 1575 if (inta & CSR_INT_BIT_HW_ERR) { 1576 IWL_ERR(trans, "Hardware error detected. Restarting.\n"); 1577 1578 /* Tell the device to stop sending interrupts */ 1579 iwl_disable_interrupts(trans); 1580 1581 isr_stats->hw++; 1582 iwl_pcie_irq_handle_error(trans); 1583 1584 handled |= CSR_INT_BIT_HW_ERR; 1585 1586 goto out; 1587 } 1588 1589 if (iwl_have_debug_level(IWL_DL_ISR)) { 1590 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1591 if (inta & CSR_INT_BIT_SCD) { 1592 IWL_DEBUG_ISR(trans, 1593 "Scheduler finished to transmit the frame/frames.\n"); 1594 isr_stats->sch++; 1595 } 1596 1597 /* Alive notification via Rx interrupt will do the real work */ 1598 if (inta & CSR_INT_BIT_ALIVE) { 1599 IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1600 isr_stats->alive++; 1601 } 1602 } 1603 1604 /* Safely ignore these bits for debug checks below */ 1605 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1606 1607 /* HW RF KILL switch toggled */ 1608 if (inta & CSR_INT_BIT_RF_KILL) { 1609 bool hw_rfkill; 1610 1611 hw_rfkill = iwl_is_rfkill_set(trans); 1612 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 1613 hw_rfkill ? "disable radio" : "enable radio"); 1614 1615 isr_stats->rfkill++; 1616 1617 mutex_lock(&trans_pcie->mutex); 1618 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1619 mutex_unlock(&trans_pcie->mutex); 1620 if (hw_rfkill) { 1621 set_bit(STATUS_RFKILL, &trans->status); 1622 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 1623 &trans->status)) 1624 IWL_DEBUG_RF_KILL(trans, 1625 "Rfkill while SYNC HCMD in flight\n"); 1626 wake_up(&trans_pcie->wait_command_queue); 1627 } else { 1628 clear_bit(STATUS_RFKILL, &trans->status); 1629 } 1630 1631 handled |= CSR_INT_BIT_RF_KILL; 1632 } 1633 1634 /* Chip got too hot and stopped itself */ 1635 if (inta & CSR_INT_BIT_CT_KILL) { 1636 IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1637 isr_stats->ctkill++; 1638 handled |= CSR_INT_BIT_CT_KILL; 1639 } 1640 1641 /* Error detected by uCode */ 1642 if (inta & CSR_INT_BIT_SW_ERR) { 1643 IWL_ERR(trans, "Microcode SW error detected. " 1644 " Restarting 0x%X.\n", inta); 1645 isr_stats->sw++; 1646 iwl_pcie_irq_handle_error(trans); 1647 handled |= CSR_INT_BIT_SW_ERR; 1648 } 1649 1650 /* uCode wakes up after power-down sleep */ 1651 if (inta & CSR_INT_BIT_WAKEUP) { 1652 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1653 iwl_pcie_rxq_check_wrptr(trans); 1654 iwl_pcie_txq_check_wrptrs(trans); 1655 1656 isr_stats->wakeup++; 1657 1658 handled |= CSR_INT_BIT_WAKEUP; 1659 } 1660 1661 /* All uCode command responses, including Tx command responses, 1662 * Rx "responses" (frame-received notification), and other 1663 * notifications from uCode come through here */ 1664 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1665 CSR_INT_BIT_RX_PERIODIC)) { 1666 IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 1667 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1668 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1669 iwl_write32(trans, CSR_FH_INT_STATUS, 1670 CSR_FH_INT_RX_MASK); 1671 } 1672 if (inta & CSR_INT_BIT_RX_PERIODIC) { 1673 handled |= CSR_INT_BIT_RX_PERIODIC; 1674 iwl_write32(trans, 1675 CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1676 } 1677 /* Sending an RX interrupt requires many steps to be done in 1678 * the device: 1679 * 1- write interrupt to current index in ICT table. 1680 * 2- dma RX frame. 1681 * 3- update RX shared data to indicate last write index. 1682 * 4- send interrupt. 1683 * This could lead to an RX race: the driver could receive an RX interrupt 1684 * but the shared data changes do not yet reflect it; 1685 * the periodic interrupt will detect any dangling Rx activity. 1686 */ 1687 1688 /* Disable periodic interrupt; we use it as just a one-shot. */ 1689 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1690 CSR_INT_PERIODIC_DIS); 1691 1692 /* 1693 * Enable periodic interrupt in 8 msec only if we received 1694 * real RX interrupt (instead of just periodic int), to catch 1695 * any dangling Rx interrupt. If it was just the periodic 1696 * interrupt, there was no dangling Rx activity, and no need 1697 * to extend the periodic interrupt; one-shot is enough.
1698 */ 1699 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 1700 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1701 CSR_INT_PERIODIC_ENA); 1702 1703 isr_stats->rx++; 1704 1705 local_bh_disable(); 1706 iwl_pcie_rx_handle(trans, 0); 1707 local_bh_enable(); 1708 } 1709 1710 /* This "Tx" DMA channel is used only for loading uCode */ 1711 if (inta & CSR_INT_BIT_FH_TX) { 1712 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 1713 IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 1714 isr_stats->tx++; 1715 handled |= CSR_INT_BIT_FH_TX; 1716 /* Wake up uCode load routine, now that load is complete */ 1717 trans_pcie->ucode_write_complete = true; 1718 wake_up(&trans_pcie->ucode_write_waitq); 1719 } 1720 1721 if (inta & ~handled) { 1722 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 1723 isr_stats->unhandled++; 1724 } 1725 1726 if (inta & ~(trans_pcie->inta_mask)) { 1727 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", 1728 inta & ~trans_pcie->inta_mask); 1729 } 1730 1731 spin_lock(&trans_pcie->irq_lock); 1732 /* only Re-enable all interrupt if disabled by irq */ 1733 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1734 _iwl_enable_interrupts(trans); 1735 /* we are loading the firmware, enable FH_TX interrupt only */ 1736 else if (handled & CSR_INT_BIT_FH_TX) 1737 iwl_enable_fw_load_int(trans); 1738 /* Re-enable RF_KILL if it occurred */ 1739 else if (handled & CSR_INT_BIT_RF_KILL) 1740 iwl_enable_rfkill_int(trans); 1741 spin_unlock(&trans_pcie->irq_lock); 1742 1743 out: 1744 lock_map_release(&trans->sync_cmd_lockdep_map); 1745 return IRQ_HANDLED; 1746 } 1747 1748 /****************************************************************************** 1749 * 1750 * ICT functions 1751 * 1752 ******************************************************************************/ 1753 1754 /* Free dram table */ 1755 void iwl_pcie_free_ict(struct iwl_trans *trans) 1756 { 1757 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1758 1759 if (trans_pcie->ict_tbl) { 1760 dma_free_coherent(trans->dev, ICT_SIZE, 1761 trans_pcie->ict_tbl, 1762 trans_pcie->ict_tbl_dma); 1763 trans_pcie->ict_tbl = NULL; 1764 trans_pcie->ict_tbl_dma = 0; 1765 } 1766 } 1767 1768 /* 1769 * allocate dram shared table, it is an aligned memory 1770 * block of ICT_SIZE. 1771 * also reset all data related to ICT table interrupt. 1772 */ 1773 int iwl_pcie_alloc_ict(struct iwl_trans *trans) 1774 { 1775 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1776 1777 trans_pcie->ict_tbl = 1778 dma_zalloc_coherent(trans->dev, ICT_SIZE, 1779 &trans_pcie->ict_tbl_dma, 1780 GFP_KERNEL); 1781 if (!trans_pcie->ict_tbl) 1782 return -ENOMEM; 1783 1784 /* just an API sanity check ... it is guaranteed to be aligned */ 1785 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { 1786 iwl_pcie_free_ict(trans); 1787 return -EINVAL; 1788 } 1789 1790 return 0; 1791 } 1792 1793 /* Device is going up inform it about using ICT interrupt table, 1794 * also we need to tell the driver to start using ICT interrupt. 
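 * The table's DMA address, shifted down by ICT_SHIFT, is programmed into CSR_DRAM_INT_TBL_REG together with the enable and wrap-check bits.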

/* Device is going up: inform it that we are using the ICT interrupt table,
 * and tell the driver to start using the ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}
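
/*
 * Minimal sketch, not part of the driver: both iwl_pcie_isr() and
 * iwl_pcie_msix_isr() above return IRQ_WAKE_THREAD and defer the real work to
 * a threaded handler.  That pairing is made with request_threaded_irq() during
 * transport setup, elsewhere in the driver; the helper below is a hypothetical
 * illustration of such a call for the legacy (non-MSI-X) case, assuming
 * <linux/interrupt.h> is reachable through the existing includes.
 */
static int __maybe_unused iwl_pcie_request_irq_sketch(struct iwl_trans *trans,
						      int irq)
{
	/*
	 * The hard handler runs in interrupt context and masks the device;
	 * iwl_pcie_irq_handler() then runs in a kernel thread.  IRQF_SHARED
	 * is used because a legacy INTx line may be shared with other devices.
	 */
	return request_threaded_irq(irq, iwl_pcie_isr,
				    iwl_pcie_irq_handler,
				    IRQF_SHARED, "iwlwifi", trans);
}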

irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause again.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
			      inta_fh,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up the uCode load routine,
		 * now that the load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking the FH register, check the HW register */
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
			      inta_hw,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
	}

	/* uCode wakes up after power-down sleep */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}
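
/*
 * Illustrative sketch, not part of the driver: iwl_pcie_irq_msix_handler()
 * above reads each MSI-X cause register and immediately writes the same value
 * back, acknowledging exactly the causes it has seen so that a cause raised
 * after the read stays pending for the next interrupt.  The hypothetical
 * helper below isolates that read-and-acknowledge pattern for one register.
 */
static u32 __maybe_unused iwl_pcie_read_and_ack_causes(struct iwl_trans *trans,
							u32 causes_reg)
{
	u32 causes = iwl_read32(trans, causes_reg);

	/* ack only the bits we actually observed; newer bits stay pending */
	iwl_write32(trans, causes_reg, causes);

	return causes;
}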