// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/commands.h"
#include "fw/api/tx.h"
#include "fw/api/datapath.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

		/* Starting from AX210, the HW expects bytes */
		WARN_ON(trans->txqs.bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
	} else {
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

		/* Before AX210, the HW expects DW */
		WARN_ON(!trans->txqs.bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}

/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
				   struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	/* Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	if (!txq->entries)
		return;

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *ret;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	/* set the chaining pointer to the previous page if there */
	*(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
	*page_ptr = ret;

	return ret;
}

/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
				       struct sk_buff *skb,
				       struct iwl_tfh_tfd *tfd,
				       dma_addr_t phys, void *virt,
				       u16 len, struct iwl_cmd_meta *meta)
{
	dma_addr_t oldphys = phys;
	struct page *page;
	int ret;

	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;

	if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
		ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);

		if (ret < 0)
			goto unmap;

		if (meta)
			meta->tbs |= BIT(ret);

		ret = 0;
		goto trace;
	}

	/*
	 * Work around a hardware bug. If (as expressed in the
	 * condition above) the TB ends on a 32-bit boundary,
	 * then the next TB may be accessed with the wrong
	 * address.
	 * To work around it, copy the data elsewhere and make
	 * a new mapping for it so the device will not fail.
	 */

	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
		ret = -ENOBUFS;
		goto unmap;
	}

	page = get_workaround_page(trans, skb);
	if (!page) {
		ret = -ENOMEM;
		goto unmap;
	}

	memcpy(page_address(page), virt, len);

	phys = dma_map_single(trans->dev, page_address(page), len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;
	ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
	if (ret < 0) {
		/* unmap the new allocation as single */
		oldphys = phys;
		meta = NULL;
		goto unmap;
	}
	IWL_WARN(trans,
		 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
		 len, (unsigned long long)oldphys, (unsigned long long)phys);

	ret = 0;
unmap:
	if (meta)
		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

	return ret;
}

#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb)
{
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	if (WARN_ON(*page_ptr))
		return NULL;

	if (!p->page)
		goto alloc;

	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() in tx-gen2.c)
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	*page_ptr = p->page;
	get_page(p->page);
	return p;
}
#endif

static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
				    struct sk_buff *skb,
				    struct iwl_tfh_tfd *tfd, int start_len,
				    u8 hdr_len,
				    struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		   (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most; they will fit in one page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		/*
		 * No need for _with_wa, this is from the TSO page and
		 * we leave some space at the end of it so can't hit
		 * the buggy scenario.
		 */
		iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					tb_phys, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			int ret;

			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
							  tb_phys, tso.data,
							  tb_len, NULL);
			if (ret)
				goto out_err;

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}

static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
					 struct iwl_txq *txq,
					 struct iwl_device_tx_cmd *dev_cmd,
					 struct sk_buff *skb,
					 struct iwl_cmd_meta *out_meta,
					 int hdr_len,
					 int tx_cmd_len)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
				     hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd,
				     struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		unsigned int fragsz = skb_frag_size(frag);
		int ret;

		if (!fragsz)
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   fragsz, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb_frag_address(frag),
						  fragsz, out_meta);
		if (ret)
			return ret;
	}

	return 0;
}

static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
				   struct iwl_txq *txq,
				   struct iwl_device_tx_cmd *dev_cmd,
				   struct sk_buff *skb,
				   struct iwl_cmd_meta *out_meta,
				   int hdr_len,
				   int tx_cmd_len,
				   bool pad)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL);
		if (ret)
			goto out_err;
	}

	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static
struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
					   struct iwl_txq *txq,
					   struct iwl_device_tx_cmd *dev_cmd,
					   struct sk_buff *skb,
					   struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
		     IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
		     IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						   out_meta, hdr_len, len);
	return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				     hdr_len, len, !amsdu);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
	       (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (!WARN_ON_ONCE(!skb))
				iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	if (txq->bc_tbl.addr)
		dma_pool_free(trans->txqs.bc_pool,
			      txq->bc_tbl.addr, txq->bc_tbl.dma);
	kfree(txq);
}

/*
 * iwl_txq_gen2_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq;
	int i;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ON(!txq))
		return;

	iwl_txq_gen2_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_txq_gen2_free_memory(trans, txq);

	trans->txqs.txq[txq_id] = NULL;

	clear_bit(txq_id, trans->txqs.queue_used);
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_txq_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->trans_cfg->gen2) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
"" : "in", fifo, 1001 jiffies_to_msecs(txq->wd_timeout), 1002 txq->read_ptr, txq->write_ptr, 1003 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & 1004 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 1005 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & 1006 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 1007 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); 1008 } 1009 1010 static void iwl_txq_stuck_timer(struct timer_list *t) 1011 { 1012 struct iwl_txq *txq = from_timer(txq, t, stuck_timer); 1013 struct iwl_trans *trans = txq->trans; 1014 1015 spin_lock(&txq->lock); 1016 /* check if triggered erroneously */ 1017 if (txq->read_ptr == txq->write_ptr) { 1018 spin_unlock(&txq->lock); 1019 return; 1020 } 1021 spin_unlock(&txq->lock); 1022 1023 iwl_txq_log_scd_error(trans, txq); 1024 1025 iwl_force_nmi(trans); 1026 } 1027 1028 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, 1029 bool cmd_queue) 1030 { 1031 size_t tfd_sz = trans->txqs.tfd.size * 1032 trans->trans_cfg->base_params->max_tfd_queue_size; 1033 size_t tb0_buf_sz; 1034 int i; 1035 1036 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) 1037 return -EINVAL; 1038 1039 if (WARN_ON(txq->entries || txq->tfds)) 1040 return -EINVAL; 1041 1042 if (trans->trans_cfg->gen2) 1043 tfd_sz = trans->txqs.tfd.size * slots_num; 1044 1045 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); 1046 txq->trans = trans; 1047 1048 txq->n_window = slots_num; 1049 1050 txq->entries = kcalloc(slots_num, 1051 sizeof(struct iwl_pcie_txq_entry), 1052 GFP_KERNEL); 1053 1054 if (!txq->entries) 1055 goto error; 1056 1057 if (cmd_queue) 1058 for (i = 0; i < slots_num; i++) { 1059 txq->entries[i].cmd = 1060 kmalloc(sizeof(struct iwl_device_cmd), 1061 GFP_KERNEL); 1062 if (!txq->entries[i].cmd) 1063 goto error; 1064 } 1065 1066 /* Circular buffer of transmit frame descriptors (TFDs), 1067 * shared with device */ 1068 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 1069 &txq->dma_addr, GFP_KERNEL); 1070 if (!txq->tfds) 1071 goto error; 1072 1073 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); 1074 1075 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; 1076 1077 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, 1078 &txq->first_tb_dma, 1079 GFP_KERNEL); 1080 if (!txq->first_tb_bufs) 1081 goto err_free_tfds; 1082 1083 return 0; 1084 err_free_tfds: 1085 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); 1086 txq->tfds = NULL; 1087 error: 1088 if (txq->entries && cmd_queue) 1089 for (i = 0; i < slots_num; i++) 1090 kfree(txq->entries[i].cmd); 1091 kfree(txq->entries); 1092 txq->entries = NULL; 1093 1094 return -ENOMEM; 1095 } 1096 1097 static struct iwl_txq * 1098 iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout) 1099 { 1100 size_t bc_tbl_size, bc_tbl_entries; 1101 struct iwl_txq *txq; 1102 int ret; 1103 1104 WARN_ON(!trans->txqs.bc_tbl_size); 1105 1106 bc_tbl_size = trans->txqs.bc_tbl_size; 1107 bc_tbl_entries = bc_tbl_size / sizeof(u16); 1108 1109 if (WARN_ON(size > bc_tbl_entries)) 1110 return ERR_PTR(-EINVAL); 1111 1112 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 1113 if (!txq) 1114 return ERR_PTR(-ENOMEM); 1115 1116 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, 1117 &txq->bc_tbl.dma); 1118 if (!txq->bc_tbl.addr) { 1119 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 1120 kfree(txq); 1121 return ERR_PTR(-ENOMEM); 1122 } 1123 1124 ret = iwl_txq_alloc(trans, txq, size, false); 1125 if (ret) { 1126 
IWL_ERR(trans, "Tx queue alloc failed\n"); 1127 goto error; 1128 } 1129 ret = iwl_txq_init(trans, txq, size, false); 1130 if (ret) { 1131 IWL_ERR(trans, "Tx queue init failed\n"); 1132 goto error; 1133 } 1134 1135 txq->wd_timeout = msecs_to_jiffies(timeout); 1136 1137 return txq; 1138 1139 error: 1140 iwl_txq_gen2_free_memory(trans, txq); 1141 return ERR_PTR(ret); 1142 } 1143 1144 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, 1145 struct iwl_host_cmd *hcmd) 1146 { 1147 struct iwl_tx_queue_cfg_rsp *rsp; 1148 int ret, qid; 1149 u32 wr_ptr; 1150 1151 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != 1152 sizeof(*rsp))) { 1153 ret = -EINVAL; 1154 goto error_free_resp; 1155 } 1156 1157 rsp = (void *)hcmd->resp_pkt->data; 1158 qid = le16_to_cpu(rsp->queue_number); 1159 wr_ptr = le16_to_cpu(rsp->write_pointer); 1160 1161 if (qid >= ARRAY_SIZE(trans->txqs.txq)) { 1162 WARN_ONCE(1, "queue index %d unsupported", qid); 1163 ret = -EIO; 1164 goto error_free_resp; 1165 } 1166 1167 if (test_and_set_bit(qid, trans->txqs.queue_used)) { 1168 WARN_ONCE(1, "queue %d already used", qid); 1169 ret = -EIO; 1170 goto error_free_resp; 1171 } 1172 1173 if (WARN_ONCE(trans->txqs.txq[qid], 1174 "queue %d already allocated\n", qid)) { 1175 ret = -EIO; 1176 goto error_free_resp; 1177 } 1178 1179 txq->id = qid; 1180 trans->txqs.txq[qid] = txq; 1181 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); 1182 1183 /* Place first TFD at index corresponding to start sequence number */ 1184 txq->read_ptr = wr_ptr; 1185 txq->write_ptr = wr_ptr; 1186 1187 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); 1188 1189 iwl_free_resp(hcmd); 1190 return qid; 1191 1192 error_free_resp: 1193 iwl_free_resp(hcmd); 1194 iwl_txq_gen2_free_memory(trans, txq); 1195 return ret; 1196 } 1197 1198 int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, 1199 u8 tid, int size, unsigned int timeout) 1200 { 1201 struct iwl_txq *txq; 1202 union { 1203 struct iwl_tx_queue_cfg_cmd old; 1204 struct iwl_scd_queue_cfg_cmd new; 1205 } cmd; 1206 struct iwl_host_cmd hcmd = { 1207 .flags = CMD_WANT_SKB, 1208 }; 1209 int ret; 1210 1211 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && 1212 trans->hw_rev_step == SILICON_A_STEP) 1213 size = 4096; 1214 1215 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); 1216 if (IS_ERR(txq)) 1217 return PTR_ERR(txq); 1218 1219 if (trans->txqs.queue_alloc_cmd_ver == 0) { 1220 memset(&cmd.old, 0, sizeof(cmd.old)); 1221 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); 1222 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); 1223 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); 1224 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE); 1225 cmd.old.tid = tid; 1226 1227 if (hweight32(sta_mask) != 1) { 1228 ret = -EINVAL; 1229 goto error; 1230 } 1231 cmd.old.sta_id = ffs(sta_mask) - 1; 1232 1233 hcmd.id = SCD_QUEUE_CFG; 1234 hcmd.len[0] = sizeof(cmd.old); 1235 hcmd.data[0] = &cmd.old; 1236 } else if (trans->txqs.queue_alloc_cmd_ver == 3) { 1237 memset(&cmd.new, 0, sizeof(cmd.new)); 1238 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); 1239 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); 1240 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); 1241 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); 1242 cmd.new.u.add.flags = cpu_to_le32(flags); 1243 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask); 1244 cmd.new.u.add.tid = tid; 1245 1246 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); 
		hcmd.len[0] = sizeof(cmd.new);
		hcmd.data[0] = &cmd.new;
	} else {
		ret = -EOPNOTSUPP;
		goto error;
	}

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	return iwl_txq_alloc_response(trans, txq, &hcmd);

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
{
	if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
		 "queue %d out of range", queue))
		return;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_txq_gen2_free(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
{
	int i;

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
		if (!trans->txqs.txq[i])
			continue;

		iwl_txq_gen2_free(trans, i);
	}
}

int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
	struct iwl_txq *queue;
	int ret;

	/* alloc and init the tx queue */
	if (!trans->txqs.txq[txq_id]) {
		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
		if (!queue) {
			IWL_ERR(trans, "Not enough memory for tx queue\n");
			return -ENOMEM;
		}
		trans->txqs.txq[txq_id] = queue;
		ret = iwl_txq_alloc(trans, queue, queue_size, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		queue = trans->txqs.txq[txq_id];
	}

	ret = iwl_txq_init(trans, queue, queue_size,
			   (txq_id == trans->txqs.cmd.q_id));
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans->txqs.txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans->txqs.queue_used);

	return 0;

error:
	iwl_txq_gen2_tx_free(trans);
	return ret;
}

static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
						      struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr;
	dma_addr_t hi_len;

	addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) <= sizeof(u32))
		return addr;

	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

	/*
	 * shift by 16 twice to avoid warnings on 32-bit
	 * (where this code never runs anyway due to the
	 * if statement above)
	 */
	return addr | ((hi_len << 16) << 16);
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index)
{
	int i, num_tbs;
	struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(trans,
								    tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(trans,
								      tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	tfd->num_tbs = 0;
}

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans->txqs.bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
			bc_ent;
}

void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->txqs.cmd.q_id)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
			bc_ent;
}

/*
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	if (!txq->entries)
		return;

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	if (trans->trans_cfg->gen2)
		iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				       iwl_txq_get_tfd(trans, txq, rd_ptr));
	else
		iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
				       txq, rd_ptr);

	/* free SKB */
	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

void iwl_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int tfd_num, read_ptr, last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
		return;

	if (WARN_ON(!txq))
		return;

	tfd_num = iwl_txq_get_cmd_index(txq, ssn);
	read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans->txqs.queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

	if (!iwl_txq_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->trans_cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);

		iwl_op_mode_time_point(trans->op_mode,
				       IWL_FW_INI_TIME_POINT_FAKE_TX,
				       NULL);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_txq_free_tso_page(trans, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->trans_cfg->gen2)
			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq);
	}

	iwl_txq_progress(txq);

	if (iwl_txq_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in the reclaim path, which is not
		 * re-entrant, so no one will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx as well.
		 * Bottom line, we can unlock and re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans->txqs.dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_txq_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		spin_lock_bh(&txq->lock);
		txq->overflow_tx = false;
	}

out:
	spin_unlock_bh(&txq->lock);
}

/* Set wr_ptr of specific device and txq */
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);

	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze)
{
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = trans->txqs.txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);
"Freezing" : "Waking", queue); 1704 1705 txq->frozen = freeze; 1706 1707 if (txq->read_ptr == txq->write_ptr) 1708 goto next_queue; 1709 1710 if (freeze) { 1711 if (unlikely(time_after(now, 1712 txq->stuck_timer.expires))) { 1713 /* 1714 * The timer should have fired, maybe it is 1715 * spinning right now on the lock. 1716 */ 1717 goto next_queue; 1718 } 1719 /* remember how long until the timer fires */ 1720 txq->frozen_expiry_remainder = 1721 txq->stuck_timer.expires - now; 1722 del_timer(&txq->stuck_timer); 1723 goto next_queue; 1724 } 1725 1726 /* 1727 * Wake a non-empty queue -> arm timer with the 1728 * remainder before it froze 1729 */ 1730 mod_timer(&txq->stuck_timer, 1731 now + txq->frozen_expiry_remainder); 1732 1733 next_queue: 1734 spin_unlock_bh(&txq->lock); 1735 } 1736 } 1737 1738 #define HOST_COMPLETE_TIMEOUT (2 * HZ) 1739 1740 static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans, 1741 struct iwl_host_cmd *cmd) 1742 { 1743 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); 1744 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; 1745 int cmd_idx; 1746 int ret; 1747 1748 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); 1749 1750 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 1751 &trans->status), 1752 "Command %s: a command is already active!\n", cmd_str)) 1753 return -EIO; 1754 1755 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); 1756 1757 cmd_idx = trans->ops->send_cmd(trans, cmd); 1758 if (cmd_idx < 0) { 1759 ret = cmd_idx; 1760 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1761 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", 1762 cmd_str, ret); 1763 return ret; 1764 } 1765 1766 ret = wait_event_timeout(trans->wait_command_queue, 1767 !test_bit(STATUS_SYNC_HCMD_ACTIVE, 1768 &trans->status), 1769 HOST_COMPLETE_TIMEOUT); 1770 if (!ret) { 1771 IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 1772 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1773 1774 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 1775 txq->read_ptr, txq->write_ptr); 1776 1777 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1778 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1779 cmd_str); 1780 ret = -ETIMEDOUT; 1781 1782 iwl_trans_sync_nmi(trans); 1783 goto cancel; 1784 } 1785 1786 if (test_bit(STATUS_FW_ERROR, &trans->status)) { 1787 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, 1788 &trans->status)) { 1789 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); 1790 dump_stack(); 1791 } 1792 ret = -EIO; 1793 goto cancel; 1794 } 1795 1796 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1797 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 1798 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1799 ret = -ERFKILL; 1800 goto cancel; 1801 } 1802 1803 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1804 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); 1805 ret = -EIO; 1806 goto cancel; 1807 } 1808 1809 return 0; 1810 1811 cancel: 1812 if (cmd->flags & CMD_WANT_SKB) { 1813 /* 1814 * Cancel the CMD_WANT_SKB flag for the cmd in the 1815 * TX cmd queue. Otherwise in case the cmd comes 1816 * in later, it will possibly set an invalid 1817 * address (cmd->meta.source). 
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
			    struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3))) {
		IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
		return -EHOSTDOWN;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = trans->ops->send_cmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_trans_txq_send_hcmd_sync(trans, cmd);
}
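
/*
 * Illustrative sketch (not part of the driver): how an op-mode caller might
 * send a synchronous host command through iwl_trans_txq_send_hcmd() above.
 * The command ID and payload are hypothetical; only the struct iwl_host_cmd
 * fields already used in this file (.id, .flags, .len[], .data[], .resp_pkt)
 * are assumed. Kept as a comment so it does not affect the build.
 *
 *	static int example_send_cmd(struct iwl_trans *trans)
 *	{
 *		u8 payload[4] = {};			// hypothetical payload
 *		struct iwl_host_cmd hcmd = {
 *			.id = SOME_CMD_ID,		// hypothetical command ID
 *			.flags = CMD_WANT_SKB,		// wait for the response packet
 *			.len[0] = sizeof(payload),
 *			.data[0] = payload,
 *		};
 *		int ret;
 *
 *		ret = iwl_trans_txq_send_hcmd(trans, &hcmd);
 *		if (ret)
 *			return ret;
 *
 *		// ... inspect hcmd.resp_pkt here ...
 *
 *		iwl_free_resp(&hcmd);
 *		return 0;
 *	}
 */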