/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ieee80211_stop_queues(htt->ar->hw);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}
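
/*
 * MSDU id bookkeeping: an allocated msdu_id doubles as the index into
 * htt->pending_tx[] and into the htt->used_msdu_ids bitmap, so both helpers
 * below must be called with htt->tx_lock held. The tx paths further down in
 * this file follow this pattern:
 *
 *	spin_lock_bh(&htt->tx_lock);
 *	res = ath10k_htt_tx_alloc_msdu_id(htt);
 *	if (res < 0) {
 *		spin_unlock_bh(&htt->tx_lock);
 *		goto err_tx_dec;
 *	}
 *	msdu_id = res;
 *	htt->pending_tx[msdu_id] = msdu;
 *	spin_unlock_bh(&htt->tx_lock);
 */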
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
	int msdu_id;

	lockdep_assert_held(&htt->tx_lock);

	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
				      htt->max_num_pending_tx);
	if (msdu_id == htt->max_num_pending_tx)
		return -ENOBUFS;

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
	__set_bit(msdu_id, htt->used_msdu_ids);
	return msdu_id;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!test_bit(msdu_id, htt->used_msdu_ids))
		ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
	__clear_bit(msdu_id, htt->used_msdu_ids);
}

int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
	spin_lock_init(&htt->tx_lock);
	init_waitqueue_head(&htt->empty_tx_wq);

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
	else
		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
				  htt->max_num_pending_tx, GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
				     BITS_TO_LONGS(htt->max_num_pending_tx),
				     GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}

static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	/* No locks needed. Called after communication with the device has
	 * been stopped. */

	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

void ath10k_htt_tx_detach(struct ath10k_htt *htt)
{
	ath10k_htt_tx_cleanup_pending(htt);
	kfree(htt->pending_tx);
	kfree(htt->used_msdu_ids);
	return;
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only 8-bit masks, so there is no need to
	 * worry about endianness */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn("failed to send htt type stats request: %d\n", ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
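
/*
 * Set up the single low-latency rx ring on the target. The *_offset fields
 * of the setup message are expressed in 4-byte words, hence the division by
 * four in the desc_offset() helper used below. For example (hypothetical
 * numbers; the real values come from the struct htt_rx_desc layout):
 *
 *	offsetof(struct htt_rx_desc, msdu_payload) == 0xf0 bytes
 *	ring->msdu_payload_offset == 0xf0 / 4 == 0x3c words
 */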
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
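
/*
 * Send a management frame via the dedicated HTT_H2T_MSG_TYPE_MGMT_TX command
 * (the data tx path below notes that HTT 3.0+ firmware has no separate mgmt
 * tx command). The frame is DMA-mapped and referenced by physical address;
 * only the first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes are copied into the
 * command itself so the target can inspect the 802.11 header. The msdu stays
 * in htt->pending_tx[] under desc_id == msdu_id until a tx completion (or the
 * forced cleanup above) releases it via ath10k_txrx_tx_unref().
 */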
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.frag_len = 0;
	skb_cb->htt.pad_len = 0;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	ath10k_skb_unmap(dev, msdu);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
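
/*
 * Send a data frame via HTT_H2T_MSG_TYPE_TX_FRM. When use_frags is set, a
 * two-entry fragment descriptor list (terminated by a zeroed entry) plus
 * alignment padding is pushed into the skb headroom in front of the frame,
 * and cmd->data_tx.frags_paddr points at it. A rough sketch of the DMA view,
 * where "pad" covers the frame's misalignment so that the fragment list
 * itself starts on a 4-byte boundary:
 *
 *	paddr -> | frag[0] | frag[1] = {0, 0} | pad | 802.11 frame ... |
 *	                                              \- frag[0].paddr
 *
 * In the !use_frags case (HTT 3.0+ management frames) the frame pointer is
 * handed to the firmware directly, as the comment inside the function notes.
 */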
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct htt_cmd *cmd;
	struct htt_data_tx_desc_frag *tx_frags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *txdesc = NULL;
	bool use_frags;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
	u8 tid;
	int prefetch_len, desc_len;
	int msdu_id = -1;
	int res;
	u8 flags0;
	u16 flags1;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

	txdesc = ath10k_htc_alloc_skb(desc_len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	/* Since HTT 3.0 there is no separate mgmt tx command. However, in
	 * case of mgmt tx using TX_FRM there is no tx fragment list; the
	 * host driver passes the frame pointer directly instead. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
		ath10k_warn("htt alignment check failed. dropping packet.\n");
		res = -EIO;
		goto err_free_txdesc;
	}

	if (use_frags) {
		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
		skb_cb->htt.pad_len = (unsigned long)msdu->data -
				      round_down((unsigned long)msdu->data, 4);

		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
	} else {
		skb_cb->htt.frag_len = 0;
		skb_cb->htt.pad_len = 0;
	}

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err_pull_txfrag;

	if (use_frags) {
		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
					DMA_TO_DEVICE);

		/* tx fragment list must be terminated with zero-entry */
		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
						  skb_cb->htt.frag_len +
						  skb_cb->htt.pad_len);
		tx_frags[0].len = __cpu_to_le32(msdu->len -
						skb_cb->htt.frag_len -
						skb_cb->htt.pad_len);
		tx_frags[1].paddr = __cpu_to_le32(0);
		tx_frags[1].len = __cpu_to_le32(0);

		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
					   DMA_TO_DEVICE);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
			msdu->data, msdu->len);

	skb_put(txdesc, desc_len);
	cmd = (struct htt_cmd *)txdesc->data;

	tid = ATH10K_SKB_CB(msdu)->htt.tid;

	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

	flags0 = 0;
	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	if (use_frags)
		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
	else
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

	flags1 = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	cmd->data_tx.flags0 = flags0;
	cmd->data_tx.flags1 = __cpu_to_le16(flags1);
	cmd->data_tx.len = __cpu_to_le16(msdu->len -
					 skb_cb->htt.frag_len -
					 skb_cb->htt.pad_len);
	cmd->data_tx.id = __cpu_to_le16(msdu_id);
	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
	cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}