/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ieee80211_stop_queues(htt->ar->hw);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
	int msdu_id;

	lockdep_assert_held(&htt->tx_lock);

	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
				      htt->max_num_pending_tx);
	if (msdu_id == htt->max_num_pending_tx)
		return -ENOBUFS;

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
	__set_bit(msdu_id, htt->used_msdu_ids);
	return msdu_id;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!test_bit(msdu_id, htt->used_msdu_ids))
		ath10k_warn("trying to free unallocated msdu_id %d\n",
			    msdu_id);

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
	__clear_bit(msdu_id, htt->used_msdu_ids);
}

int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
	u8 pipe;

	spin_lock_init(&htt->tx_lock);
	init_waitqueue_head(&htt->empty_tx_wq);

	/* At the beginning, the free queue number should hint at the
	 * maximum queue length. */
	pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
	htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
								   pipe);

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kcalloc(htt->max_num_pending_tx,
				  sizeof(*htt->pending_tx), GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kcalloc(BITS_TO_LONGS(htt->max_num_pending_tx),
				     sizeof(unsigned long), GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}

static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct sk_buff *txdesc;
	int msdu_id;

	/* No locks needed. Called after communication with the device has
	 * been stopped. */

	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		txdesc = htt->pending_tx[msdu_id];
		if (!txdesc)
			continue;

		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
			ATH10K_SKB_CB(txdesc)->htt.refcount = 1;

		ATH10K_SKB_CB(txdesc)->htt.discard = true;
		ath10k_txrx_tx_unref(htt, txdesc);
	}
}

void ath10k_htt_tx_detach(struct ath10k_htt *htt)
{
	ath10k_htt_tx_cleanup_pending(htt);
	kfree(htt->pending_tx);
	kfree(htt->used_msdu_ids);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_htt *htt = ar->htt;

	if (skb_cb->htt.is_conf) {
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb_cb->is_aborted) {
		skb_cb->htt.discard = true;

		/* If the skbuff is aborted we need to make sure we'll free
		 * up the tx resources. We can't simply run tx_unref() twice,
		 * because if the HTT tx completion came in earlier we'd
		 * access unallocated memory. */
		if (skb_cb->htt.refcount > 1)
			skb_cb->htt.refcount = 1;
	}

	ath10k_txrx_tx_unref(htt, skb);
}

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ATH10K_SKB_CB(skb)->htt.is_conf = true;

	ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * The HW expects the buffer to be an integral number of 4-byte
	 * "words".
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ATH10K_SKB_CB(skb)->htt.is_conf = true;

	ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct ath10k_skb_cb *skb_cb;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		return res;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	txdesc = ath10k_htc_alloc_skb(len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err;
	}

	spin_lock_bh(&htt->tx_lock);
	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
	if (msdu_id < 0) {
		spin_unlock_bh(&htt->tx_lock);
		res = msdu_id;
		goto err;
	}
	htt->pending_tx[msdu_id] = txdesc;
	spin_unlock_bh(&htt->tx_lock);

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	/* The refcount is decremented by the HTC and HTT completions until
	 * it reaches zero and the txdesc is freed. */
	skb_cb = ATH10K_SKB_CB(txdesc);
	skb_cb->htt.msdu_id = msdu_id;
	skb_cb->htt.refcount = 2;
	skb_cb->htt.msdu = msdu;

	res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err;

	return 0;

err:
	ath10k_skb_unmap(dev, msdu);

	if (txdesc)
		dev_kfree_skb_any(txdesc);
	if (msdu_id >= 0) {
		spin_lock_bh(&htt->tx_lock);
		htt->pending_tx[msdu_id] = NULL;
		ath10k_htt_tx_free_msdu_id(htt, msdu_id);
		spin_unlock_bh(&htt->tx_lock);
	}
	ath10k_htt_tx_dec_pending(htt);
	return res;
}

int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct htt_cmd *cmd;
	struct htt_data_tx_desc_frag *tx_frags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb;
	struct sk_buff *txdesc = NULL;
	struct sk_buff *txfrag = NULL;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
	u8 tid;
	int prefetch_len, desc_len, frag_len;
	dma_addr_t frags_paddr;
	int msdu_id = -1;
	int res;
	u8 flags0;
	u16 flags1;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		return res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
	frag_len = sizeof(*tx_frags) * 2;

	txdesc = ath10k_htc_alloc_skb(desc_len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err;
	}

	txfrag = dev_alloc_skb(frag_len);
	if (!txfrag) {
		res = -ENOMEM;
		goto err;
	}

	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
		ath10k_warn("htt alignment check failed. dropping packet.\n");
		res = -EIO;
		goto err;
	}

	spin_lock_bh(&htt->tx_lock);
	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
	if (msdu_id < 0) {
		spin_unlock_bh(&htt->tx_lock);
		res = msdu_id;
		goto err;
	}
	htt->pending_tx[msdu_id] = txdesc;
	spin_unlock_bh(&htt->tx_lock);

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err;

	/* tx fragment list must be terminated with zero-entry */
	skb_put(txfrag, frag_len);
	tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
	tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	tx_frags[0].len = __cpu_to_le32(msdu->len);
	tx_frags[1].paddr = __cpu_to_le32(0);
	tx_frags[1].len = __cpu_to_le32(0);

	res = ath10k_skb_map(dev, txfrag);
	if (res)
		goto err;

	ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
		   (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
			txfrag->data, frag_len);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
			msdu->data, msdu->len);

	skb_put(txdesc, desc_len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, desc_len);

	tid = ATH10K_SKB_CB(msdu)->htt.tid;

	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

	flags0 = 0;
	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
	flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
		     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

	flags1 = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);

	frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	cmd->data_tx.flags0 = flags0;
	cmd->data_tx.flags1 = __cpu_to_le16(flags1);
	cmd->data_tx.len = __cpu_to_le16(msdu->len);
	cmd->data_tx.id = __cpu_to_le16(msdu_id);
	cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);

	/* The refcount is decremented by the HTC and HTT completions until
	 * it reaches zero and the txdesc is freed. */
	skb_cb = ATH10K_SKB_CB(txdesc);
	skb_cb->htt.msdu_id = msdu_id;
	skb_cb->htt.refcount = 2;
	skb_cb->htt.txfrag = txfrag;
	skb_cb->htt.msdu = msdu;

	res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err;

	return 0;

err:
	if (txfrag)
		ath10k_skb_unmap(dev, txfrag);
	if (txdesc)
		dev_kfree_skb_any(txdesc);
	if (txfrag)
		dev_kfree_skb_any(txfrag);
	if (msdu_id >= 0) {
		spin_lock_bh(&htt->tx_lock);
		htt->pending_tx[msdu_id] = NULL;
		ath10k_htt_tx_free_msdu_id(htt, msdu_id);
		spin_unlock_bh(&htt->tx_lock);
	}
	ath10k_htt_tx_dec_pending(htt);
	ath10k_skb_unmap(dev, msdu);
	return res;
}