/*
 * Marvell Wireless LAN device driver: AP TX and RX data handling
 *
 * Copyright (C) 2012-2014, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "main.h"
#include "wmm.h"
#include "11n_aggr.h"
#include "11n_rxreorder.h"

/* This function walks the given RA lists and deletes bridged packets from
 * them until the number of pending bridged packets drops to the low
 * bridge packet threshold. It returns true if at least one packet was
 * deleted, false otherwise.
 */
static bool
mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
				  struct list_head *ra_list_head,
				  int tid)
{
	struct mwifiex_ra_list_tbl *ra_list;
	struct sk_buff *skb, *tmp;
	bool pkt_deleted = false;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_adapter *adapter = priv->adapter;

	list_for_each_entry(ra_list, ra_list_head, list) {
		if (skb_queue_empty(&ra_list->skb_head))
			continue;

		skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
			tx_info = MWIFIEX_SKB_TXCB(skb);
			if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
				__skb_unlink(skb, &ra_list->skb_head);
				mwifiex_write_data_complete(adapter, skb, 0,
							    -1);
				if (ra_list->tx_paused)
					priv->wmm.pkts_paused[tid]--;
				else
					atomic_dec(&priv->wmm.tx_pkts_queued);
				pkt_deleted = true;
			}
			if ((atomic_read(&adapter->pending_bridged_pkts) <=
			     MWIFIEX_BRIDGED_PKTS_THR_LOW))
				break;
		}
	}

	return pkt_deleted;
}
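
/* Bridged-packet accounting uses a simple watermark scheme: the helper
 * above drains queued bridged packets until the pending count falls back
 * to MWIFIEX_BRIDGED_PKTS_THR_LOW, while mwifiex_uap_queue_bridged_pkt()
 * below refuses to queue new bridged packets once
 * adapter->pending_bridged_pkts reaches MWIFIEX_BRIDGED_PKTS_THR_HIGH.
 */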

/* This function deletes packets from a particular RA list. The RA list
 * index from which packets were deleted is preserved, so that packets from
 * the next RA list are deleted on the subsequent call, thus maintaining
 * fairness.
 */
static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
{
	unsigned long flags;
	struct list_head *ra_list;
	int i;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
		if (priv->del_list_idx == MAX_NUM_TID)
			priv->del_list_idx = 0;
		ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
		if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list, i)) {
			priv->del_list_idx++;
			break;
		}
	}

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
					  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	int hdr_chop;
	struct ethhdr *p_ethhdr;
	struct mwifiex_sta_node *src_node;
	int index;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if ((atomic_read(&adapter->pending_bridged_pkts) >=
	     MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Tx: Bridge packet limit reached. Drop packet!\n");
		kfree_skb(skb);
		mwifiex_uap_cleanup_tx_queues(priv);
		return;
	}

	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
		     sizeof(bridge_tunnel_header))) ||
	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
		     sizeof(rfc1042_header)) &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
		/* Replace the 802.3 header and rfc1042 header (llc/snap)
		 * with an Ethernet II header, keeping the src/dst and the
		 * snap_type (ethertype).
		 *
		 * The firmware only passes up SNAP frames, converting all
		 * RX data from 802.11 to 802.2/LLC/SNAP frames.
		 *
		 * To create the Ethernet II header, just move the src and
		 * dst addresses right before the snap_type.
		 */
		p_ethhdr = (struct ethhdr *)
			((u8 *)(&rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->rfc1042_hdr)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
		memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
		       sizeof(p_ethhdr->h_source));
		memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
		       sizeof(p_ethhdr->h_dest));
		/* Chop off the rxpd + the excess memory from the
		 * 802.2/llc/snap header that was removed.
		 */
		hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
	} else {
		/* Chop off the rxpd */
		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
	}
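
	/* Worked example of the rewrite above, assuming the usual 14-byte
	 * 802.3 header and 8-byte LLC/SNAP header, so that p_ethhdr lands
	 * at eth803_hdr + 14 + 8 - 6 - 6 - 2 = eth803_hdr + 8:
	 *
	 *   before: | dst(6) | src(6) | len(2) | LLC/OUI(6) | snap_type(2) |
	 *   after:  |     8 bytes chopped      | dst(6) | src(6) | type(2) |
	 *
	 * The old snap_type already sits exactly where the new Ethernet II
	 * ethertype belongs, so only dst/src need to be copied.
	 */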

	/* Chop off the leading header bytes so that the skb points to the
	 * start of either the reconstructed Ethernet II frame or the
	 * 802.2/llc/snap frame.
	 */
	skb_pull(skb, hdr_chop);

	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "data: Tx: insufficient skb headroom %d\n",
			    skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			mwifiex_dbg(priv->adapter, ERROR,
				    "Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return;
		}

		kfree_skb(skb);
		skb = new_skb;
		mwifiex_dbg(priv->adapter, INFO,
			    "info: new skb headroom %d\n",
			    skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;

	src_node = mwifiex_get_sta_entry(priv, rx_pkt_hdr->eth803_hdr.h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
		src_node->stats.last_tx_rate = uap_rx_pd->rx_rate;
		src_node->stats.last_tx_htinfo = uap_rx_pd->ht_info;
	}

	if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
		/* Update bridge packet statistics, as the packet is not
		 * going to the kernel/upper layer.
		 */
		priv->stats.rx_bytes += skb->len;
		priv->stats.rx_packets++;

		/* The bridged packet goes to the TX queue, so save the
		 * packet length in the TXCB to update statistics on TX
		 * completion.
		 */
		tx_info->pkt_len = skb->len;
	}

	__net_timestamp(skb);

	index = mwifiex_1d_to_wmm_queue[skb->priority];
	atomic_inc(&priv->wmm_tx_pending[index]);
	mwifiex_wmm_add_buf_txqueue(priv, skb);
	atomic_inc(&adapter->tx_pending);
	atomic_inc(&adapter->pending_bridged_pkts);

	mwifiex_queue_main_work(priv->adapter);
}

/*
 * This function contains the logic for AP packet forwarding.
 *
 * If a packet is multicast/broadcast, it is sent to the kernel/upper layer
 * as well as queued back to the AP TX queue, so that it can be sent to the
 * other associated stations.
 * If a packet is unicast and the RA is present in the associated station
 * list, it is requeued into the AP TX queue.
 * If a packet is unicast and the RA is not in the associated station list,
 * the packet is forwarded to the kernel to handle the routing logic.
 */
int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ra[ETH_ALEN];
	struct sk_buff *skb_uap;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	/* don't do packet forwarding in disconnected state */
	if (!priv->media_connected) {
		mwifiex_dbg(adapter, ERROR,
			    "drop packet in disconnected state.\n");
		dev_kfree_skb_any(skb);
		return 0;
	}

	memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);

	if (is_multicast_ether_addr(ra)) {
		skb_uap = skb_copy(skb, GFP_ATOMIC);
		/* Guard against allocation failure before bridging the
		 * copy.
		 */
		if (likely(skb_uap))
			mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
	} else {
		if (mwifiex_get_sta_entry(priv, ra)) {
			/* Requeue Intra-BSS packet */
			mwifiex_uap_queue_bridged_pkt(priv, skb);
			return 0;
		}
	}
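
	/* At this point a multicast/broadcast frame has had a copy queued
	 * for bridging back into the BSS, while the original skb falls
	 * through to mwifiex_process_rx_packet() below so the kernel sees
	 * it too. A unicast frame with an unknown RA takes only the kernel
	 * path.
	 */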

	/* Forward unicast/Inter-BSS packets to the kernel. */
	return mwifiex_process_rx_packet(priv, skb);
}

int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *src_node;
	struct ethhdr *p_ethhdr;
	struct sk_buff *skb_uap;
	struct mwifiex_txinfo *tx_info;

	if (!skb)
		return -1;

	p_ethhdr = (void *)skb->data;
	src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
	}

	skb->dev = priv->netdev;
	skb->protocol = eth_type_trans(skb, priv->netdev);
	skb->ip_summed = CHECKSUM_NONE;

	/* This is required only in case of 11n and USB/PCIe, as we
	 * allocate a 4K buffer to be able to receive 4K AMSDU packets.
	 * For SDIO we allocate buffers based on the packet size, so this
	 * is not needed there.
	 *
	 * The truesize is adjusted here because each skb is allocated at
	 * 4K while typical received packets are only 2K, which makes the
	 * kernel start dropping packets when an application sizes its
	 * receive buffer based on the 2K packet size. For example, if a
	 * 64K datagram arrives as IP fragments and the application
	 * allocates 64K to receive it, passing each ~1.5K fragment up in
	 * a 4K skb nearly doubles the accounted memory; once the 64K
	 * limit is hit, the kernel drops the remaining fragments. This
	 * currently makes the Filesndl-ht.scr script fail for UDP, hence
	 * this fix.
	 */
	if ((adapter->iface_type == MWIFIEX_USB ||
	     adapter->iface_type == MWIFIEX_PCIE) &&
	    (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);

	if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
	    mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
		if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
			skb_uap =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		else
			skb_uap = skb_copy(skb, GFP_ATOMIC);

		if (likely(skb_uap)) {
			tx_info = MWIFIEX_SKB_TXCB(skb_uap);
			memset(tx_info, 0, sizeof(*tx_info));
			tx_info->bss_num = priv->bss_num;
			tx_info->bss_type = priv->bss_type;
			tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
			__net_timestamp(skb_uap);
			mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
			atomic_inc(&adapter->tx_pending);
			atomic_inc(&adapter->pending_bridged_pkts);
			if ((atomic_read(&adapter->pending_bridged_pkts) >=
			     MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
				mwifiex_dbg(adapter, ERROR,
					    "Tx: Bridge packet limit reached. Drop packet!\n");
				mwifiex_uap_cleanup_tx_queues(priv);
			}
		} else {
			mwifiex_dbg(adapter, ERROR,
				    "failed to allocate skb_uap\n");
		}

		mwifiex_queue_main_work(adapter);
		/* Don't forward Intra-BSS unicast packets to the upper
		 * layer.
		 */
		if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
			return 0;
	}

	/* Forward multicast/broadcast packets to the upper layer. */
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);

	return 0;
}
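
/* Illustration of the RxPD bounds check performed below (the numbers are
 * hypothetical): with skb->len == 1600 and rx_pkt_offset == 64, any
 * rx_pkt_length above 1536 would point past the end of the buffer, so
 * the frame is treated as corrupt and dropped.
 */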

/*
 * This function processes a packet received on the AP interface.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it is a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. The skb is then passed to the AP packet
 * forwarding logic.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u16 rx_pkt_type;
	u8 ta[ETH_ALEN], pkt_type;
	unsigned long flags;
	struct mwifiex_sta_node *node;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);

	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16)skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "wrong rx packet: len=%d, offset=%d, length=%d\n",
			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
			    le16_to_cpu(uap_rx_pd->rx_pkt_length));
		priv->stats.rx_dropped++;

		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->stats.tx_failed++;

		dev_kfree_skb_any(skb);
		return 0;
	}

	if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			mwifiex_dbg(adapter, DATA,
				    "Rx of mgmt packet failed\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->rx_seq[uap_rx_pd->priority] =
				le16_to_cpu(uap_rx_pd->seq_num);
		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
	}

	if (!priv->ap_11n_enabled ||
	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
	     (rx_pkt_type != PKT_TYPE_AMSDU))) {
		ret = mwifiex_handle_uap_rx_forward(priv, skb);
		return ret;
	}

	/* Reorder and send to kernel */
	pkt_type = (u8)rx_pkt_type;
	ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
					 uap_rx_pd->priority, ta, pkt_type,
					 skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
		dev_kfree_skb_any(skb);

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}

/*
 * This function fills the TxPD for AP TX packets.
 *
 * The TX buffer received by this function should already have the
 * header space allocated for the TxPD.
 *
 * This function inserts the TxPD between the interface header and the
 * actual data and adjusts the buffer pointers accordingly.
 *
 * The following TxPD fields are set by this function, as required:
 * - BSS number
 * - TX packet length and offset
 * - Priority
 * - Packet delay
 * - Priority-specific TX control
 * - Flags
 */
void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
			       struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_txpd *txpd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	int pad;
	u16 pkt_type, pkt_offset;
	int hroom = adapter->intf_hdr_len;

	if (!skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "Tx: bad packet length: %d\n", skb->len);
		tx_info->status_code = -1;
		return skb->data;
	}

	BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);
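
	/* The pad computed below keeps the buffer DMA-aligned: after
	 * skb_push()ing the TxPD here and the interface header later on,
	 * the final buffer start must land on a MWIFIEX_DMA_ALIGN_SZ
	 * boundary. Subtracting NULL turns the pointer expression into an
	 * integer without an explicit cast, so effectively
	 * pad = (skb->data - sizeof(*txpd) - hroom) % MWIFIEX_DMA_ALIGN_SZ.
	 */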
	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;

	pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
	      (MWIFIEX_DMA_ALIGN_SZ - 1);

	skb_push(skb, sizeof(*txpd) + pad);

	txpd = (struct uap_txpd *)skb->data;
	memset(txpd, 0, sizeof(*txpd));
	txpd->bss_num = priv->bss_num;
	txpd->bss_type = priv->bss_type;
	txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(*txpd) +
						pad)));
	txpd->priority = (u8)skb->priority;

	txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);

	if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
	    tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
		txpd->tx_token_id = tx_info->ack_frame_id;
		txpd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
	}

	if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
		/*
		 * Set the priority-specific tx_control field; setting it
		 * to 0 will cause the default value to be used later in
		 * this function.
		 */
		txpd->tx_control =
		    cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);

	/* Offset of actual data */
	pkt_offset = sizeof(*txpd) + pad;
	if (pkt_type == PKT_TYPE_MGMT) {
		/* Set the packet type and add the header for management
		 * frames.
		 */
		txpd->tx_pkt_type = cpu_to_le16(pkt_type);
		pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
	}

	txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);

	/* make space for adapter->intf_hdr_len */
	skb_push(skb, hroom);

	if (!txpd->tx_control)
		/* TxCtrl set by user or default */
		txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	return skb->data;
}