/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
					     struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
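
/*
 * Pull the first free buffer off sc->rx.rxbuf, hand its DMA address to
 * the hardware FIFO for the given EDMA queue and track the skb in the
 * matching software FIFO. Returns false when the hardware FIFO for that
 * queue is already full; the caller must ensure sc->rx.rxbuf is not empty.
 */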
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

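	/* All buffers are back on sc->rx.rxbuf now; free their skbs. */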
	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	spin_unlock_bh(&sc->rx.rxbuflock);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	spin_unlock_bh(&sc->rx.rxbuflock);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					     min(common->cachelsz, (u16)64));

		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			  common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_print(common, ATH_DBG_FATAL,
				  "failed to allocate rx descriptors: %d\n",
				  error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					  "dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}
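
/*
 * Parse a received beacon and return true if its TIM element indicates
 * that the AP has buffered broadcast/multicast frames (CAB) for the
 * current DTIM period.
 */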
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}
}

static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rxs)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

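	/*
	 * EDMA hardware writes the rx status in-place at the head of the
	 * buffer. Sync the buffer to the CPU and let the HAL check that
	 * status to see whether DMA for this frame has completed;
	 * -EINPROGRESS means the hardware still owns it.
	 */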
	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * reject the frame, we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    test_bit(rx_stats->rs_keyix, common->tkip_keymap))
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_sta *sta;
	struct ath_node *an;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;
	__le16 fc;

	fc = hdr->frame_control;

	rcu_read_lock();
	/*
	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
	 * under the current ath9k virtual wiphy implementation as we have
	 * no way of tying a vif to wiphy. Typically vifs are attached to
	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
	 * wiphy you'd have to iterate over every wiphy and each sdata.
	 */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
	if (sta) {
		an = (struct ath_node *) sta->drv_priv;
		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
		    !rx_stats->rs_moreaggr)
			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
		last_rssi = an->last_rssi;
	}
	rcu_read_unlock();

	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	if (ieee80211_is_beacon(fc))
		ah->stats.avgbrssi = rx_stats->rs_rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

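		/*
		 * Run the common sanity checks (error status, RSSI
		 * smoothing, rate lookup) on the rx status; frames that
		 * fail are simply requeued to the hardware.
		 */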
		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue;

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_rx_skb_postprocess(common, skb, &rs,
					 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (unlikely(ath9k_check_auto_sleep(sc) ||
			     (sc->ps_flags & (PS_WAIT_FOR_BEACON |
					      PS_WAIT_FOR_CAB |
					      PS_WAIT_FOR_PSPOLL_DATA))))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}