/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/relay.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

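/*
 * Hold back the buffer that was just processed and link the previously
 * held one instead, so the descriptor most recently returned by the
 * hardware is never immediately re-linked as the tail of the chain
 * (cf. the self-link note above ath_rx_buf_link()).
 */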
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
{
	if (sc->rx.buf_hold)
		ath_rx_buf_link(sc, sc->rx.buf_hold);

	sc->rx.buf_hold = bf;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

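/*
 * EDMA receive path (hardware advertising ATH9K_HW_CAP_EDMA): completed
 * frames are delivered through two RX FIFOs, high priority
 * (ATH9K_RX_QUEUE_HP) and low priority (ATH9K_RX_QUEUE_LP). The owning
 * ath_buf is stashed in the skb control block via SKB_CB_ATHBUF() so it
 * can be recovered when the skb is pulled back out of the FIFO.
 */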
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

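/*
 * Common RX setup entry point: EDMA-capable hardware is handed off to
 * ath_rx_edma_init() above, everything else uses the legacy linked
 * descriptor ring set up below.
 */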
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;
err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;

}

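/*
 * Example, derived from the checks above: a single associated station
 * interface on a non-DFS channel typically ends up with
 * UCAST | BCAST | MCAST | MYBEACON, plus COMP_BAR when HT is enabled;
 * adding a second interface replaces MYBEACON with BEACON and also sets
 * MCAST_BCAST_ALL.
 */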
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	return 0;
}

static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

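/*
 * The beacon body is walked below as a sequence of (id, len, data)
 * information elements. For the TIM element (WLAN_EID_TIM) the payload
 * is dtim_count, dtim_period, bitmap_ctrl, partial virtual bitmap; bit 0
 * of bitmap_ctrl is the "broadcast/multicast buffered" indication, and it
 * is only acted on when this beacon is a DTIM (dtim_count == 0).
 * For example, a TIM element of 05 04 00 03 01 00 (id=5, len=4,
 * dtim_count=0, dtim_period=3, bitmap_ctrl=0x01) makes this return true.
 */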
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");
		ath9k_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

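/*
 * Power-save bookkeeping for received frames: beacons and CAB
 * (content-after-beacon) traffic clear or set the PS_WAIT_FOR_* flags,
 * and the driver can drop back to network sleep once no flag indicates
 * outstanding traffic.
 */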
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device regain ownership of the buffer */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	if (bf == sc->rx.buf_hold)
		return NULL;

	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * mark descriptor as zero-length and set the 'more'
		 * flag to ensure that both buffers get discarded
		 */
		rs->rs_datalen = 0;
		rs->rs_more = true;
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		 ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

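/*
 * Rate decoding: bit 7 of rs_rate marks an HT MCS (the low bits are the
 * MCS index); legacy rates are matched against the current band's bitrate
 * table, with hw_value_short indicating a short-preamble rate.
 */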
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.chandef.chan->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		rxs->flag |= rx_stats->flag;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This lets us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool discard_current = sc->rx.discard_next;

	sc->rx.discard_next = rx_stats->rs_more;
	if (discard_current)
		return -EINVAL;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	rx_status->band = hw->conf.chandef.chan->band;
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	sc->rx.discard_next = false;
	return 0;
}

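/*
 * The hardware pads the 802.11 header out to a 32-bit boundary before the
 * payload. For example, a 26-byte QoS data header is followed by 2 bytes
 * of padding (padpos & 3 == 2), which must be stripped before the frame is
 * handed to mac80211.
 */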
*/ 1010d435700fSSujith padsize = padpos & 3; 1011d435700fSSujith if (padsize && skb->len>=padpos+padsize+FCS_LEN) { 1012d435700fSSujith memmove(skb->data + padsize, skb->data, padpos); 1013d435700fSSujith skb_pull(skb, padsize); 1014d435700fSSujith } 1015d435700fSSujith 1016d435700fSSujith keyix = rx_stats->rs_keyix; 1017d435700fSSujith 1018d435700fSSujith if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && 1019d435700fSSujith ieee80211_has_protected(fc)) { 1020d435700fSSujith rxs->flag |= RX_FLAG_DECRYPTED; 1021d435700fSSujith } else if (ieee80211_has_protected(fc) 1022d435700fSSujith && !decrypt_error && skb->len >= hdrlen + 4) { 1023d435700fSSujith keyix = skb->data[hdrlen + 3] >> 6; 1024d435700fSSujith 1025d435700fSSujith if (test_bit(keyix, common->keymap)) 1026d435700fSSujith rxs->flag |= RX_FLAG_DECRYPTED; 1027d435700fSSujith } 1028d435700fSSujith if (ah->sw_mgmt_crypto && 1029d435700fSSujith (rxs->flag & RX_FLAG_DECRYPTED) && 1030d435700fSSujith ieee80211_is_mgmt(fc)) 1031d435700fSSujith /* Use software decrypt for management frames. */ 1032d435700fSSujith rxs->flag &= ~RX_FLAG_DECRYPTED; 1033d435700fSSujith } 1034b5c80475SFelix Fietkau 1035ab2e2fc8SSven Eckelmann #ifdef CONFIG_ATH9K_DEBUGFS 1036e93d083fSSimon Wunderlich static s8 fix_rssi_inv_only(u8 rssi_val) 1037e93d083fSSimon Wunderlich { 1038e93d083fSSimon Wunderlich if (rssi_val == 128) 1039e93d083fSSimon Wunderlich rssi_val = 0; 1040e93d083fSSimon Wunderlich return (s8) rssi_val; 1041e93d083fSSimon Wunderlich } 1042ab2e2fc8SSven Eckelmann #endif 1043e93d083fSSimon Wunderlich 10449b99e665SSimon Wunderlich /* returns 1 if this was a spectral frame, even if not handled. */ 10459b99e665SSimon Wunderlich static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, 1046e93d083fSSimon Wunderlich struct ath_rx_status *rs, u64 tsf) 1047e93d083fSSimon Wunderlich { 1048bd2ffe14SSven Eckelmann #ifdef CONFIG_ATH9K_DEBUGFS 1049e93d083fSSimon Wunderlich struct ath_hw *ah = sc->sc_ah; 1050e93d083fSSimon Wunderlich u8 bins[SPECTRAL_HT20_NUM_BINS]; 1051e93d083fSSimon Wunderlich u8 *vdata = (u8 *)hdr; 1052e93d083fSSimon Wunderlich struct fft_sample_ht20 fft_sample; 1053e93d083fSSimon Wunderlich struct ath_radar_info *radar_info; 1054e93d083fSSimon Wunderlich struct ath_ht20_mag_info *mag_info; 1055e93d083fSSimon Wunderlich int len = rs->rs_datalen; 10564ab0b0aaSSven Eckelmann int dc_pos; 105712824374SSven Eckelmann u16 length, max_magnitude; 1058e93d083fSSimon Wunderlich 1059e93d083fSSimon Wunderlich /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer 1060e93d083fSSimon Wunderlich * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT 1061e93d083fSSimon Wunderlich * yet, but this is supposed to be possible as well. 1062e93d083fSSimon Wunderlich */ 1063e93d083fSSimon Wunderlich if (rs->rs_phyerr != ATH9K_PHYERR_RADAR && 1064e93d083fSSimon Wunderlich rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT && 1065e93d083fSSimon Wunderlich rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL) 10669b99e665SSimon Wunderlich return 0; 10679b99e665SSimon Wunderlich 10689b99e665SSimon Wunderlich /* check if spectral scan bit is set. This does not have to be checked 10699b99e665SSimon Wunderlich * if received through a SPECTRAL phy error, but shouldn't hurt. 
10709b99e665SSimon Wunderlich */ 10719b99e665SSimon Wunderlich radar_info = ((struct ath_radar_info *)&vdata[len]) - 1; 10729b99e665SSimon Wunderlich if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) 10739b99e665SSimon Wunderlich return 0; 1074e93d083fSSimon Wunderlich 1075e93d083fSSimon Wunderlich /* Variation in the data length is possible and will be fixed later. 1076e93d083fSSimon Wunderlich * Note that we only support HT20 for now. 1077e93d083fSSimon Wunderlich * 1078e93d083fSSimon Wunderlich * TODO: add HT20_40 support as well. 1079e93d083fSSimon Wunderlich */ 1080e93d083fSSimon Wunderlich if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) || 1081e93d083fSSimon Wunderlich (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1)) 10829b99e665SSimon Wunderlich return 1; 1083e93d083fSSimon Wunderlich 1084e93d083fSSimon Wunderlich fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20; 108512824374SSven Eckelmann length = sizeof(fft_sample) - sizeof(fft_sample.tlv); 108612824374SSven Eckelmann fft_sample.tlv.length = __cpu_to_be16(length); 1087e93d083fSSimon Wunderlich 10884ab0b0aaSSven Eckelmann fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq); 1089e93d083fSSimon Wunderlich fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); 1090e93d083fSSimon Wunderlich fft_sample.noise = ah->noise; 1091e93d083fSSimon Wunderlich 1092e93d083fSSimon Wunderlich switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) { 1093e93d083fSSimon Wunderlich case 0: 1094e93d083fSSimon Wunderlich /* length correct, nothing to do. */ 1095e93d083fSSimon Wunderlich memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS); 1096e93d083fSSimon Wunderlich break; 1097e93d083fSSimon Wunderlich case -1: 1098e93d083fSSimon Wunderlich /* first byte missing, duplicate it. */ 1099e93d083fSSimon Wunderlich memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1); 1100e93d083fSSimon Wunderlich bins[0] = vdata[0]; 1101e93d083fSSimon Wunderlich break; 1102e93d083fSSimon Wunderlich case 2: 1103e93d083fSSimon Wunderlich /* MAC added 2 extra bytes at bin 30 and 32, remove them. */ 1104e93d083fSSimon Wunderlich memcpy(bins, vdata, 30); 1105e93d083fSSimon Wunderlich bins[30] = vdata[31]; 1106e93d083fSSimon Wunderlich memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31); 1107e93d083fSSimon Wunderlich break; 1108e93d083fSSimon Wunderlich case 1: 1109e93d083fSSimon Wunderlich /* MAC added 2 extra bytes AND first byte is missing. */ 1110e93d083fSSimon Wunderlich bins[0] = vdata[0]; 1111e93d083fSSimon Wunderlich memcpy(&bins[0], vdata, 30); 1112e93d083fSSimon Wunderlich bins[31] = vdata[31]; 1113e93d083fSSimon Wunderlich memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32); 1114e93d083fSSimon Wunderlich break; 1115e93d083fSSimon Wunderlich default: 11169b99e665SSimon Wunderlich return 1; 1117e93d083fSSimon Wunderlich } 1118e93d083fSSimon Wunderlich 1119e93d083fSSimon Wunderlich /* DC value (value in the middle) is the blind spot of the spectral 1120e93d083fSSimon Wunderlich * sample and invalid, interpolate it. 
	 */
	dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;

	/* mag data is at the end of the frame, in front of radar_info */
	mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;

	/* copy raw bins without scaling them */
	memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
	fft_sample.max_exp = mag_info->max_exp & 0xf;

	max_magnitude = spectral_max_magnitude(mag_info->all_bins);
	fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
	fft_sample.max_index = spectral_max_index(mag_info->all_bins);
	fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
	fft_sample.tsf = __cpu_to_be64(tsf);

	ath_debug_send_fft_sample(sc, &fft_sample.tlv);
	return 1;
#else
	return 0;
#endif
}

static void ath9k_apply_ampdu_details(struct ath_softc *sc,
	struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
	if (rs->rs_isaggr) {
		rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;

		rxs->ampdu_reference = sc->rx.ampdu_ref;

		if (!rs->rs_moreaggr) {
			rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
			sc->rx.ampdu_ref++;
		}

		if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
			rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
	}
}

static bool ath9k_is_mybeacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	if (ieee80211_is_beacon(hdr->frame_control)) {
		RX_STAT_INC(rx_beacons);
		if (!is_zero_ether_addr(common->curbssid) &&
		    ether_addr_equal(hdr->addr3, common->curbssid))
			return true;
	}

	return false;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rs.is_mybeacon = ath9k_is_mybeacon(sc, hdr_skb);

		hdr = (struct ieee80211_hdr *) (hdr_skb->data +
						ah->caps.rx_status_len);

		if (ieee80211_is_data_present(hdr->frame_control) &&
		    !ieee80211_is_qos_nullfunc(hdr->frame_control))
			sc->rx.num_pkts++;

		ath_debug_stat_rx(sc, &rs);

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		if (rs.rs_phyerr == ATH9K_PHYERR_RADAR)
			ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime);

		if (rs.rs_status & ATH9K_RXERR_PHY) {
			if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
				RX_STAT_INC(rx_spectral);
				goto requeue_drop_frag;
			}
		}

		retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs,
						 &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}
		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}
		if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC)
			goto requeue_drop_frag;

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		/*
		 * Run the LNA combining algorithm only in these cases:
		 *
		 * Standalone WLAN cards with both LNA/Antenna diversity
		 * enabled in the EEPROM.
		 *
		 * WLAN+BT cards which are in the supported card list
		 * in ath_pci_id_table and the user has loaded the
		 * driver with "bt_ant_diversity" set to true.
		 */
		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * Change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}

			if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
				if (common->bt_ant_diversity)
					ath_ant_comb_scan(sc, &rs);
			} else {
				ath_ant_comb_scan(sc, &rs);
			}
		}

		ath9k_apply_ampdu_details(sc, &rs, rxs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		if (flush)
			continue;

		if (edma) {
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			ath_rx_buf_relink(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}
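
/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * mactime handling inside ath_rx_tasklet() above widens the 32-bit
 * rs_tstamp against a 64-bit TSF sampled once per tasklet run, nudging
 * the upper half when the timestamp and the TSF snapshot fall on
 * opposite sides of a 32-bit rollover. The helper below restates that
 * correction in isolation; the name ath_extend_tsf_sketch is
 * hypothetical and nothing here is called by the code above.
 */
static inline u64 ath_extend_tsf_sketch(u64 tsf, u32 rs_tstamp)
{
	u32 tsf_lower = tsf & 0xffffffff;
	u64 mactime = (tsf & ~0xffffffffULL) | rs_tstamp;

	/* timestamp was latched before the TSF upper half incremented */
	if (rs_tstamp > tsf_lower && rs_tstamp - tsf_lower > 0x10000000)
		mactime -= 0x100000000ULL;

	/* lower 32 bits of the TSF wrapped after the timestamp was latched */
	if (rs_tstamp < tsf_lower && tsf_lower - rs_tstamp > 0x10000000)
		mactime += 0x100000000ULL;

	return mactime;
}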