1203c4805SLuis R. Rodriguez /* 25b68138eSSujith Manoharan * Copyright (c) 2008-2011 Atheros Communications Inc. 3203c4805SLuis R. Rodriguez * 4203c4805SLuis R. Rodriguez * Permission to use, copy, modify, and/or distribute this software for any 5203c4805SLuis R. Rodriguez * purpose with or without fee is hereby granted, provided that the above 6203c4805SLuis R. Rodriguez * copyright notice and this permission notice appear in all copies. 7203c4805SLuis R. Rodriguez * 8203c4805SLuis R. Rodriguez * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9203c4805SLuis R. Rodriguez * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10203c4805SLuis R. Rodriguez * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11203c4805SLuis R. Rodriguez * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12203c4805SLuis R. Rodriguez * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13203c4805SLuis R. Rodriguez * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14203c4805SLuis R. Rodriguez * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15203c4805SLuis R. Rodriguez */ 16203c4805SLuis R. Rodriguez 17b7f080cfSAlexey Dobriyan #include <linux/dma-mapping.h> 18e93d083fSSimon Wunderlich #include <linux/relay.h> 19203c4805SLuis R. Rodriguez #include "ath9k.h" 20b622a720SLuis R. Rodriguez #include "ar9003_mac.h" 21203c4805SLuis R. Rodriguez 221a04d59dSFelix Fietkau #define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb)) 23b5c80475SFelix Fietkau 24ededf1f8SVasanthakumar Thiagarajan static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 25ededf1f8SVasanthakumar Thiagarajan { 26ededf1f8SVasanthakumar Thiagarajan return sc->ps_enabled && 27ededf1f8SVasanthakumar Thiagarajan (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); 28ededf1f8SVasanthakumar Thiagarajan } 29ededf1f8SVasanthakumar Thiagarajan 30203c4805SLuis R. Rodriguez /* 31203c4805SLuis R. Rodriguez * Setup and link descriptors. 32203c4805SLuis R. Rodriguez * 33203c4805SLuis R. Rodriguez * 11N: we can no longer afford to self link the last descriptor. 34203c4805SLuis R. Rodriguez * MAC acknowledges BA status as long as it copies frames to host 35203c4805SLuis R. Rodriguez * buffer (or rx fifo). This can incorrectly acknowledge packets 36203c4805SLuis R. Rodriguez * to a sender if last desc is self-linked. 37203c4805SLuis R. Rodriguez */ 381a04d59dSFelix Fietkau static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf) 39203c4805SLuis R. Rodriguez { 40203c4805SLuis R. Rodriguez struct ath_hw *ah = sc->sc_ah; 41cc861f74SLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(ah); 42203c4805SLuis R. Rodriguez struct ath_desc *ds; 43203c4805SLuis R. Rodriguez struct sk_buff *skb; 44203c4805SLuis R. Rodriguez 45203c4805SLuis R. Rodriguez ds = bf->bf_desc; 46203c4805SLuis R. Rodriguez ds->ds_link = 0; /* link to null */ 47203c4805SLuis R. Rodriguez ds->ds_data = bf->bf_buf_addr; 48203c4805SLuis R. Rodriguez 49203c4805SLuis R. Rodriguez /* virtual addr of the beginning of the buffer. */ 50203c4805SLuis R. Rodriguez skb = bf->bf_mpdu; 519680e8a3SLuis R. Rodriguez BUG_ON(skb == NULL); 52203c4805SLuis R. Rodriguez ds->ds_vdata = skb->data; 53203c4805SLuis R. Rodriguez 54cc861f74SLuis R. Rodriguez /* 55cc861f74SLuis R. Rodriguez * setup rx descriptors. The rx_bufsize here tells the hardware 56203c4805SLuis R. Rodriguez * how much data it can DMA to us and that we are prepared 57cc861f74SLuis R. Rodriguez * to process 58cc861f74SLuis R. Rodriguez */ 59203c4805SLuis R. Rodriguez ath9k_hw_setuprxdesc(ah, ds, 60cc861f74SLuis R. Rodriguez common->rx_bufsize, 61203c4805SLuis R. Rodriguez 0); 62203c4805SLuis R. Rodriguez 63203c4805SLuis R. Rodriguez if (sc->rx.rxlink == NULL) 64203c4805SLuis R. Rodriguez ath9k_hw_putrxbuf(ah, bf->bf_daddr); 65203c4805SLuis R. Rodriguez else 66203c4805SLuis R. Rodriguez *sc->rx.rxlink = bf->bf_daddr; 67203c4805SLuis R. Rodriguez 68203c4805SLuis R. Rodriguez sc->rx.rxlink = &ds->ds_link; 69203c4805SLuis R. Rodriguez } 70203c4805SLuis R. Rodriguez 711a04d59dSFelix Fietkau static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf) 72e96542e5SFelix Fietkau { 73e96542e5SFelix Fietkau if (sc->rx.buf_hold) 74e96542e5SFelix Fietkau ath_rx_buf_link(sc, sc->rx.buf_hold); 75e96542e5SFelix Fietkau 76e96542e5SFelix Fietkau sc->rx.buf_hold = bf; 77e96542e5SFelix Fietkau } 78e96542e5SFelix Fietkau 79203c4805SLuis R. Rodriguez static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) 80203c4805SLuis R. Rodriguez { 81203c4805SLuis R. Rodriguez /* XXX block beacon interrupts */ 82203c4805SLuis R. Rodriguez ath9k_hw_setantenna(sc->sc_ah, antenna); 83203c4805SLuis R. Rodriguez sc->rx.defant = antenna; 84203c4805SLuis R. Rodriguez sc->rx.rxotherant = 0; 85203c4805SLuis R. Rodriguez } 86203c4805SLuis R. Rodriguez 87203c4805SLuis R. Rodriguez static void ath_opmode_init(struct ath_softc *sc) 88203c4805SLuis R. Rodriguez { 89203c4805SLuis R. Rodriguez struct ath_hw *ah = sc->sc_ah; 901510718dSLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(ah); 911510718dSLuis R. Rodriguez 92203c4805SLuis R. Rodriguez u32 rfilt, mfilt[2]; 93203c4805SLuis R. Rodriguez 94203c4805SLuis R. Rodriguez /* configure rx filter */ 95203c4805SLuis R. Rodriguez rfilt = ath_calcrxfilter(sc); 96203c4805SLuis R. Rodriguez ath9k_hw_setrxfilter(ah, rfilt); 97203c4805SLuis R. Rodriguez 98203c4805SLuis R. Rodriguez /* configure bssid mask */ 9913b81559SLuis R. Rodriguez ath_hw_setbssidmask(common); 100203c4805SLuis R. Rodriguez 101203c4805SLuis R. Rodriguez /* configure operational mode */ 102203c4805SLuis R. Rodriguez ath9k_hw_setopmode(ah); 103203c4805SLuis R. Rodriguez 104203c4805SLuis R. Rodriguez /* calculate and install multicast filter */ 105203c4805SLuis R. Rodriguez mfilt[0] = mfilt[1] = ~0; 106203c4805SLuis R. Rodriguez ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 107203c4805SLuis R. Rodriguez } 108203c4805SLuis R. Rodriguez 109b5c80475SFelix Fietkau static bool ath_rx_edma_buf_link(struct ath_softc *sc, 110b5c80475SFelix Fietkau enum ath9k_rx_qtype qtype) 111b5c80475SFelix Fietkau { 112b5c80475SFelix Fietkau struct ath_hw *ah = sc->sc_ah; 113b5c80475SFelix Fietkau struct ath_rx_edma *rx_edma; 114b5c80475SFelix Fietkau struct sk_buff *skb; 1151a04d59dSFelix Fietkau struct ath_rxbuf *bf; 116b5c80475SFelix Fietkau 117b5c80475SFelix Fietkau rx_edma = &sc->rx.rx_edma[qtype]; 118b5c80475SFelix Fietkau if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) 119b5c80475SFelix Fietkau return false; 120b5c80475SFelix Fietkau 1211a04d59dSFelix Fietkau bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); 122b5c80475SFelix Fietkau list_del_init(&bf->list); 123b5c80475SFelix Fietkau 124b5c80475SFelix Fietkau skb = bf->bf_mpdu; 125b5c80475SFelix Fietkau 126b5c80475SFelix Fietkau memset(skb->data, 0, ah->caps.rx_status_len); 127b5c80475SFelix Fietkau dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 128b5c80475SFelix Fietkau ah->caps.rx_status_len, DMA_TO_DEVICE); 129b5c80475SFelix Fietkau 130b5c80475SFelix Fietkau SKB_CB_ATHBUF(skb) = bf; 131b5c80475SFelix Fietkau ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); 13207236bf3SSujith Manoharan __skb_queue_tail(&rx_edma->rx_fifo, skb); 133b5c80475SFelix Fietkau 134b5c80475SFelix Fietkau return true; 135b5c80475SFelix Fietkau } 136b5c80475SFelix Fietkau 137b5c80475SFelix Fietkau static void ath_rx_addbuffer_edma(struct ath_softc *sc, 1387a897203SSujith Manoharan enum ath9k_rx_qtype qtype) 139b5c80475SFelix Fietkau { 140b5c80475SFelix Fietkau struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1411a04d59dSFelix Fietkau struct ath_rxbuf *bf, *tbf; 142b5c80475SFelix Fietkau 143b5c80475SFelix Fietkau if (list_empty(&sc->rx.rxbuf)) { 144d2182b69SJoe Perches ath_dbg(common, QUEUE, "No free rx buf available\n"); 145b5c80475SFelix Fietkau return; 146b5c80475SFelix Fietkau } 147b5c80475SFelix Fietkau 1486a01f0c0SMohammed Shafi Shajakhan list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) 149b5c80475SFelix Fietkau if (!ath_rx_edma_buf_link(sc, qtype)) 150b5c80475SFelix Fietkau break; 151b5c80475SFelix Fietkau 152b5c80475SFelix Fietkau } 153b5c80475SFelix Fietkau 154b5c80475SFelix Fietkau static void ath_rx_remove_buffer(struct ath_softc *sc, 155b5c80475SFelix Fietkau enum ath9k_rx_qtype qtype) 156b5c80475SFelix Fietkau { 1571a04d59dSFelix Fietkau struct ath_rxbuf *bf; 158b5c80475SFelix Fietkau struct ath_rx_edma *rx_edma; 159b5c80475SFelix Fietkau struct sk_buff *skb; 160b5c80475SFelix Fietkau 161b5c80475SFelix Fietkau rx_edma = &sc->rx.rx_edma[qtype]; 162b5c80475SFelix Fietkau 16307236bf3SSujith Manoharan while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) { 164b5c80475SFelix Fietkau bf = SKB_CB_ATHBUF(skb); 165b5c80475SFelix Fietkau BUG_ON(!bf); 166b5c80475SFelix Fietkau list_add_tail(&bf->list, &sc->rx.rxbuf); 167b5c80475SFelix Fietkau } 168b5c80475SFelix Fietkau } 169b5c80475SFelix Fietkau 170b5c80475SFelix Fietkau static void ath_rx_edma_cleanup(struct ath_softc *sc) 171b5c80475SFelix Fietkau { 172ba542385SMohammed Shafi Shajakhan struct ath_hw *ah = sc->sc_ah; 173ba542385SMohammed Shafi Shajakhan struct ath_common *common = ath9k_hw_common(ah); 1741a04d59dSFelix Fietkau struct ath_rxbuf *bf; 175b5c80475SFelix Fietkau 176b5c80475SFelix Fietkau ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 177b5c80475SFelix Fietkau ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 178b5c80475SFelix Fietkau 179b5c80475SFelix Fietkau list_for_each_entry(bf, &sc->rx.rxbuf, list) { 180ba542385SMohammed Shafi Shajakhan if (bf->bf_mpdu) { 181ba542385SMohammed Shafi Shajakhan dma_unmap_single(sc->dev, bf->bf_buf_addr, 182ba542385SMohammed Shafi Shajakhan common->rx_bufsize, 183ba542385SMohammed Shafi Shajakhan DMA_BIDIRECTIONAL); 184b5c80475SFelix Fietkau dev_kfree_skb_any(bf->bf_mpdu); 185ba542385SMohammed Shafi Shajakhan bf->bf_buf_addr = 0; 186ba542385SMohammed Shafi Shajakhan bf->bf_mpdu = NULL; 187ba542385SMohammed Shafi Shajakhan } 188b5c80475SFelix Fietkau } 189b5c80475SFelix Fietkau } 190b5c80475SFelix Fietkau 191b5c80475SFelix Fietkau static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) 192b5c80475SFelix Fietkau { 1935d07cca2SSujith Manoharan __skb_queue_head_init(&rx_edma->rx_fifo); 194b5c80475SFelix Fietkau rx_edma->rx_fifo_hwsize = size; 195b5c80475SFelix Fietkau } 196b5c80475SFelix Fietkau 197b5c80475SFelix Fietkau static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) 198b5c80475SFelix Fietkau { 199b5c80475SFelix Fietkau struct ath_common *common = ath9k_hw_common(sc->sc_ah); 200b5c80475SFelix Fietkau struct ath_hw *ah = sc->sc_ah; 201b5c80475SFelix Fietkau struct sk_buff *skb; 2021a04d59dSFelix Fietkau struct ath_rxbuf *bf; 203b5c80475SFelix Fietkau int error = 0, i; 204b5c80475SFelix Fietkau u32 size; 205b5c80475SFelix Fietkau 206b5c80475SFelix Fietkau ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - 207b5c80475SFelix Fietkau ah->caps.rx_status_len); 208b5c80475SFelix Fietkau 209b5c80475SFelix Fietkau ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP], 210b5c80475SFelix Fietkau ah->caps.rx_lp_qdepth); 211b5c80475SFelix Fietkau ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], 212b5c80475SFelix Fietkau ah->caps.rx_hp_qdepth); 213b5c80475SFelix Fietkau 2141a04d59dSFelix Fietkau size = sizeof(struct ath_rxbuf) * nbufs; 215b81950b1SFelix Fietkau bf = devm_kzalloc(sc->dev, size, GFP_KERNEL); 216b5c80475SFelix Fietkau if (!bf) 217b5c80475SFelix Fietkau return -ENOMEM; 218b5c80475SFelix Fietkau 219b5c80475SFelix Fietkau INIT_LIST_HEAD(&sc->rx.rxbuf); 220b5c80475SFelix Fietkau 221b5c80475SFelix Fietkau for (i = 0; i < nbufs; i++, bf++) { 222b5c80475SFelix Fietkau skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); 223b5c80475SFelix Fietkau if (!skb) { 224b5c80475SFelix Fietkau error = -ENOMEM; 225b5c80475SFelix Fietkau goto rx_init_fail; 226b5c80475SFelix Fietkau } 227b5c80475SFelix Fietkau 228b5c80475SFelix Fietkau memset(skb->data, 0, common->rx_bufsize); 229b5c80475SFelix Fietkau bf->bf_mpdu = skb; 230b5c80475SFelix Fietkau 231b5c80475SFelix Fietkau bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 232b5c80475SFelix Fietkau common->rx_bufsize, 233b5c80475SFelix Fietkau DMA_BIDIRECTIONAL); 234b5c80475SFelix Fietkau if (unlikely(dma_mapping_error(sc->dev, 235b5c80475SFelix Fietkau bf->bf_buf_addr))) { 236b5c80475SFelix Fietkau dev_kfree_skb_any(skb); 237b5c80475SFelix Fietkau bf->bf_mpdu = NULL; 2386cf9e995SBen Greear bf->bf_buf_addr = 0; 2393800276aSJoe Perches ath_err(common, 240b5c80475SFelix Fietkau "dma_mapping_error() on RX init\n"); 241b5c80475SFelix Fietkau error = -ENOMEM; 242b5c80475SFelix Fietkau goto rx_init_fail; 243b5c80475SFelix Fietkau } 244b5c80475SFelix Fietkau 245b5c80475SFelix Fietkau list_add_tail(&bf->list, &sc->rx.rxbuf); 246b5c80475SFelix Fietkau } 247b5c80475SFelix Fietkau 248b5c80475SFelix Fietkau return 0; 249b5c80475SFelix Fietkau 250b5c80475SFelix Fietkau rx_init_fail: 251b5c80475SFelix Fietkau ath_rx_edma_cleanup(sc); 252b5c80475SFelix Fietkau return error; 253b5c80475SFelix Fietkau } 254b5c80475SFelix Fietkau 255b5c80475SFelix Fietkau static void ath_edma_start_recv(struct ath_softc *sc) 256b5c80475SFelix Fietkau { 257b5c80475SFelix Fietkau ath9k_hw_rxena(sc->sc_ah); 2587a897203SSujith Manoharan ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP); 2597a897203SSujith Manoharan ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP); 260b5c80475SFelix Fietkau ath_opmode_init(sc); 2614cb54fa3SSujith Manoharan ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); 262b5c80475SFelix Fietkau } 263b5c80475SFelix Fietkau 264b5c80475SFelix Fietkau static void ath_edma_stop_recv(struct ath_softc *sc) 265b5c80475SFelix Fietkau { 266b5c80475SFelix Fietkau ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 267b5c80475SFelix Fietkau ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 268b5c80475SFelix Fietkau } 269b5c80475SFelix Fietkau 270203c4805SLuis R. Rodriguez int ath_rx_init(struct ath_softc *sc, int nbufs) 271203c4805SLuis R. Rodriguez { 27227c51f1aSLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(sc->sc_ah); 273203c4805SLuis R. Rodriguez struct sk_buff *skb; 2741a04d59dSFelix Fietkau struct ath_rxbuf *bf; 275203c4805SLuis R. Rodriguez int error = 0; 276203c4805SLuis R. Rodriguez 2774bdd1e97SLuis R. Rodriguez spin_lock_init(&sc->sc_pcu_lock); 278203c4805SLuis R. Rodriguez 2790d95521eSFelix Fietkau common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + 2800d95521eSFelix Fietkau sc->sc_ah->caps.rx_status_len; 2810d95521eSFelix Fietkau 282e87f3d53SSujith Manoharan if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 283b5c80475SFelix Fietkau return ath_rx_edma_init(sc, nbufs); 284e87f3d53SSujith Manoharan 285d2182b69SJoe Perches ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n", 286cc861f74SLuis R. Rodriguez common->cachelsz, common->rx_bufsize); 287203c4805SLuis R. Rodriguez 288203c4805SLuis R. Rodriguez /* Initialize rx descriptors */ 289203c4805SLuis R. Rodriguez 290203c4805SLuis R. Rodriguez error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 2914adfcdedSVasanthakumar Thiagarajan "rx", nbufs, 1, 0); 292203c4805SLuis R. Rodriguez if (error != 0) { 2933800276aSJoe Perches ath_err(common, 294b5c80475SFelix Fietkau "failed to allocate rx descriptors: %d\n", 295b5c80475SFelix Fietkau error); 296203c4805SLuis R. Rodriguez goto err; 297203c4805SLuis R. Rodriguez } 298203c4805SLuis R. Rodriguez 299203c4805SLuis R. Rodriguez list_for_each_entry(bf, &sc->rx.rxbuf, list) { 300b5c80475SFelix Fietkau skb = ath_rxbuf_alloc(common, common->rx_bufsize, 301b5c80475SFelix Fietkau GFP_KERNEL); 302203c4805SLuis R. Rodriguez if (skb == NULL) { 303203c4805SLuis R. Rodriguez error = -ENOMEM; 304203c4805SLuis R. Rodriguez goto err; 305203c4805SLuis R. Rodriguez } 306203c4805SLuis R. Rodriguez 307203c4805SLuis R. Rodriguez bf->bf_mpdu = skb; 308203c4805SLuis R. Rodriguez bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 309cc861f74SLuis R. Rodriguez common->rx_bufsize, 310203c4805SLuis R. Rodriguez DMA_FROM_DEVICE); 311203c4805SLuis R. Rodriguez if (unlikely(dma_mapping_error(sc->dev, 312203c4805SLuis R. Rodriguez bf->bf_buf_addr))) { 313203c4805SLuis R. Rodriguez dev_kfree_skb_any(skb); 314203c4805SLuis R. Rodriguez bf->bf_mpdu = NULL; 3156cf9e995SBen Greear bf->bf_buf_addr = 0; 3163800276aSJoe Perches ath_err(common, 317203c4805SLuis R. Rodriguez "dma_mapping_error() on RX init\n"); 318203c4805SLuis R. Rodriguez error = -ENOMEM; 319203c4805SLuis R. Rodriguez goto err; 320203c4805SLuis R. Rodriguez } 321203c4805SLuis R. Rodriguez } 322203c4805SLuis R. Rodriguez sc->rx.rxlink = NULL; 323203c4805SLuis R. Rodriguez err: 324203c4805SLuis R. Rodriguez if (error) 325203c4805SLuis R. Rodriguez ath_rx_cleanup(sc); 326203c4805SLuis R. Rodriguez 327203c4805SLuis R. Rodriguez return error; 328203c4805SLuis R. Rodriguez } 329203c4805SLuis R. Rodriguez 330203c4805SLuis R. Rodriguez void ath_rx_cleanup(struct ath_softc *sc) 331203c4805SLuis R. Rodriguez { 332cc861f74SLuis R. Rodriguez struct ath_hw *ah = sc->sc_ah; 333cc861f74SLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(ah); 334203c4805SLuis R. Rodriguez struct sk_buff *skb; 3351a04d59dSFelix Fietkau struct ath_rxbuf *bf; 336203c4805SLuis R. Rodriguez 337b5c80475SFelix Fietkau if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 338b5c80475SFelix Fietkau ath_rx_edma_cleanup(sc); 339b5c80475SFelix Fietkau return; 340e87f3d53SSujith Manoharan } 341e87f3d53SSujith Manoharan 342203c4805SLuis R. Rodriguez list_for_each_entry(bf, &sc->rx.rxbuf, list) { 343203c4805SLuis R. Rodriguez skb = bf->bf_mpdu; 344203c4805SLuis R. Rodriguez if (skb) { 345203c4805SLuis R. Rodriguez dma_unmap_single(sc->dev, bf->bf_buf_addr, 346b5c80475SFelix Fietkau common->rx_bufsize, 347b5c80475SFelix Fietkau DMA_FROM_DEVICE); 348203c4805SLuis R. Rodriguez dev_kfree_skb(skb); 3496cf9e995SBen Greear bf->bf_buf_addr = 0; 3506cf9e995SBen Greear bf->bf_mpdu = NULL; 351203c4805SLuis R. Rodriguez } 352203c4805SLuis R. Rodriguez } 353203c4805SLuis R. Rodriguez } 354203c4805SLuis R. Rodriguez 355203c4805SLuis R. Rodriguez /* 356203c4805SLuis R. Rodriguez * Calculate the receive filter according to the 357203c4805SLuis R. Rodriguez * operating mode and state: 358203c4805SLuis R. Rodriguez * 359203c4805SLuis R. Rodriguez * o always accept unicast, broadcast, and multicast traffic 360203c4805SLuis R. Rodriguez * o maintain current state of phy error reception (the hal 361203c4805SLuis R. Rodriguez * may enable phy error frames for noise immunity work) 362203c4805SLuis R. Rodriguez * o probe request frames are accepted only when operating in 363203c4805SLuis R. Rodriguez * hostap, adhoc, or monitor modes 364203c4805SLuis R. Rodriguez * o enable promiscuous mode according to the interface state 365203c4805SLuis R. Rodriguez * o accept beacons: 366203c4805SLuis R. Rodriguez * - when operating in adhoc mode so the 802.11 layer creates 367203c4805SLuis R. Rodriguez * node table entries for peers, 368203c4805SLuis R. Rodriguez * - when operating in station mode for collecting rssi data when 369203c4805SLuis R. Rodriguez * the station is otherwise quiet, or 370203c4805SLuis R. Rodriguez * - when operating as a repeater so we see repeater-sta beacons 371203c4805SLuis R. Rodriguez * - when scanning 372203c4805SLuis R. Rodriguez */ 373203c4805SLuis R. Rodriguez 374203c4805SLuis R. Rodriguez u32 ath_calcrxfilter(struct ath_softc *sc) 375203c4805SLuis R. Rodriguez { 376203c4805SLuis R. Rodriguez u32 rfilt; 377203c4805SLuis R. Rodriguez 378*89f927afSLuis R. Rodriguez if (config_enabled(CONFIG_ATH9K_TX99)) 379*89f927afSLuis R. Rodriguez return 0; 380*89f927afSLuis R. Rodriguez 381ac06697cSFelix Fietkau rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 382203c4805SLuis R. Rodriguez | ATH9K_RX_FILTER_MCAST; 383203c4805SLuis R. Rodriguez 38473e4937dSZefir Kurtisi /* if operating on a DFS channel, enable radar pulse detection */ 38573e4937dSZefir Kurtisi if (sc->hw->conf.radar_enabled) 38673e4937dSZefir Kurtisi rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR; 38773e4937dSZefir Kurtisi 3889c1d8e4aSJouni Malinen if (sc->rx.rxfilter & FIF_PROBE_REQ) 389203c4805SLuis R. Rodriguez rfilt |= ATH9K_RX_FILTER_PROBEREQ; 390203c4805SLuis R. Rodriguez 391203c4805SLuis R. Rodriguez /* 392203c4805SLuis R. Rodriguez * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station 393203c4805SLuis R. Rodriguez * mode interface or when in monitor mode. AP mode does not need this 394203c4805SLuis R. Rodriguez * since it receives all in-BSS frames anyway. 395203c4805SLuis R. Rodriguez */ 3962e286947SFelix Fietkau if (sc->sc_ah->is_monitoring) 397203c4805SLuis R. Rodriguez rfilt |= ATH9K_RX_FILTER_PROM; 398203c4805SLuis R. Rodriguez 399203c4805SLuis R. Rodriguez if (sc->rx.rxfilter & FIF_CONTROL) 400203c4805SLuis R. Rodriguez rfilt |= ATH9K_RX_FILTER_CONTROL; 401203c4805SLuis R. Rodriguez 402203c4805SLuis R. Rodriguez if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 403cfda6695SBen Greear (sc->nvifs <= 1) && 404203c4805SLuis R. Rodriguez !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) 405203c4805SLuis R. Rodriguez rfilt |= ATH9K_RX_FILTER_MYBEACON; 406203c4805SLuis R. Rodriguez else 407203c4805SLuis R. Rodriguez rfilt |= ATH9K_RX_FILTER_BEACON; 408203c4805SLuis R. Rodriguez 409264bbec8SFelix Fietkau if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 41066afad01SSenthil Balasubramanian (sc->rx.rxfilter & FIF_PSPOLL)) 411203c4805SLuis R. Rodriguez rfilt |= ATH9K_RX_FILTER_PSPOLL; 412203c4805SLuis R. Rodriguez 4137ea310beSSujith if (conf_is_ht(&sc->hw->conf)) 4147ea310beSSujith rfilt |= ATH9K_RX_FILTER_COMP_BAR; 4157ea310beSSujith 4167545daf4SFelix Fietkau if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) { 417a549459cSThomas Wagner /* This is needed for older chips */ 418a549459cSThomas Wagner if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160) 4195eb6ba83SJavier Cardona rfilt |= ATH9K_RX_FILTER_PROM; 420203c4805SLuis R. Rodriguez rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 421203c4805SLuis R. Rodriguez } 422203c4805SLuis R. Rodriguez 423b3d7aa43SGabor Juhos if (AR_SREV_9550(sc->sc_ah)) 424b3d7aa43SGabor Juhos rfilt |= ATH9K_RX_FILTER_4ADDRESS; 425b3d7aa43SGabor Juhos 426203c4805SLuis R. Rodriguez return rfilt; 427203c4805SLuis R. Rodriguez 428203c4805SLuis R. Rodriguez } 429203c4805SLuis R. Rodriguez 430203c4805SLuis R. Rodriguez int ath_startrecv(struct ath_softc *sc) 431203c4805SLuis R. Rodriguez { 432203c4805SLuis R. Rodriguez struct ath_hw *ah = sc->sc_ah; 4331a04d59dSFelix Fietkau struct ath_rxbuf *bf, *tbf; 434203c4805SLuis R. Rodriguez 435b5c80475SFelix Fietkau if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 436b5c80475SFelix Fietkau ath_edma_start_recv(sc); 437b5c80475SFelix Fietkau return 0; 438b5c80475SFelix Fietkau } 439b5c80475SFelix Fietkau 440203c4805SLuis R. Rodriguez if (list_empty(&sc->rx.rxbuf)) 441203c4805SLuis R. Rodriguez goto start_recv; 442203c4805SLuis R. Rodriguez 443e96542e5SFelix Fietkau sc->rx.buf_hold = NULL; 444203c4805SLuis R. Rodriguez sc->rx.rxlink = NULL; 445203c4805SLuis R. Rodriguez list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { 446203c4805SLuis R. Rodriguez ath_rx_buf_link(sc, bf); 447203c4805SLuis R. Rodriguez } 448203c4805SLuis R. Rodriguez 449203c4805SLuis R. Rodriguez /* We could have deleted elements so the list may be empty now */ 450203c4805SLuis R. Rodriguez if (list_empty(&sc->rx.rxbuf)) 451203c4805SLuis R. Rodriguez goto start_recv; 452203c4805SLuis R. Rodriguez 4531a04d59dSFelix Fietkau bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); 454203c4805SLuis R. Rodriguez ath9k_hw_putrxbuf(ah, bf->bf_daddr); 455203c4805SLuis R. Rodriguez ath9k_hw_rxena(ah); 456203c4805SLuis R. Rodriguez 457203c4805SLuis R. Rodriguez start_recv: 458203c4805SLuis R. Rodriguez ath_opmode_init(sc); 4594cb54fa3SSujith Manoharan ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); 460203c4805SLuis R. Rodriguez 461203c4805SLuis R. Rodriguez return 0; 462203c4805SLuis R. Rodriguez } 463203c4805SLuis R. Rodriguez 4644b883f02SFelix Fietkau static void ath_flushrecv(struct ath_softc *sc) 4654b883f02SFelix Fietkau { 4664b883f02SFelix Fietkau if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 4674b883f02SFelix Fietkau ath_rx_tasklet(sc, 1, true); 4684b883f02SFelix Fietkau ath_rx_tasklet(sc, 1, false); 4694b883f02SFelix Fietkau } 4704b883f02SFelix Fietkau 471203c4805SLuis R. Rodriguez bool ath_stoprecv(struct ath_softc *sc) 472203c4805SLuis R. Rodriguez { 473203c4805SLuis R. Rodriguez struct ath_hw *ah = sc->sc_ah; 4745882da02SFelix Fietkau bool stopped, reset = false; 475203c4805SLuis R. Rodriguez 476d47844a0SFelix Fietkau ath9k_hw_abortpcurecv(ah); 477203c4805SLuis R. Rodriguez ath9k_hw_setrxfilter(ah, 0); 4785882da02SFelix Fietkau stopped = ath9k_hw_stopdmarecv(ah, &reset); 479b5c80475SFelix Fietkau 4804b883f02SFelix Fietkau ath_flushrecv(sc); 4814b883f02SFelix Fietkau 482b5c80475SFelix Fietkau if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 483b5c80475SFelix Fietkau ath_edma_stop_recv(sc); 484b5c80475SFelix Fietkau else 485203c4805SLuis R. Rodriguez sc->rx.rxlink = NULL; 486203c4805SLuis R. Rodriguez 487d584747bSRajkumar Manoharan if (!(ah->ah_flags & AH_UNPLUGGED) && 488d584747bSRajkumar Manoharan unlikely(!stopped)) { 489d7fd1b50SBen Greear ath_err(ath9k_hw_common(sc->sc_ah), 490d7fd1b50SBen Greear "Could not stop RX, we could be " 49178a7685eSLuis R. Rodriguez "confusing the DMA engine when we start RX up\n"); 492d7fd1b50SBen Greear ATH_DBG_WARN_ON_ONCE(!stopped); 493d7fd1b50SBen Greear } 4942232d31bSFelix Fietkau return stopped && !reset; 495203c4805SLuis R. Rodriguez } 496203c4805SLuis R. Rodriguez 497cc65965cSJouni Malinen static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 498cc65965cSJouni Malinen { 499cc65965cSJouni Malinen /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ 500cc65965cSJouni Malinen struct ieee80211_mgmt *mgmt; 501cc65965cSJouni Malinen u8 *pos, *end, id, elen; 502cc65965cSJouni Malinen struct ieee80211_tim_ie *tim; 503cc65965cSJouni Malinen 504cc65965cSJouni Malinen mgmt = (struct ieee80211_mgmt *)skb->data; 505cc65965cSJouni Malinen pos = mgmt->u.beacon.variable; 506cc65965cSJouni Malinen end = skb->data + skb->len; 507cc65965cSJouni Malinen 508cc65965cSJouni Malinen while (pos + 2 < end) { 509cc65965cSJouni Malinen id = *pos++; 510cc65965cSJouni Malinen elen = *pos++; 511cc65965cSJouni Malinen if (pos + elen > end) 512cc65965cSJouni Malinen break; 513cc65965cSJouni Malinen 514cc65965cSJouni Malinen if (id == WLAN_EID_TIM) { 515cc65965cSJouni Malinen if (elen < sizeof(*tim)) 516cc65965cSJouni Malinen break; 517cc65965cSJouni Malinen tim = (struct ieee80211_tim_ie *) pos; 518cc65965cSJouni Malinen if (tim->dtim_count != 0) 519cc65965cSJouni Malinen break; 520cc65965cSJouni Malinen return tim->bitmap_ctrl & 0x01; 521cc65965cSJouni Malinen } 522cc65965cSJouni Malinen 523cc65965cSJouni Malinen pos += elen; 524cc65965cSJouni Malinen } 525cc65965cSJouni Malinen 526cc65965cSJouni Malinen return false; 527cc65965cSJouni Malinen } 528cc65965cSJouni Malinen 529cc65965cSJouni Malinen static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 530cc65965cSJouni Malinen { 5311510718dSLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(sc->sc_ah); 532cc65965cSJouni Malinen 533cc65965cSJouni Malinen if (skb->len < 24 + 8 + 2 + 2) 534cc65965cSJouni Malinen return; 535cc65965cSJouni Malinen 5361b04b930SSujith sc->ps_flags &= ~PS_WAIT_FOR_BEACON; 537293dc5dfSGabor Juhos 5381b04b930SSujith if (sc->ps_flags & PS_BEACON_SYNC) { 5391b04b930SSujith sc->ps_flags &= ~PS_BEACON_SYNC; 540d2182b69SJoe Perches ath_dbg(common, PS, 5411a6404a1SSujith Manoharan "Reconfigure beacon timers based on synchronized timestamp\n"); 542ef4ad633SSujith Manoharan ath9k_set_beacon(sc); 543ccdfeab6SJouni Malinen } 544ccdfeab6SJouni Malinen 545cc65965cSJouni Malinen if (ath_beacon_dtim_pending_cab(skb)) { 546cc65965cSJouni Malinen /* 547cc65965cSJouni Malinen * Remain awake waiting for buffered broadcast/multicast 54858f5fffdSGabor Juhos * frames. If the last broadcast/multicast frame is not 54958f5fffdSGabor Juhos * received properly, the next beacon frame will work as 55058f5fffdSGabor Juhos * a backup trigger for returning into NETWORK SLEEP state, 55158f5fffdSGabor Juhos * so we are waiting for it as well. 552cc65965cSJouni Malinen */ 553d2182b69SJoe Perches ath_dbg(common, PS, 554226afe68SJoe Perches "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n"); 5551b04b930SSujith sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; 556cc65965cSJouni Malinen return; 557cc65965cSJouni Malinen } 558cc65965cSJouni Malinen 5591b04b930SSujith if (sc->ps_flags & PS_WAIT_FOR_CAB) { 560cc65965cSJouni Malinen /* 561cc65965cSJouni Malinen * This can happen if a broadcast frame is dropped or the AP 562cc65965cSJouni Malinen * fails to send a frame indicating that all CAB frames have 563cc65965cSJouni Malinen * been delivered. 564cc65965cSJouni Malinen */ 5651b04b930SSujith sc->ps_flags &= ~PS_WAIT_FOR_CAB; 566d2182b69SJoe Perches ath_dbg(common, PS, "PS wait for CAB frames timed out\n"); 567cc65965cSJouni Malinen } 568cc65965cSJouni Malinen } 569cc65965cSJouni Malinen 570f73c604cSRajkumar Manoharan static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon) 571cc65965cSJouni Malinen { 572cc65965cSJouni Malinen struct ieee80211_hdr *hdr; 573c46917bbSLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(sc->sc_ah); 574cc65965cSJouni Malinen 575cc65965cSJouni Malinen hdr = (struct ieee80211_hdr *)skb->data; 576cc65965cSJouni Malinen 577cc65965cSJouni Malinen /* Process Beacon and CAB receive in PS state */ 578ededf1f8SVasanthakumar Thiagarajan if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 57907c15a3fSSujith Manoharan && mybeacon) { 580cc65965cSJouni Malinen ath_rx_ps_beacon(sc, skb); 58107c15a3fSSujith Manoharan } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 582cc65965cSJouni Malinen (ieee80211_is_data(hdr->frame_control) || 583cc65965cSJouni Malinen ieee80211_is_action(hdr->frame_control)) && 584cc65965cSJouni Malinen is_multicast_ether_addr(hdr->addr1) && 585cc65965cSJouni Malinen !ieee80211_has_moredata(hdr->frame_control)) { 586cc65965cSJouni Malinen /* 587cc65965cSJouni Malinen * No more broadcast/multicast frames to be received at this 588cc65965cSJouni Malinen * point. 589cc65965cSJouni Malinen */ 5903fac6dfdSSenthil Balasubramanian sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); 591d2182b69SJoe Perches ath_dbg(common, PS, 592c46917bbSLuis R. Rodriguez "All PS CAB frames received, back to sleep\n"); 5931b04b930SSujith } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 5949a23f9caSJouni Malinen !is_multicast_ether_addr(hdr->addr1) && 5959a23f9caSJouni Malinen !ieee80211_has_morefrags(hdr->frame_control)) { 5961b04b930SSujith sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; 597d2182b69SJoe Perches ath_dbg(common, PS, 598226afe68SJoe Perches "Going back to sleep after having received PS-Poll data (0x%lx)\n", 5991b04b930SSujith sc->ps_flags & (PS_WAIT_FOR_BEACON | 6001b04b930SSujith PS_WAIT_FOR_CAB | 6011b04b930SSujith PS_WAIT_FOR_PSPOLL_DATA | 6021b04b930SSujith PS_WAIT_FOR_TX_ACK)); 603cc65965cSJouni Malinen } 604cc65965cSJouni Malinen } 605cc65965cSJouni Malinen 606b5c80475SFelix Fietkau static bool ath_edma_get_buffers(struct ath_softc *sc, 6073a2923e8SFelix Fietkau enum ath9k_rx_qtype qtype, 6083a2923e8SFelix Fietkau struct ath_rx_status *rs, 6091a04d59dSFelix Fietkau struct ath_rxbuf **dest) 610203c4805SLuis R. Rodriguez { 611b5c80475SFelix Fietkau struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 612203c4805SLuis R. Rodriguez struct ath_hw *ah = sc->sc_ah; 61327c51f1aSLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(ah); 614b5c80475SFelix Fietkau struct sk_buff *skb; 6151a04d59dSFelix Fietkau struct ath_rxbuf *bf; 616b5c80475SFelix Fietkau int ret; 617203c4805SLuis R. Rodriguez 618b5c80475SFelix Fietkau skb = skb_peek(&rx_edma->rx_fifo); 619b5c80475SFelix Fietkau if (!skb) 620b5c80475SFelix Fietkau return false; 621203c4805SLuis R. Rodriguez 622b5c80475SFelix Fietkau bf = SKB_CB_ATHBUF(skb); 623b5c80475SFelix Fietkau BUG_ON(!bf); 624b5c80475SFelix Fietkau 625ce9426d1SMing Lei dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 626b5c80475SFelix Fietkau common->rx_bufsize, DMA_FROM_DEVICE); 627b5c80475SFelix Fietkau 6283a2923e8SFelix Fietkau ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data); 629ce9426d1SMing Lei if (ret == -EINPROGRESS) { 630ce9426d1SMing Lei /*let device gain the buffer again*/ 631ce9426d1SMing Lei dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 632ce9426d1SMing Lei common->rx_bufsize, DMA_FROM_DEVICE); 633b5c80475SFelix Fietkau return false; 634ce9426d1SMing Lei } 635b5c80475SFelix Fietkau 636b5c80475SFelix Fietkau __skb_unlink(skb, &rx_edma->rx_fifo); 637b5c80475SFelix Fietkau if (ret == -EINVAL) { 638b5c80475SFelix Fietkau /* corrupt descriptor, skip this one and the following one */ 639b5c80475SFelix Fietkau list_add_tail(&bf->list, &sc->rx.rxbuf); 640b5c80475SFelix Fietkau ath_rx_edma_buf_link(sc, qtype); 641b5c80475SFelix Fietkau 6423a2923e8SFelix Fietkau skb = skb_peek(&rx_edma->rx_fifo); 6433a2923e8SFelix Fietkau if (skb) { 644b5c80475SFelix Fietkau bf = SKB_CB_ATHBUF(skb); 645b5c80475SFelix Fietkau BUG_ON(!bf); 646b5c80475SFelix Fietkau 647b5c80475SFelix Fietkau __skb_unlink(skb, &rx_edma->rx_fifo); 648b5c80475SFelix Fietkau list_add_tail(&bf->list, &sc->rx.rxbuf); 649b5c80475SFelix Fietkau ath_rx_edma_buf_link(sc, qtype); 650b5c80475SFelix Fietkau } 6516bb51c70STom Hughes 6526bb51c70STom Hughes bf = NULL; 6533a2923e8SFelix Fietkau } 654b5c80475SFelix Fietkau 6553a2923e8SFelix Fietkau *dest = bf; 656b5c80475SFelix Fietkau return true; 657b5c80475SFelix Fietkau } 658b5c80475SFelix Fietkau 6591a04d59dSFelix Fietkau static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc, 660b5c80475SFelix Fietkau struct ath_rx_status *rs, 661b5c80475SFelix Fietkau enum ath9k_rx_qtype qtype) 662b5c80475SFelix Fietkau { 6631a04d59dSFelix Fietkau struct ath_rxbuf *bf = NULL; 664b5c80475SFelix Fietkau 6653a2923e8SFelix Fietkau while (ath_edma_get_buffers(sc, qtype, rs, &bf)) { 6663a2923e8SFelix Fietkau if (!bf) 6673a2923e8SFelix Fietkau continue; 668b5c80475SFelix Fietkau 669b5c80475SFelix Fietkau return bf; 670b5c80475SFelix Fietkau } 6713a2923e8SFelix Fietkau return NULL; 6723a2923e8SFelix Fietkau } 673b5c80475SFelix Fietkau 6741a04d59dSFelix Fietkau static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc, 675b5c80475SFelix Fietkau struct ath_rx_status *rs) 676b5c80475SFelix Fietkau { 677b5c80475SFelix Fietkau struct ath_hw *ah = sc->sc_ah; 678b5c80475SFelix Fietkau struct ath_common *common = ath9k_hw_common(ah); 679b5c80475SFelix Fietkau struct ath_desc *ds; 6801a04d59dSFelix Fietkau struct ath_rxbuf *bf; 681b5c80475SFelix Fietkau int ret; 682203c4805SLuis R. Rodriguez 683203c4805SLuis R. Rodriguez if (list_empty(&sc->rx.rxbuf)) { 684203c4805SLuis R. Rodriguez sc->rx.rxlink = NULL; 685b5c80475SFelix Fietkau return NULL; 686203c4805SLuis R. Rodriguez } 687203c4805SLuis R. Rodriguez 6881a04d59dSFelix Fietkau bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); 689e96542e5SFelix Fietkau if (bf == sc->rx.buf_hold) 690e96542e5SFelix Fietkau return NULL; 691e96542e5SFelix Fietkau 692203c4805SLuis R. Rodriguez ds = bf->bf_desc; 693203c4805SLuis R. Rodriguez 694203c4805SLuis R. Rodriguez /* 695203c4805SLuis R. Rodriguez * Must provide the virtual address of the current 696203c4805SLuis R. Rodriguez * descriptor, the physical address, and the virtual 697203c4805SLuis R. Rodriguez * address of the next descriptor in the h/w chain. 698203c4805SLuis R. Rodriguez * This allows the HAL to look ahead to see if the 699203c4805SLuis R. Rodriguez * hardware is done with a descriptor by checking the 700203c4805SLuis R. Rodriguez * done bit in the following descriptor and the address 701203c4805SLuis R. Rodriguez * of the current descriptor the DMA engine is working 702203c4805SLuis R. Rodriguez * on. All this is necessary because of our use of 703203c4805SLuis R. Rodriguez * a self-linked list to avoid rx overruns. 704203c4805SLuis R. Rodriguez */ 7053de21116SRajkumar Manoharan ret = ath9k_hw_rxprocdesc(ah, ds, rs); 706b5c80475SFelix Fietkau if (ret == -EINPROGRESS) { 70729bffa96SFelix Fietkau struct ath_rx_status trs; 7081a04d59dSFelix Fietkau struct ath_rxbuf *tbf; 709203c4805SLuis R. Rodriguez struct ath_desc *tds; 710203c4805SLuis R. Rodriguez 71129bffa96SFelix Fietkau memset(&trs, 0, sizeof(trs)); 712203c4805SLuis R. Rodriguez if (list_is_last(&bf->list, &sc->rx.rxbuf)) { 713203c4805SLuis R. Rodriguez sc->rx.rxlink = NULL; 714b5c80475SFelix Fietkau return NULL; 715203c4805SLuis R. Rodriguez } 716203c4805SLuis R. Rodriguez 7171a04d59dSFelix Fietkau tbf = list_entry(bf->list.next, struct ath_rxbuf, list); 718203c4805SLuis R. Rodriguez 719203c4805SLuis R. Rodriguez /* 720203c4805SLuis R. Rodriguez * On some hardware the descriptor status words could 721203c4805SLuis R. Rodriguez * get corrupted, including the done bit. Because of 722203c4805SLuis R. Rodriguez * this, check if the next descriptor's done bit is 723203c4805SLuis R. Rodriguez * set or not. 724203c4805SLuis R. Rodriguez * 725203c4805SLuis R. Rodriguez * If the next descriptor's done bit is set, the current 726203c4805SLuis R. Rodriguez * descriptor has been corrupted. Force s/w to discard 727203c4805SLuis R. Rodriguez * this descriptor and continue... 728203c4805SLuis R. Rodriguez */ 729203c4805SLuis R. Rodriguez 730203c4805SLuis R. Rodriguez tds = tbf->bf_desc; 7313de21116SRajkumar Manoharan ret = ath9k_hw_rxprocdesc(ah, tds, &trs); 732b5c80475SFelix Fietkau if (ret == -EINPROGRESS) 733b5c80475SFelix Fietkau return NULL; 734723e7113SFelix Fietkau 735723e7113SFelix Fietkau /* 736723e7113SFelix Fietkau * mark descriptor as zero-length and set the 'more' 737723e7113SFelix Fietkau * flag to ensure that both buffers get discarded 738723e7113SFelix Fietkau */ 739723e7113SFelix Fietkau rs->rs_datalen = 0; 740723e7113SFelix Fietkau rs->rs_more = true; 741203c4805SLuis R. Rodriguez } 742203c4805SLuis R. Rodriguez 743a3dc48e8SFelix Fietkau list_del(&bf->list); 744b5c80475SFelix Fietkau if (!bf->bf_mpdu) 745b5c80475SFelix Fietkau return bf; 746203c4805SLuis R. Rodriguez 747203c4805SLuis R. Rodriguez /* 748203c4805SLuis R. Rodriguez * Synchronize the DMA transfer with CPU before 749203c4805SLuis R. Rodriguez * 1. accessing the frame 750203c4805SLuis R. Rodriguez * 2. requeueing the same buffer to h/w 751203c4805SLuis R. Rodriguez */ 752ce9426d1SMing Lei dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 753cc861f74SLuis R. Rodriguez common->rx_bufsize, 754203c4805SLuis R. Rodriguez DMA_FROM_DEVICE); 755203c4805SLuis R. Rodriguez 756b5c80475SFelix Fietkau return bf; 757b5c80475SFelix Fietkau } 758b5c80475SFelix Fietkau 759d435700fSSujith /* Assumes you've already done the endian to CPU conversion */ 760d435700fSSujith static bool ath9k_rx_accept(struct ath_common *common, 7619f167f64SVasanthakumar Thiagarajan struct ieee80211_hdr *hdr, 762d435700fSSujith struct ieee80211_rx_status *rxs, 763d435700fSSujith struct ath_rx_status *rx_stats, 764d435700fSSujith bool *decrypt_error) 765d435700fSSujith { 766ec205999SFelix Fietkau struct ath_softc *sc = (struct ath_softc *) common->priv; 76766760eacSFelix Fietkau bool is_mc, is_valid_tkip, strip_mic, mic_error; 768d435700fSSujith struct ath_hw *ah = common->ah; 769d435700fSSujith __le16 fc; 770d435700fSSujith 771d435700fSSujith fc = hdr->frame_control; 772d435700fSSujith 77366760eacSFelix Fietkau is_mc = !!is_multicast_ether_addr(hdr->addr1); 77466760eacSFelix Fietkau is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && 77566760eacSFelix Fietkau test_bit(rx_stats->rs_keyix, common->tkip_keymap); 776152e585dSBill Jordan strip_mic = is_valid_tkip && ieee80211_is_data(fc) && 7772a5783b8SMichael Liang ieee80211_has_protected(fc) && 778152e585dSBill Jordan !(rx_stats->rs_status & 779846d9363SFelix Fietkau (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | 780846d9363SFelix Fietkau ATH9K_RXERR_KEYMISS)); 78166760eacSFelix Fietkau 782f88373faSFelix Fietkau /* 783f88373faSFelix Fietkau * Key miss events are only relevant for pairwise keys where the 784f88373faSFelix Fietkau * descriptor does contain a valid key index. This has been observed 785f88373faSFelix Fietkau * mostly with CCMP encryption. 786f88373faSFelix Fietkau */ 787bed3d9c0SFelix Fietkau if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID || 788bed3d9c0SFelix Fietkau !test_bit(rx_stats->rs_keyix, common->ccmp_keymap)) 789f88373faSFelix Fietkau rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; 790f88373faSFelix Fietkau 79166760eacSFelix Fietkau mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) && 79266760eacSFelix Fietkau !ieee80211_has_morefrags(fc) && 79366760eacSFelix Fietkau !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 79466760eacSFelix Fietkau (rx_stats->rs_status & ATH9K_RXERR_MIC); 79566760eacSFelix Fietkau 796d435700fSSujith /* 797d435700fSSujith * The rx_stats->rs_status will not be set until the end of the 798d435700fSSujith * chained descriptors so it can be ignored if rs_more is set. The 799d435700fSSujith * rs_more will be false at the last element of the chained 800d435700fSSujith * descriptors. 801d435700fSSujith */ 802d435700fSSujith if (rx_stats->rs_status != 0) { 803846d9363SFelix Fietkau u8 status_mask; 804846d9363SFelix Fietkau 80566760eacSFelix Fietkau if (rx_stats->rs_status & ATH9K_RXERR_CRC) { 806d435700fSSujith rxs->flag |= RX_FLAG_FAILED_FCS_CRC; 80766760eacSFelix Fietkau mic_error = false; 80866760eacSFelix Fietkau } 809d435700fSSujith 810846d9363SFelix Fietkau if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) || 811846d9363SFelix Fietkau (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) { 812d435700fSSujith *decrypt_error = true; 81366760eacSFelix Fietkau mic_error = false; 814d435700fSSujith } 81566760eacSFelix Fietkau 816d435700fSSujith /* 817d435700fSSujith * Reject error frames with the exception of 818d435700fSSujith * decryption and MIC failures. For monitor mode, 819d435700fSSujith * we also ignore the CRC error. 820d435700fSSujith */ 821846d9363SFelix Fietkau status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 822846d9363SFelix Fietkau ATH9K_RXERR_KEYMISS; 823846d9363SFelix Fietkau 824ec205999SFelix Fietkau if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL)) 825846d9363SFelix Fietkau status_mask |= ATH9K_RXERR_CRC; 826846d9363SFelix Fietkau 827846d9363SFelix Fietkau if (rx_stats->rs_status & ~status_mask) 828d435700fSSujith return false; 829d435700fSSujith } 83066760eacSFelix Fietkau 83166760eacSFelix Fietkau /* 83266760eacSFelix Fietkau * For unicast frames the MIC error bit can have false positives, 83366760eacSFelix Fietkau * so all MIC error reports need to be validated in software. 83466760eacSFelix Fietkau * False negatives are not common, so skip software verification 83566760eacSFelix Fietkau * if the hardware considers the MIC valid. 83666760eacSFelix Fietkau */ 83766760eacSFelix Fietkau if (strip_mic) 83866760eacSFelix Fietkau rxs->flag |= RX_FLAG_MMIC_STRIPPED; 83966760eacSFelix Fietkau else if (is_mc && mic_error) 84066760eacSFelix Fietkau rxs->flag |= RX_FLAG_MMIC_ERROR; 84166760eacSFelix Fietkau 842d435700fSSujith return true; 843d435700fSSujith } 844d435700fSSujith 845d435700fSSujith static int ath9k_process_rate(struct ath_common *common, 846d435700fSSujith struct ieee80211_hw *hw, 847d435700fSSujith struct ath_rx_status *rx_stats, 8489f167f64SVasanthakumar Thiagarajan struct ieee80211_rx_status *rxs) 849d435700fSSujith { 850d435700fSSujith struct ieee80211_supported_band *sband; 851d435700fSSujith enum ieee80211_band band; 852d435700fSSujith unsigned int i = 0; 853990e08a0SBen Greear struct ath_softc __maybe_unused *sc = common->priv; 854d435700fSSujith 855675a0b04SKarl Beldan band = hw->conf.chandef.chan->band; 856d435700fSSujith sband = hw->wiphy->bands[band]; 857d435700fSSujith 858f819c0e7SSimon Wunderlich switch (hw->conf.chandef.width) { 859f819c0e7SSimon Wunderlich case NL80211_CHAN_WIDTH_5: 860f819c0e7SSimon Wunderlich rxs->flag |= RX_FLAG_5MHZ; 861f819c0e7SSimon Wunderlich break; 862f819c0e7SSimon Wunderlich case NL80211_CHAN_WIDTH_10: 863f819c0e7SSimon Wunderlich rxs->flag |= RX_FLAG_10MHZ; 864f819c0e7SSimon Wunderlich break; 865f819c0e7SSimon Wunderlich default: 866f819c0e7SSimon Wunderlich break; 867f819c0e7SSimon Wunderlich } 868f819c0e7SSimon Wunderlich 869d435700fSSujith if (rx_stats->rs_rate & 0x80) { 870d435700fSSujith /* HT rate */ 871d435700fSSujith rxs->flag |= RX_FLAG_HT; 872ab276103SOleksij Rempel rxs->flag |= rx_stats->flag; 873d435700fSSujith rxs->rate_idx = rx_stats->rs_rate & 0x7f; 874d435700fSSujith return 0; 875d435700fSSujith } 876d435700fSSujith 877d435700fSSujith for (i = 0; i < sband->n_bitrates; i++) { 878d435700fSSujith if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { 879d435700fSSujith rxs->rate_idx = i; 880d435700fSSujith return 0; 881d435700fSSujith } 882d435700fSSujith if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { 883d435700fSSujith rxs->flag |= RX_FLAG_SHORTPRE; 884d435700fSSujith rxs->rate_idx = i; 885d435700fSSujith return 0; 886d435700fSSujith } 887d435700fSSujith } 888d435700fSSujith 889d435700fSSujith /* 890d435700fSSujith * No valid hardware bitrate found -- we should not get here 891d435700fSSujith * because hardware has already validated this frame as OK. 892d435700fSSujith */ 893d2182b69SJoe Perches ath_dbg(common, ANY, 894226afe68SJoe Perches "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", 895226afe68SJoe Perches rx_stats->rs_rate); 89615072189SBen Greear RX_STAT_INC(rx_rate_err); 897d435700fSSujith return -EINVAL; 898d435700fSSujith } 899d435700fSSujith 900d435700fSSujith static void ath9k_process_rssi(struct ath_common *common, 901d435700fSSujith struct ieee80211_hw *hw, 902e3acd13dSSujith Manoharan struct ath_rx_status *rx_stats, 903e3acd13dSSujith Manoharan struct ieee80211_rx_status *rxs) 904d435700fSSujith { 9059ac58615SFelix Fietkau struct ath_softc *sc = hw->priv; 906d435700fSSujith struct ath_hw *ah = common->ah; 9079fa23e17SFelix Fietkau int last_rssi; 9082ef16755SFelix Fietkau int rssi = rx_stats->rs_rssi; 909d435700fSSujith 910e3acd13dSSujith Manoharan /* 911e3acd13dSSujith Manoharan * RSSI is not available for subframes in an A-MPDU. 912e3acd13dSSujith Manoharan */ 913e3acd13dSSujith Manoharan if (rx_stats->rs_moreaggr) { 914e3acd13dSSujith Manoharan rxs->flag |= RX_FLAG_NO_SIGNAL_VAL; 9159fa23e17SFelix Fietkau return; 916e3acd13dSSujith Manoharan } 9179fa23e17SFelix Fietkau 918e3acd13dSSujith Manoharan /* 919e3acd13dSSujith Manoharan * Check if the RSSI for the last subframe in an A-MPDU 920e3acd13dSSujith Manoharan * or an unaggregated frame is valid. 921e3acd13dSSujith Manoharan */ 922e3acd13dSSujith Manoharan if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) { 923e3acd13dSSujith Manoharan rxs->flag |= RX_FLAG_NO_SIGNAL_VAL; 924e3acd13dSSujith Manoharan return; 925e3acd13dSSujith Manoharan } 926e3acd13dSSujith Manoharan 927e3acd13dSSujith Manoharan /* 928e3acd13dSSujith Manoharan * Update Beacon RSSI, this is used by ANI. 929e3acd13dSSujith Manoharan */ 930e3acd13dSSujith Manoharan if (rx_stats->is_mybeacon && 931e3acd13dSSujith Manoharan ((ah->opmode == NL80211_IFTYPE_STATION) || 932e3acd13dSSujith Manoharan (ah->opmode == NL80211_IFTYPE_ADHOC))) { 9339ac58615SFelix Fietkau ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi); 9349ac58615SFelix Fietkau last_rssi = sc->last_rssi; 935e3acd13dSSujith Manoharan 936d435700fSSujith if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 9372ef16755SFelix Fietkau rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); 9382ef16755SFelix Fietkau if (rssi < 0) 9392ef16755SFelix Fietkau rssi = 0; 940d435700fSSujith 9412ef16755SFelix Fietkau ah->stats.avgbrssi = rssi; 942d435700fSSujith } 943d435700fSSujith 944e3acd13dSSujith Manoharan rxs->signal = ah->noise + rx_stats->rs_rssi; 945e3acd13dSSujith Manoharan } 946e3acd13dSSujith Manoharan 947e0dd1a96SSujith Manoharan static void ath9k_process_tsf(struct ath_rx_status *rs, 948e0dd1a96SSujith Manoharan struct ieee80211_rx_status *rxs, 949e0dd1a96SSujith Manoharan u64 tsf) 950e0dd1a96SSujith Manoharan { 951e0dd1a96SSujith Manoharan u32 tsf_lower = tsf & 0xffffffff; 952e0dd1a96SSujith Manoharan 953e0dd1a96SSujith Manoharan rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp; 954e0dd1a96SSujith Manoharan if (rs->rs_tstamp > tsf_lower && 955e0dd1a96SSujith Manoharan unlikely(rs->rs_tstamp - tsf_lower > 0x10000000)) 956e0dd1a96SSujith Manoharan rxs->mactime -= 0x100000000ULL; 957e0dd1a96SSujith Manoharan 958e0dd1a96SSujith Manoharan if (rs->rs_tstamp < tsf_lower && 959e0dd1a96SSujith Manoharan unlikely(tsf_lower - rs->rs_tstamp > 0x10000000)) 960e0dd1a96SSujith Manoharan rxs->mactime += 0x100000000ULL; 961e0dd1a96SSujith Manoharan } 962e0dd1a96SSujith Manoharan 9633105b672SSujith Manoharan #ifdef CONFIG_ATH9K_DEBUGFS 9643105b672SSujith Manoharan static s8 fix_rssi_inv_only(u8 rssi_val) 9653105b672SSujith Manoharan { 9663105b672SSujith Manoharan if (rssi_val == 128) 9673105b672SSujith Manoharan rssi_val = 0; 9683105b672SSujith Manoharan return (s8) rssi_val; 9693105b672SSujith Manoharan } 9703105b672SSujith Manoharan #endif 9713105b672SSujith Manoharan 9723105b672SSujith Manoharan /* returns 1 if this was a spectral frame, even if not handled. */ 9733105b672SSujith Manoharan static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, 9743105b672SSujith Manoharan struct ath_rx_status *rs, u64 tsf) 9753105b672SSujith Manoharan { 9763105b672SSujith Manoharan #ifdef CONFIG_ATH9K_DEBUGFS 9773105b672SSujith Manoharan struct ath_hw *ah = sc->sc_ah; 978e07f01e4SLorenzo Bianconi u8 num_bins, *bins, *vdata = (u8 *)hdr; 979e07f01e4SLorenzo Bianconi struct fft_sample_ht20 fft_sample_20; 980e07f01e4SLorenzo Bianconi struct fft_sample_ht20_40 fft_sample_40; 981e07f01e4SLorenzo Bianconi struct fft_sample_tlv *tlv; 9823105b672SSujith Manoharan struct ath_radar_info *radar_info; 9833105b672SSujith Manoharan int len = rs->rs_datalen; 9843105b672SSujith Manoharan int dc_pos; 985e07f01e4SLorenzo Bianconi u16 fft_len, length, freq = ah->curchan->chan->center_freq; 986e07f01e4SLorenzo Bianconi enum nl80211_channel_type chan_type; 9873105b672SSujith Manoharan 9883105b672SSujith Manoharan /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer 9893105b672SSujith Manoharan * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT 9903105b672SSujith Manoharan * yet, but this is supposed to be possible as well. 9913105b672SSujith Manoharan */ 9923105b672SSujith Manoharan if (rs->rs_phyerr != ATH9K_PHYERR_RADAR && 9933105b672SSujith Manoharan rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT && 9943105b672SSujith Manoharan rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL) 9953105b672SSujith Manoharan return 0; 9963105b672SSujith Manoharan 9973105b672SSujith Manoharan /* check if spectral scan bit is set. This does not have to be checked 9983105b672SSujith Manoharan * if received through a SPECTRAL phy error, but shouldn't hurt. 9993105b672SSujith Manoharan */ 10003105b672SSujith Manoharan radar_info = ((struct ath_radar_info *)&vdata[len]) - 1; 10013105b672SSujith Manoharan if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) 10023105b672SSujith Manoharan return 0; 10033105b672SSujith Manoharan 1004e07f01e4SLorenzo Bianconi chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef); 1005e07f01e4SLorenzo Bianconi if ((chan_type == NL80211_CHAN_HT40MINUS) || 1006e07f01e4SLorenzo Bianconi (chan_type == NL80211_CHAN_HT40PLUS)) { 1007e07f01e4SLorenzo Bianconi fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN; 1008e07f01e4SLorenzo Bianconi num_bins = SPECTRAL_HT20_40_NUM_BINS; 1009e07f01e4SLorenzo Bianconi bins = (u8 *)fft_sample_40.data; 1010e07f01e4SLorenzo Bianconi } else { 1011e07f01e4SLorenzo Bianconi fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN; 1012e07f01e4SLorenzo Bianconi num_bins = SPECTRAL_HT20_NUM_BINS; 1013e07f01e4SLorenzo Bianconi bins = (u8 *)fft_sample_20.data; 1014e07f01e4SLorenzo Bianconi } 1015e07f01e4SLorenzo Bianconi 1016e07f01e4SLorenzo Bianconi /* Variation in the data length is possible and will be fixed later */ 1017e07f01e4SLorenzo Bianconi if ((len > fft_len + 2) || (len < fft_len - 1)) 10183105b672SSujith Manoharan return 1; 10193105b672SSujith Manoharan 1020e07f01e4SLorenzo Bianconi switch (len - fft_len) { 10213105b672SSujith Manoharan case 0: 10223105b672SSujith Manoharan /* length correct, nothing to do. */ 1023e07f01e4SLorenzo Bianconi memcpy(bins, vdata, num_bins); 10243105b672SSujith Manoharan break; 10253105b672SSujith Manoharan case -1: 10263105b672SSujith Manoharan /* first byte missing, duplicate it. */ 1027e07f01e4SLorenzo Bianconi memcpy(&bins[1], vdata, num_bins - 1); 10283105b672SSujith Manoharan bins[0] = vdata[0]; 10293105b672SSujith Manoharan break; 10303105b672SSujith Manoharan case 2: 10313105b672SSujith Manoharan /* MAC added 2 extra bytes at bin 30 and 32, remove them. */ 10323105b672SSujith Manoharan memcpy(bins, vdata, 30); 10333105b672SSujith Manoharan bins[30] = vdata[31]; 1034e07f01e4SLorenzo Bianconi memcpy(&bins[31], &vdata[33], num_bins - 31); 10353105b672SSujith Manoharan break; 10363105b672SSujith Manoharan case 1: 10373105b672SSujith Manoharan /* MAC added 2 extra bytes AND first byte is missing. */ 10383105b672SSujith Manoharan bins[0] = vdata[0]; 1039e07f01e4SLorenzo Bianconi memcpy(&bins[1], vdata, 30); 10403105b672SSujith Manoharan bins[31] = vdata[31]; 1041e07f01e4SLorenzo Bianconi memcpy(&bins[32], &vdata[33], num_bins - 32); 10423105b672SSujith Manoharan break; 10433105b672SSujith Manoharan default: 10443105b672SSujith Manoharan return 1; 10453105b672SSujith Manoharan } 10463105b672SSujith Manoharan 10473105b672SSujith Manoharan /* DC value (value in the middle) is the blind spot of the spectral 10483105b672SSujith Manoharan * sample and invalid, interpolate it. 10493105b672SSujith Manoharan */ 1050e07f01e4SLorenzo Bianconi dc_pos = num_bins / 2; 10513105b672SSujith Manoharan bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2; 10523105b672SSujith Manoharan 1053e07f01e4SLorenzo Bianconi if ((chan_type == NL80211_CHAN_HT40MINUS) || 1054e07f01e4SLorenzo Bianconi (chan_type == NL80211_CHAN_HT40PLUS)) { 1055e07f01e4SLorenzo Bianconi s8 lower_rssi, upper_rssi; 1056e07f01e4SLorenzo Bianconi s16 ext_nf; 1057e07f01e4SLorenzo Bianconi u8 lower_max_index, upper_max_index; 1058e07f01e4SLorenzo Bianconi u8 lower_bitmap_w, upper_bitmap_w; 1059e07f01e4SLorenzo Bianconi u16 lower_mag, upper_mag; 1060e07f01e4SLorenzo Bianconi struct ath9k_hw_cal_data *caldata = ah->caldata; 1061e07f01e4SLorenzo Bianconi struct ath_ht20_40_mag_info *mag_info; 1062e07f01e4SLorenzo Bianconi 1063e07f01e4SLorenzo Bianconi if (caldata) 1064e07f01e4SLorenzo Bianconi ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan, 1065e07f01e4SLorenzo Bianconi caldata->nfCalHist[3].privNF); 1066e07f01e4SLorenzo Bianconi else 1067e07f01e4SLorenzo Bianconi ext_nf = ATH_DEFAULT_NOISE_FLOOR; 1068e07f01e4SLorenzo Bianconi 1069e07f01e4SLorenzo Bianconi length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv); 1070e07f01e4SLorenzo Bianconi fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40; 1071e07f01e4SLorenzo Bianconi fft_sample_40.tlv.length = __cpu_to_be16(length); 1072e07f01e4SLorenzo Bianconi fft_sample_40.freq = __cpu_to_be16(freq); 1073e07f01e4SLorenzo Bianconi fft_sample_40.channel_type = chan_type; 1074e07f01e4SLorenzo Bianconi 1075e07f01e4SLorenzo Bianconi if (chan_type == NL80211_CHAN_HT40PLUS) { 1076e07f01e4SLorenzo Bianconi lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); 1077e07f01e4SLorenzo Bianconi upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0); 1078e07f01e4SLorenzo Bianconi 1079e07f01e4SLorenzo Bianconi fft_sample_40.lower_noise = ah->noise; 1080e07f01e4SLorenzo Bianconi fft_sample_40.upper_noise = ext_nf; 1081e07f01e4SLorenzo Bianconi } else { 1082e07f01e4SLorenzo Bianconi lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0); 1083e07f01e4SLorenzo Bianconi upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); 1084e07f01e4SLorenzo Bianconi 1085e07f01e4SLorenzo Bianconi fft_sample_40.lower_noise = ext_nf; 1086e07f01e4SLorenzo Bianconi fft_sample_40.upper_noise = ah->noise; 1087e07f01e4SLorenzo Bianconi } 1088e07f01e4SLorenzo Bianconi fft_sample_40.lower_rssi = lower_rssi; 1089e07f01e4SLorenzo Bianconi fft_sample_40.upper_rssi = upper_rssi; 1090e07f01e4SLorenzo Bianconi 1091e07f01e4SLorenzo Bianconi mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1; 1092e07f01e4SLorenzo Bianconi lower_mag = spectral_max_magnitude(mag_info->lower_bins); 1093e07f01e4SLorenzo Bianconi upper_mag = spectral_max_magnitude(mag_info->upper_bins); 1094e07f01e4SLorenzo Bianconi fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag); 1095e07f01e4SLorenzo Bianconi fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag); 1096e07f01e4SLorenzo Bianconi lower_max_index = spectral_max_index(mag_info->lower_bins); 1097e07f01e4SLorenzo Bianconi upper_max_index = spectral_max_index(mag_info->upper_bins); 1098e07f01e4SLorenzo Bianconi fft_sample_40.lower_max_index = lower_max_index; 1099e07f01e4SLorenzo Bianconi fft_sample_40.upper_max_index = upper_max_index; 1100e07f01e4SLorenzo Bianconi lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins); 1101e07f01e4SLorenzo Bianconi upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins); 1102e07f01e4SLorenzo Bianconi fft_sample_40.lower_bitmap_weight = lower_bitmap_w; 1103e07f01e4SLorenzo Bianconi fft_sample_40.upper_bitmap_weight = upper_bitmap_w; 1104e07f01e4SLorenzo Bianconi fft_sample_40.max_exp = mag_info->max_exp & 0xf; 1105e07f01e4SLorenzo Bianconi 1106e07f01e4SLorenzo Bianconi fft_sample_40.tsf = __cpu_to_be64(tsf); 1107e07f01e4SLorenzo Bianconi 1108e07f01e4SLorenzo Bianconi tlv = (struct fft_sample_tlv *)&fft_sample_40; 1109e07f01e4SLorenzo Bianconi } else { 1110e07f01e4SLorenzo Bianconi u8 max_index, bitmap_w; 1111e07f01e4SLorenzo Bianconi u16 magnitude; 1112e07f01e4SLorenzo Bianconi struct ath_ht20_mag_info *mag_info; 1113e07f01e4SLorenzo Bianconi 1114e07f01e4SLorenzo Bianconi length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv); 1115e07f01e4SLorenzo Bianconi fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20; 1116e07f01e4SLorenzo Bianconi fft_sample_20.tlv.length = __cpu_to_be16(length); 1117e07f01e4SLorenzo Bianconi fft_sample_20.freq = __cpu_to_be16(freq); 1118e07f01e4SLorenzo Bianconi 1119e07f01e4SLorenzo Bianconi fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); 1120e07f01e4SLorenzo Bianconi fft_sample_20.noise = ah->noise; 1121e07f01e4SLorenzo Bianconi 11223105b672SSujith Manoharan mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1; 1123e07f01e4SLorenzo Bianconi magnitude = spectral_max_magnitude(mag_info->all_bins); 1124e07f01e4SLorenzo Bianconi fft_sample_20.max_magnitude = __cpu_to_be16(magnitude); 1125e07f01e4SLorenzo Bianconi max_index = spectral_max_index(mag_info->all_bins); 1126e07f01e4SLorenzo Bianconi fft_sample_20.max_index = max_index; 1127e07f01e4SLorenzo Bianconi bitmap_w = spectral_bitmap_weight(mag_info->all_bins); 1128e07f01e4SLorenzo Bianconi fft_sample_20.bitmap_weight = bitmap_w; 1129e07f01e4SLorenzo Bianconi fft_sample_20.max_exp = mag_info->max_exp & 0xf; 11303105b672SSujith Manoharan 1131e07f01e4SLorenzo Bianconi fft_sample_20.tsf = __cpu_to_be64(tsf); 11323105b672SSujith Manoharan 1133e07f01e4SLorenzo Bianconi tlv = (struct fft_sample_tlv *)&fft_sample_20; 1134e07f01e4SLorenzo Bianconi } 11353105b672SSujith Manoharan 1136e07f01e4SLorenzo Bianconi ath_debug_send_fft_sample(sc, tlv); 11373105b672SSujith Manoharan return 1; 11383105b672SSujith Manoharan #else 11393105b672SSujith Manoharan return 0; 11403105b672SSujith Manoharan #endif 11413105b672SSujith Manoharan } 11423105b672SSujith Manoharan 11436f38482eSSujith Manoharan static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr) 11446f38482eSSujith Manoharan { 11456f38482eSSujith Manoharan struct ath_hw *ah = sc->sc_ah; 11466f38482eSSujith Manoharan struct ath_common *common = ath9k_hw_common(ah); 11476f38482eSSujith Manoharan 11486f38482eSSujith Manoharan if (ieee80211_is_beacon(hdr->frame_control)) { 11496f38482eSSujith Manoharan RX_STAT_INC(rx_beacons); 11506f38482eSSujith Manoharan if (!is_zero_ether_addr(common->curbssid) && 11516f38482eSSujith Manoharan ether_addr_equal(hdr->addr3, common->curbssid)) 11526f38482eSSujith Manoharan return true; 11536f38482eSSujith Manoharan } 11546f38482eSSujith Manoharan 11556f38482eSSujith Manoharan return false; 11566f38482eSSujith Manoharan } 11576f38482eSSujith Manoharan 1158d435700fSSujith /* 1159d435700fSSujith * For Decrypt or Demic errors, we only mark packet status here and always push 1160d435700fSSujith * up the frame up to let mac80211 handle the actual error case, be it no 1161d435700fSSujith * decryption key or real decryption error. This let us keep statistics there. 1162d435700fSSujith */ 1163723e7113SFelix Fietkau static int ath9k_rx_skb_preprocess(struct ath_softc *sc, 11646f38482eSSujith Manoharan struct sk_buff *skb, 1165d435700fSSujith struct ath_rx_status *rx_stats, 1166d435700fSSujith struct ieee80211_rx_status *rx_status, 1167e0dd1a96SSujith Manoharan bool *decrypt_error, u64 tsf) 1168d435700fSSujith { 1169723e7113SFelix Fietkau struct ieee80211_hw *hw = sc->hw; 1170723e7113SFelix Fietkau struct ath_hw *ah = sc->sc_ah; 1171723e7113SFelix Fietkau struct ath_common *common = ath9k_hw_common(ah); 11726f38482eSSujith Manoharan struct ieee80211_hdr *hdr; 1173723e7113SFelix Fietkau bool discard_current = sc->rx.discard_next; 11747c5c73cdSSujith Manoharan int ret = 0; 1175723e7113SFelix Fietkau 11765871d2d7SSujith Manoharan /* 11775871d2d7SSujith Manoharan * Discard corrupt descriptors which are marked in 11785871d2d7SSujith Manoharan * ath_get_next_rx_buf(). 11795871d2d7SSujith Manoharan */ 1180723e7113SFelix Fietkau sc->rx.discard_next = rx_stats->rs_more; 1181723e7113SFelix Fietkau if (discard_current) 1182723e7113SFelix Fietkau return -EINVAL; 1183f749b946SFelix Fietkau 1184d435700fSSujith /* 11855871d2d7SSujith Manoharan * Discard zero-length packets. 11865871d2d7SSujith Manoharan */ 11875871d2d7SSujith Manoharan if (!rx_stats->rs_datalen) { 11885871d2d7SSujith Manoharan RX_STAT_INC(rx_len_err); 11895871d2d7SSujith Manoharan return -EINVAL; 11905871d2d7SSujith Manoharan } 11915871d2d7SSujith Manoharan 11925871d2d7SSujith Manoharan /* 11935871d2d7SSujith Manoharan * rs_status follows rs_datalen so if rs_datalen is too large 11945871d2d7SSujith Manoharan * we can take a hint that hardware corrupted it, so ignore 11955871d2d7SSujith Manoharan * those frames. 11965871d2d7SSujith Manoharan */ 11975871d2d7SSujith Manoharan if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { 11985871d2d7SSujith Manoharan RX_STAT_INC(rx_len_err); 11995871d2d7SSujith Manoharan return -EINVAL; 12005871d2d7SSujith Manoharan } 12015871d2d7SSujith Manoharan 12024a470647SSujith Manoharan /* Only use status info from the last fragment */ 12034a470647SSujith Manoharan if (rx_stats->rs_more) 12044a470647SSujith Manoharan return 0; 12054a470647SSujith Manoharan 1206b0925595SSujith Manoharan /* 1207b0925595SSujith Manoharan * Return immediately if the RX descriptor has been marked 1208b0925595SSujith Manoharan * as corrupt based on the various error bits. 1209b0925595SSujith Manoharan * 1210b0925595SSujith Manoharan * This is different from the other corrupt descriptor 1211b0925595SSujith Manoharan * condition handled above. 1212b0925595SSujith Manoharan */ 12137c5c73cdSSujith Manoharan if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) { 12147c5c73cdSSujith Manoharan ret = -EINVAL; 12157c5c73cdSSujith Manoharan goto exit; 12167c5c73cdSSujith Manoharan } 1217b0925595SSujith Manoharan 12186f38482eSSujith Manoharan hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); 12196f38482eSSujith Manoharan 1220e0dd1a96SSujith Manoharan ath9k_process_tsf(rx_stats, rx_status, tsf); 12215e85a32aSSujith Manoharan ath_debug_stat_rx(sc, rx_stats); 1222e0dd1a96SSujith Manoharan 12235871d2d7SSujith Manoharan /* 12246b87d71cSSujith Manoharan * Process PHY errors and return so that the packet 12256b87d71cSSujith Manoharan * can be dropped. 12266b87d71cSSujith Manoharan */ 12276b87d71cSSujith Manoharan if (rx_stats->rs_status & ATH9K_RXERR_PHY) { 12286b87d71cSSujith Manoharan ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime); 12296b87d71cSSujith Manoharan if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) 12306b87d71cSSujith Manoharan RX_STAT_INC(rx_spectral); 12316b87d71cSSujith Manoharan 12327c5c73cdSSujith Manoharan ret = -EINVAL; 12337c5c73cdSSujith Manoharan goto exit; 12346b87d71cSSujith Manoharan } 12356b87d71cSSujith Manoharan 12366b87d71cSSujith Manoharan /* 1237d435700fSSujith * everything but the rate is checked here, the rate check is done 1238d435700fSSujith * separately to avoid doing two lookups for a rate for each frame. 1239d435700fSSujith */ 12407c5c73cdSSujith Manoharan if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) { 12417c5c73cdSSujith Manoharan ret = -EINVAL; 12427c5c73cdSSujith Manoharan goto exit; 12437c5c73cdSSujith Manoharan } 1244d435700fSSujith 12456f38482eSSujith Manoharan rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr); 1246eb5f952cSSujith Manoharan if (rx_stats->is_mybeacon) { 1247eb5f952cSSujith Manoharan sc->hw_busy_count = 0; 1248eb5f952cSSujith Manoharan ath_start_rx_poll(sc, 3); 1249eb5f952cSSujith Manoharan } 12506f38482eSSujith Manoharan 12517c5c73cdSSujith Manoharan if (ath9k_process_rate(common, hw, rx_stats, rx_status)) { 12527c5c73cdSSujith Manoharan ret =-EINVAL; 12537c5c73cdSSujith Manoharan goto exit; 12547c5c73cdSSujith Manoharan } 1255d435700fSSujith 1256e3acd13dSSujith Manoharan ath9k_process_rssi(common, hw, rx_stats, rx_status); 125774a97755SSujith Manoharan 1258675a0b04SKarl Beldan rx_status->band = hw->conf.chandef.chan->band; 1259675a0b04SKarl Beldan rx_status->freq = hw->conf.chandef.chan->center_freq; 1260d435700fSSujith rx_status->antenna = rx_stats->rs_antenna; 126196d21371SThomas Pedersen rx_status->flag |= RX_FLAG_MACTIME_END; 1262d435700fSSujith 1263a5525d9cSSujith Manoharan #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1264a5525d9cSSujith Manoharan if (ieee80211_is_data_present(hdr->frame_control) && 1265a5525d9cSSujith Manoharan !ieee80211_is_qos_nullfunc(hdr->frame_control)) 1266a5525d9cSSujith Manoharan sc->rx.num_pkts++; 1267a5525d9cSSujith Manoharan #endif 1268a5525d9cSSujith Manoharan 12697c5c73cdSSujith Manoharan exit: 12707c5c73cdSSujith Manoharan sc->rx.discard_next = false; 12717c5c73cdSSujith Manoharan return ret; 1272d435700fSSujith } 1273d435700fSSujith 1274d435700fSSujith static void ath9k_rx_skb_postprocess(struct ath_common *common, 1275d435700fSSujith struct sk_buff *skb, 1276d435700fSSujith struct ath_rx_status *rx_stats, 1277d435700fSSujith struct ieee80211_rx_status *rxs, 1278d435700fSSujith bool decrypt_error) 1279d435700fSSujith { 1280d435700fSSujith struct ath_hw *ah = common->ah; 1281d435700fSSujith struct ieee80211_hdr *hdr; 1282d435700fSSujith int hdrlen, padpos, padsize; 1283d435700fSSujith u8 keyix; 1284d435700fSSujith __le16 fc; 1285d435700fSSujith 1286d435700fSSujith /* see if any padding is done by the hw and remove it */ 1287d435700fSSujith hdr = (struct ieee80211_hdr *) skb->data; 1288d435700fSSujith hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1289d435700fSSujith fc = hdr->frame_control; 1290c60c9929SFelix Fietkau padpos = ieee80211_hdrlen(fc); 1291d435700fSSujith 1292d435700fSSujith /* The MAC header is padded to have 32-bit boundary if the 1293d435700fSSujith * packet payload is non-zero. The general calculation for 1294d435700fSSujith * padsize would take into account odd header lengths: 1295d435700fSSujith * padsize = (4 - padpos % 4) % 4; However, since only 1296d435700fSSujith * even-length headers are used, padding can only be 0 or 2 1297d435700fSSujith * bytes and we can optimize this a bit. In addition, we must 1298d435700fSSujith * not try to remove padding from short control frames that do 1299d435700fSSujith * not have payload. */ 1300d435700fSSujith padsize = padpos & 3; 1301d435700fSSujith if (padsize && skb->len>=padpos+padsize+FCS_LEN) { 1302d435700fSSujith memmove(skb->data + padsize, skb->data, padpos); 1303d435700fSSujith skb_pull(skb, padsize); 1304d435700fSSujith } 1305d435700fSSujith 1306d435700fSSujith keyix = rx_stats->rs_keyix; 1307d435700fSSujith 1308d435700fSSujith if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && 1309d435700fSSujith ieee80211_has_protected(fc)) { 1310d435700fSSujith rxs->flag |= RX_FLAG_DECRYPTED; 1311d435700fSSujith } else if (ieee80211_has_protected(fc) 1312d435700fSSujith && !decrypt_error && skb->len >= hdrlen + 4) { 1313d435700fSSujith keyix = skb->data[hdrlen + 3] >> 6; 1314d435700fSSujith 1315d435700fSSujith if (test_bit(keyix, common->keymap)) 1316d435700fSSujith rxs->flag |= RX_FLAG_DECRYPTED; 1317d435700fSSujith } 1318d435700fSSujith if (ah->sw_mgmt_crypto && 1319d435700fSSujith (rxs->flag & RX_FLAG_DECRYPTED) && 1320d435700fSSujith ieee80211_is_mgmt(fc)) 1321d435700fSSujith /* Use software decrypt for management frames. */ 1322d435700fSSujith rxs->flag &= ~RX_FLAG_DECRYPTED; 1323d435700fSSujith } 1324b5c80475SFelix Fietkau 1325c3124df7SSujith Manoharan /* 1326c3124df7SSujith Manoharan * Run the LNA combining algorithm only in these cases: 1327c3124df7SSujith Manoharan * 1328c3124df7SSujith Manoharan * Standalone WLAN cards with both LNA/Antenna diversity 1329c3124df7SSujith Manoharan * enabled in the EEPROM. 1330c3124df7SSujith Manoharan * 1331c3124df7SSujith Manoharan * WLAN+BT cards which are in the supported card list 1332c3124df7SSujith Manoharan * in ath_pci_id_table and the user has loaded the 1333c3124df7SSujith Manoharan * driver with "bt_ant_diversity" set to true. 1334c3124df7SSujith Manoharan */ 1335c3124df7SSujith Manoharan static void ath9k_antenna_check(struct ath_softc *sc, 1336c3124df7SSujith Manoharan struct ath_rx_status *rs) 1337c3124df7SSujith Manoharan { 1338c3124df7SSujith Manoharan struct ath_hw *ah = sc->sc_ah; 1339c3124df7SSujith Manoharan struct ath9k_hw_capabilities *pCap = &ah->caps; 1340c3124df7SSujith Manoharan struct ath_common *common = ath9k_hw_common(ah); 1341c3124df7SSujith Manoharan 1342c3124df7SSujith Manoharan if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) 1343c3124df7SSujith Manoharan return; 1344c3124df7SSujith Manoharan 1345c3124df7SSujith Manoharan /* 1346c3124df7SSujith Manoharan * All MPDUs in an aggregate will use the same LNA 1347c3124df7SSujith Manoharan * as the first MPDU. 1348c3124df7SSujith Manoharan */ 1349c3124df7SSujith Manoharan if (rs->rs_isaggr && !rs->rs_firstaggr) 1350c3124df7SSujith Manoharan return; 1351c3124df7SSujith Manoharan 1352c3124df7SSujith Manoharan /* 1353c3124df7SSujith Manoharan * Change the default rx antenna if rx diversity 1354c3124df7SSujith Manoharan * chooses the other antenna 3 times in a row. 1355c3124df7SSujith Manoharan */ 1356c3124df7SSujith Manoharan if (sc->rx.defant != rs->rs_antenna) { 1357c3124df7SSujith Manoharan if (++sc->rx.rxotherant >= 3) 1358c3124df7SSujith Manoharan ath_setdefantenna(sc, rs->rs_antenna); 1359c3124df7SSujith Manoharan } else { 1360c3124df7SSujith Manoharan sc->rx.rxotherant = 0; 1361c3124df7SSujith Manoharan } 1362c3124df7SSujith Manoharan 1363c3124df7SSujith Manoharan if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) { 1364c3124df7SSujith Manoharan if (common->bt_ant_diversity) 1365c3124df7SSujith Manoharan ath_ant_comb_scan(sc, rs); 1366c3124df7SSujith Manoharan } else { 1367c3124df7SSujith Manoharan ath_ant_comb_scan(sc, rs); 1368c3124df7SSujith Manoharan } 1369c3124df7SSujith Manoharan } 1370c3124df7SSujith Manoharan 137121fbbca3SChristian Lamparter static void ath9k_apply_ampdu_details(struct ath_softc *sc, 137221fbbca3SChristian Lamparter struct ath_rx_status *rs, struct ieee80211_rx_status *rxs) 137321fbbca3SChristian Lamparter { 137421fbbca3SChristian Lamparter if (rs->rs_isaggr) { 137521fbbca3SChristian Lamparter rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; 137621fbbca3SChristian Lamparter 137721fbbca3SChristian Lamparter rxs->ampdu_reference = sc->rx.ampdu_ref; 137821fbbca3SChristian Lamparter 137921fbbca3SChristian Lamparter if (!rs->rs_moreaggr) { 138021fbbca3SChristian Lamparter rxs->flag |= RX_FLAG_AMPDU_IS_LAST; 138121fbbca3SChristian Lamparter sc->rx.ampdu_ref++; 138221fbbca3SChristian Lamparter } 138321fbbca3SChristian Lamparter 138421fbbca3SChristian Lamparter if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE) 138521fbbca3SChristian Lamparter rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR; 138621fbbca3SChristian Lamparter } 138721fbbca3SChristian Lamparter } 138821fbbca3SChristian Lamparter 1389b5c80475SFelix Fietkau int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1390b5c80475SFelix Fietkau { 13911a04d59dSFelix Fietkau struct ath_rxbuf *bf; 13920d95521eSFelix Fietkau struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; 1393b5c80475SFelix Fietkau struct ieee80211_rx_status *rxs; 1394b5c80475SFelix Fietkau struct ath_hw *ah = sc->sc_ah; 1395b5c80475SFelix Fietkau struct ath_common *common = ath9k_hw_common(ah); 13967545daf4SFelix Fietkau struct ieee80211_hw *hw = sc->hw; 1397b5c80475SFelix Fietkau int retval; 1398b5c80475SFelix Fietkau struct ath_rx_status rs; 1399b5c80475SFelix Fietkau enum ath9k_rx_qtype qtype; 1400b5c80475SFelix Fietkau bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1401b5c80475SFelix Fietkau int dma_type; 1402a6d2055bSFelix Fietkau u64 tsf = 0; 14038ab2cd09SLuis R. Rodriguez unsigned long flags; 14042e1cd495SFelix Fietkau dma_addr_t new_buf_addr; 1405b5c80475SFelix Fietkau 1406b5c80475SFelix Fietkau if (edma) 1407b5c80475SFelix Fietkau dma_type = DMA_BIDIRECTIONAL; 140856824223SMing Lei else 140956824223SMing Lei dma_type = DMA_FROM_DEVICE; 1410b5c80475SFelix Fietkau 1411b5c80475SFelix Fietkau qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1412b5c80475SFelix Fietkau 1413a6d2055bSFelix Fietkau tsf = ath9k_hw_gettsf64(ah); 1414a6d2055bSFelix Fietkau 1415b5c80475SFelix Fietkau do { 1416e1352fdeSLorenzo Bianconi bool decrypt_error = false; 1417b5c80475SFelix Fietkau 1418b5c80475SFelix Fietkau memset(&rs, 0, sizeof(rs)); 1419b5c80475SFelix Fietkau if (edma) 1420b5c80475SFelix Fietkau bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); 1421b5c80475SFelix Fietkau else 1422b5c80475SFelix Fietkau bf = ath_get_next_rx_buf(sc, &rs); 1423b5c80475SFelix Fietkau 1424b5c80475SFelix Fietkau if (!bf) 1425b5c80475SFelix Fietkau break; 1426b5c80475SFelix Fietkau 1427b5c80475SFelix Fietkau skb = bf->bf_mpdu; 1428b5c80475SFelix Fietkau if (!skb) 1429b5c80475SFelix Fietkau continue; 1430b5c80475SFelix Fietkau 14310d95521eSFelix Fietkau /* 14320d95521eSFelix Fietkau * Take frame header from the first fragment and RX status from 14330d95521eSFelix Fietkau * the last one. 14340d95521eSFelix Fietkau */ 14350d95521eSFelix Fietkau if (sc->rx.frag) 14360d95521eSFelix Fietkau hdr_skb = sc->rx.frag; 14370d95521eSFelix Fietkau else 14380d95521eSFelix Fietkau hdr_skb = skb; 14390d95521eSFelix Fietkau 1440f6307ddaSSujith Manoharan rxs = IEEE80211_SKB_RXCB(hdr_skb); 1441ffb1c56aSAshok Nagarajan memset(rxs, 0, sizeof(struct ieee80211_rx_status)); 1442ffb1c56aSAshok Nagarajan 14436f38482eSSujith Manoharan retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs, 1444e0dd1a96SSujith Manoharan &decrypt_error, tsf); 144583c76570SZefir Kurtisi if (retval) 144683c76570SZefir Kurtisi goto requeue_drop_frag; 144783c76570SZefir Kurtisi 1448203c4805SLuis R. Rodriguez /* Ensure we always have an skb to requeue once we are done 1449203c4805SLuis R. Rodriguez * processing the current buffer's skb */ 1450cc861f74SLuis R. Rodriguez requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1451203c4805SLuis R. Rodriguez 1452203c4805SLuis R. Rodriguez /* If there is no memory we ignore the current RX'd frame, 1453203c4805SLuis R. Rodriguez * tell hardware it can give us a new frame using the old 1454203c4805SLuis R. Rodriguez * skb and put it at the tail of the sc->rx.rxbuf list for 1455203c4805SLuis R. Rodriguez * processing. */ 145615072189SBen Greear if (!requeue_skb) { 145715072189SBen Greear RX_STAT_INC(rx_oom_err); 14580d95521eSFelix Fietkau goto requeue_drop_frag; 145915072189SBen Greear } 1460203c4805SLuis R. Rodriguez 14612e1cd495SFelix Fietkau /* We will now give hardware our shiny new allocated skb */ 14622e1cd495SFelix Fietkau new_buf_addr = dma_map_single(sc->dev, requeue_skb->data, 14632e1cd495SFelix Fietkau common->rx_bufsize, dma_type); 14642e1cd495SFelix Fietkau if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) { 14652e1cd495SFelix Fietkau dev_kfree_skb_any(requeue_skb); 14662e1cd495SFelix Fietkau goto requeue_drop_frag; 14672e1cd495SFelix Fietkau } 14682e1cd495SFelix Fietkau 1469203c4805SLuis R. Rodriguez /* Unmap the frame */ 1470203c4805SLuis R. Rodriguez dma_unmap_single(sc->dev, bf->bf_buf_addr, 14712e1cd495SFelix Fietkau common->rx_bufsize, dma_type); 1472203c4805SLuis R. Rodriguez 1473176f0e84SSujith Manoharan bf->bf_mpdu = requeue_skb; 1474176f0e84SSujith Manoharan bf->bf_buf_addr = new_buf_addr; 1475176f0e84SSujith Manoharan 1476b5c80475SFelix Fietkau skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); 1477b5c80475SFelix Fietkau if (ah->caps.rx_status_len) 1478b5c80475SFelix Fietkau skb_pull(skb, ah->caps.rx_status_len); 1479203c4805SLuis R. Rodriguez 14800d95521eSFelix Fietkau if (!rs.rs_more) 14810d95521eSFelix Fietkau ath9k_rx_skb_postprocess(common, hdr_skb, &rs, 1482c9b14170SLuis R. Rodriguez rxs, decrypt_error); 1483203c4805SLuis R. Rodriguez 14840d95521eSFelix Fietkau if (rs.rs_more) { 148515072189SBen Greear RX_STAT_INC(rx_frags); 14860d95521eSFelix Fietkau /* 14870d95521eSFelix Fietkau * rs_more indicates chained descriptors which can be 14880d95521eSFelix Fietkau * used to link buffers together for a sort of 14890d95521eSFelix Fietkau * scatter-gather operation. 14900d95521eSFelix Fietkau */ 14910d95521eSFelix Fietkau if (sc->rx.frag) { 14920d95521eSFelix Fietkau /* too many fragments - cannot handle frame */ 14930d95521eSFelix Fietkau dev_kfree_skb_any(sc->rx.frag); 14940d95521eSFelix Fietkau dev_kfree_skb_any(skb); 149515072189SBen Greear RX_STAT_INC(rx_too_many_frags_err); 14960d95521eSFelix Fietkau skb = NULL; 14970d95521eSFelix Fietkau } 14980d95521eSFelix Fietkau sc->rx.frag = skb; 14990d95521eSFelix Fietkau goto requeue; 15000d95521eSFelix Fietkau } 15010d95521eSFelix Fietkau 15020d95521eSFelix Fietkau if (sc->rx.frag) { 15030d95521eSFelix Fietkau int space = skb->len - skb_tailroom(hdr_skb); 15040d95521eSFelix Fietkau 15050d95521eSFelix Fietkau if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { 15060d95521eSFelix Fietkau dev_kfree_skb(skb); 150715072189SBen Greear RX_STAT_INC(rx_oom_err); 15080d95521eSFelix Fietkau goto requeue_drop_frag; 15090d95521eSFelix Fietkau } 15100d95521eSFelix Fietkau 1511b5447ff9SEric Dumazet sc->rx.frag = NULL; 1512b5447ff9SEric Dumazet 15130d95521eSFelix Fietkau skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len), 15140d95521eSFelix Fietkau skb->len); 15150d95521eSFelix Fietkau dev_kfree_skb_any(skb); 15160d95521eSFelix Fietkau skb = hdr_skb; 15170d95521eSFelix Fietkau } 15180d95521eSFelix Fietkau 151966760eacSFelix Fietkau if (rxs->flag & RX_FLAG_MMIC_STRIPPED) 152066760eacSFelix Fietkau skb_trim(skb, skb->len - 8); 152166760eacSFelix Fietkau 15228ab2cd09SLuis R. Rodriguez spin_lock_irqsave(&sc->sc_pm_lock, flags); 1523aaef24b4SMohammed Shafi Shajakhan if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | 15241b04b930SSujith PS_WAIT_FOR_CAB | 1525aaef24b4SMohammed Shafi Shajakhan PS_WAIT_FOR_PSPOLL_DATA)) || 1526cedc7e3dSMohammed Shafi Shajakhan ath9k_check_auto_sleep(sc)) 1527f73c604cSRajkumar Manoharan ath_rx_ps(sc, skb, rs.is_mybeacon); 15288ab2cd09SLuis R. Rodriguez spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1529cc65965cSJouni Malinen 1530c3124df7SSujith Manoharan ath9k_antenna_check(sc, &rs); 1531102885a5SVasanthakumar Thiagarajan 153221fbbca3SChristian Lamparter ath9k_apply_ampdu_details(sc, &rs, rxs); 153321fbbca3SChristian Lamparter 15347545daf4SFelix Fietkau ieee80211_rx(hw, skb); 1535cc65965cSJouni Malinen 15360d95521eSFelix Fietkau requeue_drop_frag: 15370d95521eSFelix Fietkau if (sc->rx.frag) { 15380d95521eSFelix Fietkau dev_kfree_skb_any(sc->rx.frag); 15390d95521eSFelix Fietkau sc->rx.frag = NULL; 15400d95521eSFelix Fietkau } 1541203c4805SLuis R. Rodriguez requeue: 1542b5c80475SFelix Fietkau list_add_tail(&bf->list, &sc->rx.rxbuf); 1543a3dc48e8SFelix Fietkau if (flush) 1544a3dc48e8SFelix Fietkau continue; 1545a3dc48e8SFelix Fietkau 1546a3dc48e8SFelix Fietkau if (edma) { 1547b5c80475SFelix Fietkau ath_rx_edma_buf_link(sc, qtype); 1548b5c80475SFelix Fietkau } else { 1549e96542e5SFelix Fietkau ath_rx_buf_relink(sc, bf); 155095294973SFelix Fietkau ath9k_hw_rxena(ah); 1551b5c80475SFelix Fietkau } 1552203c4805SLuis R. Rodriguez } while (1); 1553203c4805SLuis R. Rodriguez 155429ab0b36SRajkumar Manoharan if (!(ah->imask & ATH9K_INT_RXEOL)) { 155529ab0b36SRajkumar Manoharan ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 155672d874c6SFelix Fietkau ath9k_hw_set_interrupts(ah); 155729ab0b36SRajkumar Manoharan } 155829ab0b36SRajkumar Manoharan 1559203c4805SLuis R. Rodriguez return 0; 1560203c4805SLuis R. Rodriguez } 1561