recv.c: 0a45da765e4bf5e8a7705266fa36e0f44787b0a1 -> cc861f7468724e66567baf087b4e413e91b18150
 /*
  * Copyright (c) 2008-2009 Atheros Communications Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
  * copyright notice and this permission notice appear in all copies.
  *
  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES

--- 34 unchanged lines hidden ---

  * 11N: we can no longer afford to self link the last descriptor.
  * MAC acknowledges BA status as long as it copies frames to host
  * buffer (or rx fifo). This can incorrectly acknowledge packets
  * to a sender if last desc is self-linked.
  */
 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
 	struct sk_buff *skb;

 	ATH_RXBUF_RESET(bf);

 	ds = bf->bf_desc;
 	ds->ds_link = 0; /* link to null */
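 	/* A null ds_link terminates the DMA chain at this buffer; as the
 	 * comment above this function explains, self-linking the last
 	 * descriptor would let the MAC ack frames the host never received. */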
 	ds->ds_data = bf->bf_buf_addr;

 	/* virtual addr of the beginning of the buffer. */
 	skb = bf->bf_mpdu;
 	BUG_ON(skb == NULL);
 	ds->ds_vdata = skb->data;

-	/* setup rx descriptors. The rx.bufsize here tells the harware
+	/*
+	 * setup rx descriptors. The rx_bufsize here tells the hardware
 	 * how much data it can DMA to us and that we are prepared
-	 * to process */
+	 * to process
+	 */
 	ath9k_hw_setuprxdesc(ah, ds,
-			     sc->rx.bufsize,
+			     common->rx_bufsize,
 			     0);
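 	/* sc->rx.rxlink caches the address of the last queued descriptor's
 	 * ds_link field: NULL means no chain exists yet and the buffer is
 	 * handed straight to the hardware; otherwise the new buffer is
 	 * appended to the tail of the existing chain. */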

 	if (sc->rx.rxlink == NULL)
 		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
 	else
 		*sc->rx.rxlink = bf->bf_daddr;

 	sc->rx.rxlink = &ds->ds_link;

--- 261 unchanged lines hidden ---

 	struct sk_buff *skb;
 	struct ath_buf *bf;
 	int error = 0;

 	spin_lock_init(&sc->rx.rxflushlock);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_lock_init(&sc->rx.rxbuflock);

-	sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-				 min(common->cachelsz, (u16)64));
+	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+				     min(common->cachelsz, (u16)64));

 	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-		  common->cachelsz, sc->rx.bufsize);
+		  common->cachelsz, common->rx_bufsize);
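 	/* roundup(x, y) rounds x up to the next multiple of y, so the rx
 	 * buffer spans a whole number of cache lines, with the alignment
 	 * step capped at 64 bytes on larger cache lines. Illustrative
 	 * arithmetic (values not from this driver): roundup(3839, 64)
 	 * gives 3840. */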

 	/* Initialize rx descriptors */

 	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
 				  "rx", nbufs, 1);
 	if (error != 0) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "failed to allocate rx descriptors: %d\n", error);
 		goto err;
 	}

 	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
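 		/* One skb is allocated and DMA-mapped below for every
 		 * descriptor ath_descdma_setup() placed on the rxbuf list. */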
-		skb = ath_rxbuf_alloc(common, sc->rx.bufsize, GFP_KERNEL);
+		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
 		if (skb == NULL) {
 			error = -ENOMEM;
 			goto err;
 		}

 		bf->bf_mpdu = skb;
 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
-						 sc->rx.bufsize,
+						 common->rx_bufsize,
 						 DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(sc->dev,
 					       bf->bf_buf_addr))) {
 			dev_kfree_skb_any(skb);
 			bf->bf_mpdu = NULL;
 			ath_print(common, ATH_DBG_FATAL,
 				  "dma_mapping_error() on RX init\n");
 			error = -ENOMEM;

--- 7 unchanged lines hidden ---
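The error handling above is the standard streaming-DMA idiom: dma_map_single() can fail, and dma_mapping_error() must be checked before the address is ever handed to hardware. A minimal sketch of the idiom with hypothetical names (example_map_rx is not part of this driver):

	#include <linux/dma-mapping.h>

	/* Sketch only: map a receive buffer for device-to-CPU DMA and
	 * fail cleanly if no usable bus address could be produced. */
	static int example_map_rx(struct device *dev, void *buf, size_t len,
				  dma_addr_t *addr)
	{
		*addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, *addr)))
			return -ENOMEM;	/* never hand hardware a bad address */
		return 0;
	}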

 	if (error)
 		ath_rx_cleanup(sc);

 	return error;
 }

 void ath_rx_cleanup(struct ath_softc *sc)
 {
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct sk_buff *skb;
 	struct ath_buf *bf;

 	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
 		skb = bf->bf_mpdu;
 		if (skb) {
 			dma_unmap_single(sc->dev, bf->bf_buf_addr,
-					 sc->rx.bufsize, DMA_FROM_DEVICE);
+					 common->rx_bufsize, DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
 		}
 	}

 	if (sc->rx.rxdma.dd_desc_len != 0)
 		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
 }

--- 363 unchanged lines hidden ---

 			continue;

 		/*
 		 * Synchronize the DMA transfer with CPU before
 		 * 1. accessing the frame
 		 * 2. requeueing the same buffer to h/w
 		 */
 		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
-					sc->rx.bufsize,
+					common->rx_bufsize,
 					DMA_FROM_DEVICE);

 		hdr = (struct ieee80211_hdr *) skb->data;
 		rxs = IEEE80211_SKB_RXCB(skb);

 		hw = ath_get_virt_hw(sc, hdr);
 		rx_stats = &ds->ds_rxstat;

 		/*
 		 * If we're asked to flush receive queue, directly
 		 * chain it back at the queue without processing it.
 		 */
 		if (flush)
 			goto requeue;

 		/* The status portion of the descriptor could get corrupted. */
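 		/* A reported data length larger than the buffer we gave the
 		 * hardware can only mean the status word is bogus, so the
 		 * check below requeues the buffer rather than trust it. */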
-		if (sc->rx.bufsize < rx_stats->rs_datalen)
+		if (common->rx_bufsize < rx_stats->rs_datalen)
 			goto requeue;

 		if (!ath_rx_prepare(common, hw, skb, rx_stats,
 				    rxs, &decrypt_error))
 			goto requeue;

 		/* Ensure we always have an skb to requeue once we are done
 		 * processing the current buffer's skb */
-		requeue_skb = ath_rxbuf_alloc(common, sc->rx.bufsize, GFP_ATOMIC);
+		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

 		/* If there is no memory we ignore the current RX'd frame,
 		 * tell hardware it can give us a new frame using the old
 		 * skb and put it at the tail of the sc->rx.rxbuf list for
 		 * processing. */
 		if (!requeue_skb)
 			goto requeue;

 		/* Unmap the frame */
 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
-				 sc->rx.bufsize,
+				 common->rx_bufsize,
 				 DMA_FROM_DEVICE);

 		skb_put(skb, rx_stats->rs_datalen);

 		/* see if any padding is done by the hw and remove it */
 		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
 		fc = hdr->frame_control;

--- 26 unchanged lines hidden ---
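The padding removal itself is among the hidden lines; what follows is a hedged sketch of the usual technique, assuming a local padsize variable alongside the hdrlen computed above, and not necessarily the exact elided code. The hardware pads the 802.11 header out to a 4-byte boundary before the payload, so the pad is removed by shifting the header down and trimming the front of the skb:

		/* sketch, assumed: strip the alignment pad the hardware
		 * inserted after the 802.11 header */
		padsize = hdrlen & 3;
		if (padsize && hdrlen >= 24) {
			memmove(skb->data + padsize, skb->data, hdrlen);
			skb_pull(skb, padsize);
		}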

 		    (rxs->flag & RX_FLAG_DECRYPTED) &&
 		    ieee80211_is_mgmt(fc))
 			/* Use software decrypt for management frames. */
 			rxs->flag &= ~RX_FLAG_DECRYPTED;

 		/* We will now give hardware our shiny new allocated skb */
 		bf->bf_mpdu = requeue_skb;
 		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
-						 sc->rx.bufsize,
+						 common->rx_bufsize,
 						 DMA_FROM_DEVICE);
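 		/* If the replacement skb cannot be DMA-mapped there is
 		 * nothing safe to hand back to the hardware: deliver the
 		 * frame we already unmapped and stop processing. */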
 		if (unlikely(dma_mapping_error(sc->dev,
 					       bf->bf_buf_addr))) {
 			dev_kfree_skb_any(requeue_skb);
 			bf->bf_mpdu = NULL;
 			ath_print(common, ATH_DBG_FATAL,
 				  "dma_mapping_error() on RX\n");
 			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
 			break;

--- 31 unchanged lines hidden ---