--- base.c (2071a0084a0323697b7d6fd5a98982194bd6929f)
+++ base.c (cc861f7468724e66567baf087b4e413e91b18150)

 /*-
  * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
  * Copyright (c) 2004-2005 Atheros Communications, Inc.
  * Copyright (c) 2006 Devicescape Software, Inc.
  * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
  * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
  *
  * All rights reserved.

[... 309 unchanged lines hidden ...]

 			PCI_DMA_TODEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
 }

 static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
 				struct ath5k_buf *bf)
 {
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
+
 	BUG_ON(!bf);
 	if (!bf->skb)
 		return;
-	pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
+	pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
 			PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
 }


 /* Queues setup */
 static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc,

[... 838 unchanged lines hidden ...]

 	struct ath_common *common = ath5k_hw_common(sc->ah);
 	struct sk_buff *skb;

 	/*
 	 * Allocate buffer with headroom_needed space for the
 	 * fake physical layer header at the start.
 	 */
 	skb = ath_rxbuf_alloc(common,
-			      sc->rxbufsize + common->cachelsz - 1,
+			      common->rx_bufsize + common->cachelsz - 1,
 			      GFP_ATOMIC);

 	if (!skb) {
 		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
-			  sc->rxbufsize + common->cachelsz - 1);
+			  common->rx_bufsize + common->cachelsz - 1);
 		return NULL;
 	}

 	*skb_addr = pci_map_single(sc->pdev,
-				   skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
+				   skb->data, common->rx_bufsize,
+				   PCI_DMA_FROMDEVICE);
 	if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
 		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
 		dev_kfree_skb(skb);
 		return NULL;
 	}
 	return skb;
 }

[... 423 unchanged lines hidden ...]

 static int
 ath5k_rx_start(struct ath5k_softc *sc)
 {
 	struct ath5k_hw *ah = sc->ah;
 	struct ath_common *common = ath5k_hw_common(ah);
 	struct ath5k_buf *bf;
 	int ret;

-	sc->rxbufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
+	common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);

-	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n",
-		  common->cachelsz, sc->rxbufsize);
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
+		  common->cachelsz, common->rx_bufsize);

 	spin_lock_bh(&sc->rxbuflock);
 	sc->rxlink = NULL;
 	list_for_each_entry(bf, &sc->rxbuf, list) {
 		ret = ath5k_rxbuf_setup(sc, bf);
 		if (ret != 0) {
 			spin_unlock_bh(&sc->rxbuflock);
 			goto err;

[... 118 unchanged lines hidden ...]

 static void
 ath5k_tasklet_rx(unsigned long data)
 {
 	struct ieee80211_rx_status *rxs;
 	struct ath5k_rx_status rs = {};
 	struct sk_buff *skb, *next_skb;
 	dma_addr_t next_skb_addr;
 	struct ath5k_softc *sc = (void *)data;
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
 	struct ath5k_buf *bf;
 	struct ath5k_desc *ds;
 	int ret;
 	int hdrlen;
 	int padsize;
 	int rx_flag;

 	spin_lock(&sc->rxbuflock);

[... 61 unchanged lines hidden ...]

 	/*
 	 * If we can't replace bf->skb with a new skb under memory
 	 * pressure, just skip this packet
 	 */
 	if (!next_skb)
 		goto next;

-	pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
+	pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
 			PCI_DMA_FROMDEVICE);
 	skb_put(skb, rs.rs_datalen);

 	/* The MAC header is padded to have 32-bit boundary if the
 	 * packet payload is non-zero. The general calculation for
 	 * padsize would take into account odd header lengths:
 	 * padsize = (4 - hdrlen % 4) % 4; However, since only
 	 * even-length headers are used, padding can only be 0 or 2

[... 1409 unchanged lines hidden ...]
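
For reference, below is a minimal user-space sketch of the buffer-sizing scheme the new revision settles on: the RX DMA buffer length is computed once from the maximum frame length rounded up to the cache line size, stored in the shared ath_common, and the same common->rx_bufsize value is then read at every allocation, map and unmap site (ath5k_rxbuf_free(), ath5k_rx_skb_alloc(), ath5k_tasklet_rx()). Only the names ath_common, rx_bufsize, cachelsz and ath5k_hw_common() come from the diff; the struct layouts, the roundup() macro and the IEEE80211_MAX_LEN value here are stand-ins for illustration, not the kernel's definitions.

/*
 * Standalone sketch (assumptions as noted above): rx_bufsize lives in the
 * shared ath_common and is derived once by rounding the maximum 802.11
 * frame length up to the cache line size, so every user of the buffer
 * length reads the same value.
 */
#include <stdio.h>

#define IEEE80211_MAX_LEN 2352		/* assumed maximum frame length */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

struct ath_common {			/* stand-in for the shared state */
	unsigned int cachelsz;		/* cache line size in bytes */
	unsigned int rx_bufsize;	/* RX DMA buffer size */
};

struct ath5k_hw {			/* stand-in for the hw struct */
	struct ath_common common;
};

/* mirrors the ath5k_hw_common() accessor used throughout the diff */
static struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
{
	return &ah->common;
}

int main(void)
{
	struct ath5k_hw hw = { .common = { .cachelsz = 32 } };
	struct ath_common *common = ath5k_hw_common(&hw);

	/* same computation as in ath5k_rx_start() in the new revision */
	common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);

	printf("cachelsz %u rx_bufsize %u\n",
	       common->cachelsz, common->rx_bufsize);
	return 0;
}

The change itself is simply that the buffer size moves out of the driver-private ath5k_softc (sc->rxbufsize) into the shared ath_common (common->rx_bufsize), so the mapping and unmapping calls in all three functions read the length from one place instead of a per-driver copy.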