sge.c (628a604842b68cb0fc483e7cd5fcfb836be633d5) → sge.c (9e903e085262ffbf1fc44a17ac06058aca03524a)
1 /*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU

--- 282 unchanged lines hidden (view full) ---

291
292 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
293 if (dma_mapping_error(dev, *addr))
294 goto out_err;
295
296 si = skb_shinfo(skb);
297 end = &si->frags[si->nr_frags];
298 for (fp = si->frags; fp < end; fp++) {
299 *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
300 DMA_TO_DEVICE);
299 *++addr = dma_map_page(dev, fp->page, fp->page_offset,
300 skb_frag_size(fp), DMA_TO_DEVICE);
301 if (dma_mapping_error(dev, *addr))
302 goto unwind;
303 }
304 return 0;
305
306 unwind:
307 while (fp-- > si->frags)
308 dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
308 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
309 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
310
311 out_err:
312 return -ENOMEM;
313 }
314
315static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
316 const struct ulptx_sgl *sgl, const struct sge_txq *tq)

--- 577 unchanged lines hidden (view full) ---

894 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
895
896 len = skb_headlen(skb) - start;
897 if (likely(len)) {
898 sgl->len0 = htonl(len);
899 sgl->addr0 = cpu_to_be64(addr[0] + start);
900 nfrags++;
901 } else {
902 sgl->len0 = htonl(si->frags[0].size);
902 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
903 sgl->addr0 = cpu_to_be64(addr[1]);
904 }
905
906 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
907 ULPTX_NSGE(nfrags));
908 if (likely(--nfrags == 0))
909 return;
910 /*
911 * Most of the complexity below deals with the possibility we hit the
912 * end of the queue in the middle of writing the SGL. For this case
913 * only we create the SGL in a temporary buffer and then copy it.
914 */
915 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
916
917 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
918 to->len[0] = cpu_to_be32(si->frags[i].size);
919 to->len[1] = cpu_to_be32(si->frags[++i].size);
918 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
919 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
920 to->addr[0] = cpu_to_be64(addr[i]);
921 to->addr[1] = cpu_to_be64(addr[++i]);
922 }
923 if (nfrags) {
924 to->len[0] = cpu_to_be32(si->frags[i].size);
924 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
925 to->len[1] = cpu_to_be32(0);
926 to->addr[0] = cpu_to_be64(addr[i + 1]);
927 }
928 if (unlikely((u8 *)end > (u8 *)tq->stat)) {
929 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
930
931 if (likely(part0))
932 memcpy(sgl->sge, buf, part0);

--- 461 unchanged lines hidden (view full) ---

1394 if (unlikely(!skb))
1395 goto out;
1396 __skb_put(skb, pull_len);
1397 skb_copy_to_linear_data(skb, gl->va, pull_len);
1398
1399 ssi = skb_shinfo(skb);
1400 ssi->frags[0].page = gl->frags[0].page;
1401 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
1402 ssi->frags[0].size = gl->frags[0].size - pull_len;
1402 skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - pull_len);
1403 if (gl->nfrags > 1)
1404 memcpy(&ssi->frags[1], &gl->frags[1],
1405 (gl->nfrags-1) * sizeof(skb_frag_t));
1406 ssi->nr_frags = gl->nfrags;
1407
1408 skb->len = gl->tot_len;
1409 skb->data_len = skb->len - pull_len;
1410 skb->truesize += skb->data_len;

--- 35 unchanged lines hidden (view full) ---

1446 const struct pkt_gl *gl,
1447 unsigned int offset)
1448 {
1449 unsigned int n;
1450
1451 /* usually there's just one frag */
1452 si->frags[0].page = gl->frags[0].page;
1453 si->frags[0].page_offset = gl->frags[0].page_offset + offset;
1454 si->frags[0].size = gl->frags[0].size - offset;
1454 skb_frag_size_set(&si->frags[0], skb_frag_size(&gl->frags[0]) - offset);
1455 si->nr_frags = gl->nfrags;
1456
1457 n = gl->nfrags - 1;
1458 if (n)
1459 memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
1460
1461 /* get a reference to the last page, we don't own it */
1462 get_page(gl->frags[n].page);

--- 234 unchanged lines hidden (view full) ---

1697 */
1698 for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1699 BUG_ON(frag >= MAX_SKB_FRAGS);
1700 BUG_ON(rxq->fl.avail == 0);
1701 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1702 bufsz = get_buf_size(sdesc);
1703 fp->page = sdesc->page;
1704 fp->page_offset = rspq->offset;
1705 fp->size = min(bufsz, len);
1706 len -= fp->size;
1705 skb_frag_size_set(fp, min(bufsz, len));
1706 len -= skb_frag_size(fp);
1707 if (!len)
1708 break;
1709 unmap_rx_buf(rspq->adapter, &rxq->fl);
1710 }
1711 gl.nfrags = frag+1;
1712
1713 /*
1714 * Last buffer remains mapped so explicitly make it
1715 * coherent for CPU access and start preloading first
1716 * cache line ...
1717 */
1718 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1719 get_buf_addr(sdesc),
1720 fp->size, DMA_FROM_DEVICE);
1720 skb_frag_size(fp), DMA_FROM_DEVICE);
1721 gl.va = (page_address(gl.frags[0].page) +
1722 gl.frags[0].page_offset);
1723 prefetch(gl.va);
1724
1725 /*
1726 * Hand the new ingress packet to the handler for
1727 * this Response Queue.
1728 */
1729 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1730 if (likely(ret == 0))
1731 rspq->offset += ALIGN(fp->size, FL_ALIGN);
1731 rspq->offset += ALIGN(skb_frag_size(fp), FL_ALIGN);
1732 else
1733 restore_rx_bufs(&gl, &rxq->fl, frag);
1734 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1735 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1736 } else {
1737 WARN_ON(rsp_type > RSP_TYPE_CPL);
1738 ret = 0;
1739 }

--- 726 unchanged lines hidden ---
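
Every hunk in this diff makes the same substitution: direct accesses to the skb_frag_t size field (fp->size, si->frags[i].size) are replaced by the skb_frag_size() and skb_frag_size_set() accessors. Below is a minimal sketch of the transmit-side pattern, assuming a tree that already provides those helpers in include/linux/skbuff.h; the function name example_map_frags is invented for illustration and is not part of the driver.

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical helper, not part of sge.c: it mirrors the fragment loop
 * in map_skb() above, mapping each page fragment of an skb for transmit
 * DMA while reading the fragment length through skb_frag_size() instead
 * of the old fp->size field.  The page and page_offset fields are still
 * accessed directly, as in the hunks above.
 */
static int example_map_frags(struct device *dev, const struct sk_buff *skb,
			     dma_addr_t *addr)
{
	const struct skb_shared_info *si = skb_shinfo(skb);
	const skb_frag_t *fp, *end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++, addr++) {
		*addr = dma_map_page(dev, fp->page, fp->page_offset,
				     skb_frag_size(fp), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	/* undo the mappings that succeeded before the failure */
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
	return -ENOMEM;
}

On the receive side the same conversion goes through the setter, e.g. skb_frag_size_set(fp, min(bufsz, len)) in place of fp->size = min(bufsz, len) in the ingress gather-list loop around line 1705 above.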