1 /* 2 * linux/net/sunrpc/socklib.c 3 * 4 * Common socket helper routines for RPC client and server 5 * 6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 7 */ 8 9 #include <linux/compiler.h> 10 #include <linux/netdevice.h> 11 #include <linux/skbuff.h> 12 #include <linux/types.h> 13 #include <linux/pagemap.h> 14 #include <linux/udp.h> 15 #include <linux/sunrpc/xdr.h> 16 17 18 /** 19 * xdr_skb_read_bits - copy some data bits from skb to internal buffer 20 * @desc: sk_buff copy helper 21 * @to: copy destination 22 * @len: number of bytes to copy 23 * 24 * Possibly called several times to iterate over an sk_buff and copy 25 * data out of it. 26 */ 27 size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len) 28 { 29 if (len > desc->count) 30 len = desc->count; 31 if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len))) 32 return 0; 33 desc->count -= len; 34 desc->offset += len; 35 return len; 36 } 37 38 /** 39 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer 40 * @desc: sk_buff copy helper 41 * @to: copy destination 42 * @len: number of bytes to copy 43 * 44 * Same as skb_read_bits, but calculate a checksum at the same time. 
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	unsigned int pos;
	__wsum csum2;

	/* Clamp to whatever is left in the reader. */
	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	/* Copy and checksum this chunk in a single pass. */
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
	/* Fold the partial checksum in at byte offset @pos so the running
	 * checksum stays correct for odd offsets. */
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	ssize_t copied = 0;
	int ret;

	/* Stage 1: copy into (or skip past) the head iovec. */
	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		/* Stop early on a short copy or when the skb is exhausted. */
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	/* Stage 2: the page array (skipped entirely when base lands past it). */
	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		/* Translate base into a starting page and in-page offset. */
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(*ppage == NULL)) {
			/* GFP_ATOMIC: presumably reachable from atomic
			 * context, so no sleeping allocation here. */
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				/* Report -ENOMEM only if nothing was copied yet;
				 * otherwise return the partial count. */
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			/* First page only: honour the in-page offset. */
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	/* Stage 3: whatever remains goes into the tail iovec. */
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}

/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec.
-DaveM 148 */ 149 int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) 150 { 151 struct xdr_skb_reader desc; 152 153 desc.skb = skb; 154 desc.offset = sizeof(struct udphdr); 155 desc.count = skb->len - desc.offset; 156 157 if (skb_csum_unnecessary(skb)) 158 goto no_checksum; 159 160 desc.csum = csum_partial(skb->data, desc.offset, skb->csum); 161 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0) 162 return -1; 163 if (desc.offset != skb->len) { 164 __wsum csum2; 165 csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); 166 desc.csum = csum_block_add(desc.csum, csum2, desc.offset); 167 } 168 if (desc.count) 169 return -1; 170 if (csum_fold(desc.csum)) 171 return -1; 172 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) 173 netdev_rx_csum_fault(skb->dev); 174 return 0; 175 no_checksum: 176 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0) 177 return -1; 178 if (desc.count) 179 return -1; 180 return 0; 181 } 182