// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
#include <linux/export.h>

#include "socklib.h"

/*
 * Helper structure for copying from an sk_buff: tracks the current
 * read position and a running Internet checksum accumulator.
 */
struct xdr_skb_reader {
	struct sk_buff	*skb;		/* source socket buffer */
	unsigned int	offset;		/* current byte offset into @skb */
	size_t		count;		/* bytes remaining to be copied */
	__wsum		csum;		/* running checksum of copied data */
};

/*
 * Copy method used by xdr_partial_copy_from_skb(); implementations
 * either copy plainly or copy while accumulating a checksum.
 */
typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
				     size_t len);

/**
 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 *
 * Return: number of bytes actually copied (clamped to @desc->count),
 * or zero if skb_copy_bits() failed. On success, @desc's offset and
 * count are advanced past the copied region.
 */
static size_t
xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	/* Never copy beyond what the reader has left. */
	if (len > desc->count)
		len = desc->count;
	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as skb_read_bits, but calculate a checksum at the same time.
 *
 * Return: number of bytes copied (clamped to @desc->count). The
 * checksum of the copied range is folded into @desc->csum at the
 * correct byte offset via csum_block_add().
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	unsigned int pos;
	__wsum csum2;

	if (len > desc->count)
		len = desc->count;
	/* Remember the starting offset: csum_block_add() needs it to
	 * account for odd-byte alignment of this segment within the
	 * overall checksum. */
	pos = desc->offset;
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 * Walks the three regions of @xdr (head kvec, page array, tail kvec),
 * skipping the first @base bytes, and fills each region in turn from
 * @desc using @copy_actor. Stops early when the actor comes up short
 * or the reader runs out of data.
 *
 * Return: total number of bytes copied, or -ENOMEM if a sparse page
 * could not be allocated before anything was copied.
 */
static ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	ssize_t copied = 0;
	size_t ret;

	/* Region 1: the head kvec (skip or partially fill per @base). */
	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		/* Short copy or exhausted reader: we are done. */
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	/* Region 2: the page array. */
	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		/* Translate (@base + page_base) into a starting page
		 * pointer plus an intra-page offset. */
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
			/* Atomic-context-safe allocation; failure after
			 * partial progress returns the partial count. */
			*ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			/* First page may start mid-page. */
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	/* Region 3: the tail kvec (return value of the final copy is
	 * folded in unchecked; any shortfall shows up in desc->count). */
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}

/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec. -DaveM
 *
 * Return: 0 on success; -1 if the copy came up short, data remains
 * uncopied, or the accumulated checksum does not verify.
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader desc;

	desc.skb = skb;
	desc.offset = 0;
	desc.count = skb->len - desc.offset;

	/* Hardware (or the stack) already validated the checksum:
	 * take the plain-copy fast path. */
	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	/* Seed the accumulator; with offset 0 this is just skb->csum. */
	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		/* The xdr_buf was smaller than the skb: checksum the
		 * uncopied remainder so verification still covers the
		 * whole packet. */
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	/* A valid Internet checksum folds to zero. */
	if (csum_fold(desc.csum))
		return -1;
	/* NOTE: if hardware claimed CHECKSUM_COMPLETE but software
	 * verification was needed and disagreed, report the device. */
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev, skb);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);

/*
 * Send @msg on @sock, first skipping @seek bytes of already-sent data
 * in the iterator. Returns sock_sendmsg()'s result (bytes sent or a
 * negative errno).
 */
static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t seek)
{
	if (seek)
		iov_iter_advance(&msg->msg_iter, seek);
	return sock_sendmsg(sock, msg);
}

/* Send a single kvec, resuming @seek bytes in. */
static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
			  struct kvec *vec, size_t seek)
{
	iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
	return xprt_sendmsg(sock, msg, seek);
}

/* Send the page-array section of @xdr, resuming @base bytes into the
 * page data (page_base is folded into the seek, since the bvec covers
 * it). Caller must have populated xdr->bvec. */
static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
			      struct xdr_buf *xdr, size_t base)
{
	iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
		      xdr->page_len + xdr->page_base);
	return xprt_sendmsg(sock, msg, base + xdr->page_base);
}

/* Common case:
 * - stream transport
 * - sending from byte 0 of the message
 * - the message is wholly contained in @xdr's head iovec
 */
static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
				 rpc_fraghdr marker, struct kvec *vec,
				 size_t base)
{
	/* Prepend the 4-byte record marker to the head kvec so both go
	 * out in one sendmsg call. */
	struct kvec iov[2] = {
		[0] = {
			.iov_base = &marker,
			.iov_len = sizeof(marker)
		},
		[1] = *vec,
	};
	size_t len = iov[0].iov_len + iov[1].iov_len;

	iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
	return xprt_sendmsg(sock, msg, base);
}

/**
 * xprt_sock_sendmsg - write an xdr_buf directly to a socket
 * @sock: open socket to send on
 * @msg: socket message metadata
 * @xdr: xdr_buf containing this request
 * @base: starting position in the buffer
 * @marker: stream record marker field
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 * Sends the record marker (if nonzero), head kvec, page data, and tail
 * kvec in order, skipping the first @base bytes of the combined stream.
 * MSG_MORE is kept set on every section except the last, so the stack
 * can coalesce the pieces. A partial send returns with err > 0 folded
 * into @sent_p; the caller is expected to retry from the new @base.
 *
 * Return values:
 *   On success, returns zero and fills in @sent_p.
 *   %-ENOTSOCK if @sock is not a struct socket.
 */
int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
		      struct xdr_buf *xdr, unsigned int base,
		      rpc_fraghdr marker, unsigned int *sent_p)
{
	/* A zero marker means "no record marking" (datagram transport). */
	unsigned int rmsize = marker ? sizeof(marker) : 0;
	unsigned int remainder = rmsize + xdr->len - base;
	unsigned int want;
	int err = 0;

	*sent_p = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	msg->msg_flags |= MSG_MORE;
	/* Section 1: record marker + head kvec. */
	want = xdr->head[0].iov_len + rmsize;
	if (base < want) {
		unsigned int len = want - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		if (rmsize)
			err = xprt_send_rm_and_kvec(sock, msg, marker,
						    &xdr->head[0], base);
		else
			err = xprt_send_kvec(sock, msg, &xdr->head[0], base);
		/* Stop on error, short send, or nothing left to send. */
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= want;
	}

	/* Section 2: the page data. */
	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		err = xprt_send_pagedata(sock, msg, xdr, base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= xdr->page_len;
	}

	/* Section 3: the tail kvec — always the final section. */
	if (base >= xdr->tail[0].iov_len)
		return 0;
	msg->msg_flags &= ~MSG_MORE;
	err = xprt_send_kvec(sock, msg, &xdr->tail[0], base);
out:
	/* Positive err is bytes queued (possibly a short send): fold it
	 * into *sent_p and report success; negative err propagates. */
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}