xref: /openbmc/linux/net/rds/rdma.c (revision ed1666f6)
/*
 * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
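
/*
 * Worked example (hypothetical values, assuming PAGE_SIZE == 4096): a vec
 * with addr = 0x1ff8 and bytes = 0x10 ends at 0x2008, so it touches pages
 * 0x1 and 0x2, and the function returns
 * ((0x2008 + 0xfff) >> 12) - (0x1ff8 >> 12) = 3 - 1 = 2.
 */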

static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		refcount_inc(&insert->r_refcount);
	}
	return NULL;
}
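
/*
 * Both call patterns used in this file, for reference:
 *
 *	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
 *	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
 *
 * The first is a pure lookup.  The second inserts 'mr' (and takes a
 * reference on it) only if the key is absent, returning NULL; if the key
 * is already present, the existing node is returned and 'mr' is untouched.
 */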

/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
			mr->r_key, refcount_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called, there can be no more ioctls in flight on
 * the socket, so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		rds_destroy_mr(mr);
		rds_mr_put(mr);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}
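
/*
 * Note the all-or-nothing semantics above: if get_user_pages_fast() pins
 * only some of the requested pages (say 2 of 4), the partial pins are
 * released again and -EFAULT is returned, so callers never see a short pin.
 */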

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}
	/* Restrict the size of the MR, irrespective of the underlying
	 * transport.  To account for unaligned MR regions, subtract one
	 * from nr_pages.
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport-specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key,
						 cp ? cp->cp_conn : NULL);

	if (IS_ERR(trans_private)) {
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page-aligned regions.  So we keep the offset, and build
	 * a 64-bit cookie containing <R_Key, offset> and pass that
	 * around. */
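	/* Hedged sketch of the cookie layout (per rds_rdma_make_cookie()
	 * in rds.h, which keeps the R_Key in the low 32 bits and the byte
	 * offset in the high 32): an r_key of 0x1234 with an in-page
	 * offset of 0x18 yields the cookie 0x0000001800001234. */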
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		refcount_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
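
/*
 * Hypothetical userspace sketch of the RDS_GET_MR path above, assuming a
 * bound SOL_RDS socket 'fd' and the uapi definitions from <linux/rds.h>
 * ('buf' and 'buf_len' are illustrative):
 *
 *	struct rds_get_mr_args args;
 *	uint64_t cookie;
 *
 *	args.vec.addr = (uint64_t)(unsigned long)buf;
 *	args.vec.bytes = buf_len;
 *	args.cookie_addr = (uint64_t)(unsigned long)&cookie;
 *	args.flags = RDS_RDMA_USE_ONCE;
 *	if (setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args)))
 *		perror("RDS_GET_MR");
 *
 * On success the kernel stores the <R_Key, offset> cookie in 'cookie',
 * ready to be handed to the peer (e.g. via RDS_CMSG_RDMA_DEST).
 */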

int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * Call rds_destroy_mr() ourselves so that we're sure it's done by
	 * the time we return.  If we let rds_mr_put() do it, it might not
	 * happen until someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}
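
/*
 * Hypothetical userspace sketch of freeing the MR behind a cookie
 * ('fd' and 'cookie' as in the RDS_GET_MR sketch above):
 *
 *	struct rds_free_mr_args fargs;
 *
 *	fargs.cookie = cookie;
 *	fargs.flags = RDS_RDMA_INVALIDATE;
 *	if (setsockopt(fd, SOL_RDS, RDS_FREE_MR, &fargs, sizeof(fargs)))
 *		perror("RDS_FREE_MR");
 *
 * Passing a zero cookie instead asks the transport to flush all unused
 * MRs, per the special case above.
 */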

/*
 * This is called when we receive an extension header that
 * tells us this MR was used.  It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note that we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me) {
		rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->op_nents; i++) {
		struct page *page = sg_page(&ro->op_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for an RDMA_READ, which copies from remote
		 * to local memory */
		if (!ro->op_write) {
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for an RDMA_READ, which copies from remote
	 * to local memory */
	set_page_dirty(page);
	put_page(page);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}


/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {

		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}
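
/*
 * Example of the sizing above: two local iovecs spanning 3 and 5 pages
 * respectively make this return 8 * sizeof(struct scatterlist), the extra
 * message space needed for the rdma op's sg array.
 */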

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op.
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
	if (!op->op_sg)
		goto out_pages;

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;

		/* Enable rdma notification on the data operation for
		 * composite rds messages, and make sure notification is
		 * enabled only for the data operation that follows it, so
		 * that the application gets notified only after the full
		 * message is delivered.
		 */
		if (rm->data.op_sg) {
			rm->rdma.op_notify = 0;
			rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
		}
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR).
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* no need to check: rds_rdma_pages() verified nr is nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if (ret < 0)
			goto out_pages;
		else
			ret = 0;

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
			       sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
	}
	op->op_bytes = nr_bytes;

out_pages:
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
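
/*
 * Hypothetical userspace sketch of issuing the RDMA op parsed above,
 * assuming a connected SOL_RDS socket 'fd', a remote cookie 'cookie', and
 * a local buffer described by a struct rds_iovec 'iov' (the msghdr setup
 * with control buffer, destination and payload is elided; names are
 * illustrative):
 *
 *	struct rds_rdma_args rargs = {
 *		.cookie = cookie,
 *		.remote_vec = { .addr = 0, .bytes = iov.bytes },
 *		.local_vec_addr = (uint64_t)(unsigned long)&iov,
 *		.nr_local = 1,
 *		.flags = RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
 *		.user_token = my_token,
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type = RDS_CMSG_RDMA_ARGS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(rargs));
 *	memcpy(CMSG_DATA(cmsg), &rargs, sizeof(rargs));
 *	sendmsg(fd, &msg, 0);
 *
 * Completion is later reported back as an RDS_CMSG_RDMA_STATUS cmsg
 * carrying 'user_token'.
 */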

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote.
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		refcount_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	 || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
	if (!rm->atomic.op_sg)
		goto err;

	/* verify the local address is 8-byte aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		put_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}
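
/*
 * Hypothetical userspace sketch of a compare-and-swap request handled
 * above (socket, cookie and msghdr setup as in the RDS_CMSG_RDMA_ARGS
 * sketch; 'local' must be an 8-byte aligned uint64_t):
 *
 *	struct rds_atomic_args aargs = {
 *		.cookie = cookie,
 *		.local_addr = (uint64_t)(unsigned long)&local,
 *		.remote_addr = remote_addr,
 *		.cswp = { .compare = old_val, .swap = new_val },
 *		.flags = RDS_RDMA_NOTIFY_ME,
 *		.user_token = my_token,
 *	};
 *
 * Sent as a cmsg of type RDS_CMSG_ATOMIC_CSWP at level SOL_RDS; the
 * value previously at remote_addr is written back into 'local'.
 */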
885