/* xref: /openbmc/linux/net/rds/rdma.c (revision b04b4f78) */
/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rdma.h"

/*
 * XXX
 *  - build with sparse
 *  - should we limit the size of a mr region?  let transport return failure?
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
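 *
 * For example, with 4K pages, addr = 0x1234 and bytes = 0x2000 touch the
 * three pages at indices 1, 2 and 3:
 * ((0x1234 + 0x2000 + 0xfff) >> 12) - (0x1234 >> 12) = 4 - 1 = 3.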
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

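/*
 * Look up the MR with the given R_Key in a socket's rb-tree of MRs.
 * If @insert is non-NULL and the key is not already present, @insert is
 * linked into the tree and an extra reference is taken on it.
 * All callers in this file hold rs->rs_rdma_lock around the walk.
 */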
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		atomic_inc(&insert->r_refcount);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
			mr->r_key, atomic_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

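/*
 * Final teardown of an MR once its refcount has dropped to zero; this is
 * presumably reached via rds_mr_put() in rdma.h when the last reference
 * goes away.
 */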
void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;

	/* Release any MRs associated with this socket */
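	/* Each put here is expected to drop the last reference, which
	 * removes the node from the tree via rds_destroy_mr(), so the
	 * rb_first() loop below makes progress. */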
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = container_of(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rds_mr_put(mr);
	}

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
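 *
 * Returns the number of pages pinned (nr_pages on success) or a negative
 * errno.  A partial pin is treated as failure: the pages that were pinned
 * are released again and -EFAULT is returned.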
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, user_addr,
			     nr_pages, write, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (0 <= ret && (unsigned) ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
				u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (rs->rs_transport->get_mr == NULL) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (mr == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		/* drop the page references we just took; the common
		 * error path below never sees these pages */
		for (i = 0 ; i < nents; i++)
			put_page(pages[i]);
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
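	/* A sketch of the expected encoding (the helpers live in rdma.h):
	 * one 32-bit half of the cookie carries the R_Key, the other the
	 * offset within the first page, so rds_rdma_cookie_key() and
	 * rds_rdma_cookie_offset() can recover both on the send path. */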
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		atomic_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

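/*
 * rds_get_mr() backs the RDS_GET_MR socket option.  A hedged sketch of the
 * expected user-space call, assuming the definitions in <linux/rds.h>:
 *
 *	struct rds_get_mr_args args = {
 *		.vec         = { .addr = (u64) buf, .bytes = len },
 *		.cookie_addr = (u64) &cookie,
 *		.flags       = RDS_RDMA_USE_ONCE,
 *	};
 *	setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
 *
 * The cookie written back to user space names this region to the peer,
 * e.g. via the RDS_CMSG_RDMA_DEST control message.
 */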
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return.  If we let rds_mr_put() do it, it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;
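	/* zot_me: we removed the MR from the tree below, so we are the
	 * ones responsible for destroying it */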

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr && (mr->r_use_once || force)) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	} else if (mr)
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr != NULL) {
		if (mr->r_trans->sync_mr)
			mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

		/* If the MR was marked as invalidate, this will
		 * trigger an async flush. */
		if (zot_me)
			rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}

void rds_rdma_free_op(struct rds_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->r_nents; i++) {
		struct page *page = sg_page(&ro->r_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for a RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->r_write)
			set_page_dirty(page);
		put_page(page);
	}

	kfree(ro->r_notifier);
	kfree(ro);
}

/*
 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
 */
static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
					    struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_rdma_op *op = NULL;
	unsigned int nr_pages;
	unsigned int max_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec __user *local_vec;
	struct scatterlist *sg;
	unsigned int nr;
	unsigned int i, j;
	int ret;

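	/*
	 * The work below is done in two passes over the user's iovec: a
	 * first pass that only validates each rds_iovec and sizes the page
	 * array and rdma_op allocation, and a second pass that re-copies
	 * each vector, pins its pages and fills in the scatterlist.
	 */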
	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (args->nr_local > (u64)UINT_MAX) {
		ret = -EMSGSIZE;
		goto out;
	}

	nr_pages = 0;
	max_pages = 0;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		max_pages = max(nr, max_pages);
		nr_pages += nr;
	}

	pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
	if (op == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->r_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	sg_init_table(op->r_sg, nr_pages);

	if (op->r_notify || op->r_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->r_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->r_notifier->n_user_token = args->user_token;
		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->r_key = rds_rdma_cookie_key(args->cookie);
	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->r_key);

	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		rs->rs_user_addr = vec.addr;
		rs->rs_user_bytes = vec.bytes;

		/* did the user change the vec under us? */
		if (nr > max_pages || op->r_nents + nr > nr_pages) {
			ret = -EINVAL;
			goto out;
		}
		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
		if (ret < 0)
			goto out;

		rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
		       nr_bytes, nr, vec.bytes, vec.addr);

		nr_bytes += vec.bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = vec.addr & ~PAGE_MASK;

			sg = &op->r_sg[op->r_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
			       sg->offset, sg->length, vec.addr, vec.bytes);

			vec.addr += sg->length;
			vec.bytes -= sg->length;
		}

		op->r_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->r_bytes = nr_bytes;

	ret = 0;
out:
	kfree(pages);
	if (ret) {
		if (op)
			rds_rdma_free_op(op);
		op = ERR_PTR(ret);
	}
	return op;
}

/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
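 *
 * This is reached from the sendmsg() path for a control message of level
 * SOL_RDS and type RDS_CMSG_RDMA_ARGS whose payload is a struct
 * rds_rdma_args; the constant names here are assumed to match the ones
 * in <linux/rds.h>, so treat this as a sketch of the expected call path
 * rather than a guarantee.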
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	struct rds_rdma_op *op;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	 || rm->m_rdma_op != NULL)
		return -EINVAL;

	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
	if (IS_ERR(op))
		return PTR_ERR(op);
	rds_stats_inc(s_send_rdma);
	rm->m_rdma_op = op;
	return 0;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t))
	 || rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr == NULL)
		err = -EINVAL;	/* invalid r_key */
	else
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->m_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
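 *
 * The payload is the same struct rds_get_mr_args that rds_get_mr() takes
 * via setsockopt(); the cmsg type is presumably RDS_CMSG_RDMA_MAP as
 * defined in <linux/rds.h>.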
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args))
	 || rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
}