/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"

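/*
 * Initialize a newly allocated incoming-message descriptor for a
 * connection-level receive: take the initial reference, hook up the
 * owning connection and source address, and clear the RDMA cookie,
 * receive timestamp and latency-trace slots.
 */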
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr)
{
	int i;

	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;

	for (i = 0; i < RDS_RX_MAX_TRACES; i++)
		inc->i_rx_lat_trace[i] = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);

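/*
 * Multipath variant of rds_inc_init(): additionally records which
 * connection path the message arrived on.
 */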
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       __be32 saddr)
{
	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = cp->cp_conn;
	inc->i_conn_path = cp;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);

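/* Take an additional reference on an incoming message. */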
static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	refcount_inc(&inc->i_refcount);
}

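/*
 * Drop a reference on an incoming message; the final put hands the
 * message back to the transport that allocated it.
 */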
void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	if (refcount_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);

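/*
 * Account a change in the number of bytes queued on a socket and flip
 * the port's congestion bit when the socket's rcvbuf limit is crossed.
 * Uncongesting only once the queue drops below half the limit adds
 * hysteresis so the congestion state doesn't bounce.
 */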
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;
	if (delta > 0)
		rds_stats_add(s_recv_bytes_added_to_socket, delta);
	else
		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d now_cong %d delta %d\n",
		 rs, &rs->rs_bound_addr,
		 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
		 rds_sk_rcvbuf(rs), now_congested, delta);

	/* wasn't -> am congested */
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
	/* was -> aren't congested
	 * Require more free space before reporting uncongested to prevent
	 * bouncing cong/uncong state too often
	 */
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}

	/* do nothing if no change in cong state */
}

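/*
 * A changed (nonzero) peer generation number is how the TCP transport
 * detects that the peer has restarted: reset the per-path sequence
 * numbers and flag any queued retransmits to be flushed rather than
 * resent to the new instance of the peer.
 */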
static void rds_conn_peer_gen_update(struct rds_connection *conn,
				     u32 peer_gen_num)
{
	int i;
	struct rds_message *rm, *tmp;
	unsigned long flags;

	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
	if (peer_gen_num != 0) {
		if (conn->c_peer_gen_num != 0 &&
		    peer_gen_num != conn->c_peer_gen_num) {
			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				struct rds_conn_path *cp;

				cp = &conn->c_path[i];
				spin_lock_irqsave(&cp->cp_lock, flags);
				cp->cp_next_tx_seq = 1;
				cp->cp_next_rx_seq = 0;
				list_for_each_entry_safe(rm, tmp,
							 &cp->cp_retrans,
							 m_conn_item) {
					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
				}
				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
		conn->c_peer_gen_num = peer_gen_num;
	}
}

/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
			/* We ignore the size for now. We could stash it
			 * somewhere and use it for error checking. */
			inc->i_rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

			break;
		}
	}
}

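/*
 * Parse the extension headers carried by a handshake probe: the
 * peer's path count (RDS_EXTHDR_NPATHS) and, for TCP, its generation
 * number (RDS_EXTHDR_GEN_NUM).
 */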
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
				struct rds_connection *conn)
{
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		u16 rds_npaths;
		u32 rds_gen_num;
	} buffer;
	u32 new_peer_gen_num = 0;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_NPATHS:
			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
					       be16_to_cpu(buffer.rds_npaths));
			break;
		case RDS_EXTHDR_GEN_NUM:
			new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
			break;
		default:
			pr_warn_ratelimited("ignoring unknown exthdr type 0x%x\n",
					    type);
		}
	}
	/* if RDS_EXTHDR_NPATHS was not found, default to a single path */
	conn->c_npaths = max_t(int, conn->c_npaths, 1);
	conn->c_ping_triggered = 0;
	rds_conn_peer_gen_update(conn, new_peer_gen_num);
}

/* rds_start_mprds() will synchronously start multiple paths when appropriate.
 * The scheme is based on the following rules:
 *
 * 1. rds_sendmsg on the first connect attempt sends the probe ping, with the
 *    sender's npaths (s_npaths).
 * 2. The receiver of the probe-ping knows mprds_paths = min(s_npaths,
 *    r_npaths).  It sends back a probe-pong with r_npaths.  After that, if
 *    the receiver is the smaller IP addr, it starts
 *    rds_conn_path_connect_if_down on all mprds_paths.
 * 3. The sender gets woken up, and can move to rds_conn_path_connect_if_down.
 *    If it is the smaller IP addr, rds_conn_path_connect_if_down can be
 *    called after reception of the probe-pong on all mprds_paths.
 *    Otherwise (the sender of the probe-ping is not the smaller IP addr):
 *    just call rds_conn_path_connect_if_down on the hashed path (see rule 4).
 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
 * 5. The sender may end up queuing the packet on the cp; it will be sent out
 *    later, when the connection is completed.
 */
static void rds_start_mprds(struct rds_connection *conn)
{
	int i;
	struct rds_conn_path *cp;

	if (conn->c_npaths > 1 &&
	    IS_CANONICAL(conn->c_laddr, conn->c_faddr)) {
		for (i = 0; i < conn->c_npaths; i++) {
			cp = &conn->c_path[i];
			rds_conn_path_connect_if_down(cp);
		}
	}
}

/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow, which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, which only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;
	struct rds_conn_path *cp;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;
	if (conn->c_trans->t_mp_capable)
		cp = inc->i_conn_path;
	else
		cp = &conn->c_path[0];

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u flags 0x%x rx_jiffies %lu\n",
		 conn,
		 (unsigned long long)cp->cp_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);

	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		if (inc->i_hdr.h_sport == 0) {
			rdsdebug("ignore ping with 0 sport from %pI4\n",
				 &saddr);
			goto out;
		}
		rds_stats_inc(s_recv_ping);
		rds_send_pong(cp, inc->i_hdr.h_sport);
		/* if this is a handshake ping, start multipath if necessary */
		if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
				 be16_to_cpu(inc->i_hdr.h_dport))) {
			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
			rds_start_mprds(cp->cp_conn);
		}
		goto out;
	}

	if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT &&
	    inc->i_hdr.h_sport == 0) {
		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
		/* if this is a handshake pong, start multipath if necessary */
		rds_start_mprds(cp->cp_conn);
		wake_up(&cp->cp_conn->c_hs_waitq);
		goto out;
	}

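	/*
	 * Data message: find the socket bound to the destination port.
	 * rds_find_bound() takes a socket reference; it is dropped at
	 * "out" below.
	 */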
	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			do_gettimeofday(&inc->i_rx_tstamp);
		rds_inc_addref(inc);
		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);

/*
 * Be very careful here.  This is called as the condition in
 * wait_event_*() and so needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming,
					  i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}

	return *inc != NULL;
}

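/*
 * Check, under the receive lock, whether an inc is still on the
 * socket's receive queue (i.e. nobody raced us to deliver it), and
 * dequeue it when the caller isn't just peeking.
 */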
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
			/* XXX make sure this i_conn is reliable */
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			rds_inc_put(inc);
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}

/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	/* put_cmsg copies to user space and thus may sleep. We can't do this
	 * with rs_lock held, so first grab as many notifications as we can
	 * stuff in the user provided cmsg buffer. We don't try to copy more,
	 * to avoid losing notifications - except when the buffer is so small
	 * that it wouldn't even hold a single notification. Then we give the
	 * caller as much of this single message as we can squeeze in, and set
	 * MSG_CTRUNC.
	 */
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}

	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;

	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}

	/* If we bailed out because of an error in put_cmsg,
	 * we may be left with one or more notifications that we
	 * didn't process. Return them to the head of the list. */
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}

/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
			sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}

/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
			 struct rds_sock *rs)
{
	int ret = 0;

	if (inc->i_rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
				sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
		if (ret)
			goto out;
	}

	if ((inc->i_rx_tstamp.tv_sec != 0) &&
	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
		ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
			       sizeof(struct timeval),
			       &inc->i_rx_tstamp);
		if (ret)
			goto out;
	}

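	/*
	 * If the application asked for receive-path latency tracing,
	 * report the deltas between the sampled trace points.
	 */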
	if (rs->rs_rx_traces) {
		struct rds_cmsg_rx_trace t;
		int i, j;

		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
		t.rx_traces = rs->rs_rx_traces;
		for (i = 0; i < rs->rs_rx_traces; i++) {
			j = rs->rs_rx_trace[i];
			t.rx_trace_pos[i] = j;
			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
					inc->i_rx_lat_trace[j];
		}

		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
			       sizeof(t), &t);
		if (ret)
			goto out;
	}

out:
	return ret;
}

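/*
 * Deliver the next message, RDMA notification or congestion update
 * queued on the socket, blocking if allowed until one shows up.
 */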
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;

	while (1) {
		/* If there are pending notifications, do those - and nothing else */
		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

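		/*
		 * Nothing queued yet: fail fast for non-blocking reads,
		 * otherwise sleep until a notification, congestion update
		 * or message arrives.
		 */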
		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				ret = -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}

		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;

		/*
		 * if the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			iov_iter_revert(&msg->msg_iter, ret);
			continue;
		}

		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg, rs)) {
			ret = -EFAULT;
			goto out;
		}

		rds_stats_inc(s_recv_delivered);

		if (sin) {
			sin->sin_family = AF_INET;
			sin->sin_port = inc->i_hdr.h_sport;
			sin->sin_addr.s_addr = inc->i_saddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			msg->msg_namelen = sizeof(*sin);
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);

out:
	return ret;
}

/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}

/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);

	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}