xref: /openbmc/linux/net/rds/send.c (revision d5532ee7)
1 /*
2  * Copyright (c) 2006 Oracle.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 #include <linux/kernel.h>
34 #include <linux/gfp.h>
35 #include <net/sock.h>
36 #include <linux/in.h>
37 #include <linux/list.h>
38 
39 #include "rds.h"
40 #include "rdma.h"
41 
42 /* When transmitting messages in rds_send_xmit, we need to emerge from
43  * time to time and briefly release the CPU. Otherwise the softlock watchdog
44  * will kick our shin.
45  * Also, it seems fairer to not let one busy connection stall all the
46  * others.
47  *
48  * send_batch_count is the number of times we'll loop in send_xmit. Setting
49  * it to 0 will restore the old behavior (where we looped until we had
50  * drained the queue).
51  */
52 static int send_batch_count = 64;
53 module_param(send_batch_count, int, 0444);
54 MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
55 
56 /*
57  * Reset the send state. Caller must hold c_send_lock when calling here.
58  */
59 void rds_send_reset(struct rds_connection *conn)
60 {
61 	struct rds_message *rm, *tmp;
62 	unsigned long flags;
63 
64 	if (conn->c_xmit_rm) {
65 		/* Tell the user the RDMA op is no longer mapped by the
66 		 * transport. This isn't entirely true (it's flushed out
67 		 * independently) but as the connection is down, there's
68 		 * no ongoing RDMA to/from that memory */
69 		rds_message_unmapped(conn->c_xmit_rm);
70 		rds_message_put(conn->c_xmit_rm);
71 		conn->c_xmit_rm = NULL;
72 	}
73 	conn->c_xmit_sg = 0;
74 	conn->c_xmit_hdr_off = 0;
75 	conn->c_xmit_data_off = 0;
76 	conn->c_xmit_rdma_sent = 0;
77 
78 	conn->c_map_queued = 0;
79 
80 	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
81 	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
82 
83 	/* Mark messages as retransmissions, and move them to the send q */
84 	spin_lock_irqsave(&conn->c_lock, flags);
85 	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
86 		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
87 		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
88 	}
89 	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
90 	spin_unlock_irqrestore(&conn->c_lock, flags);
91 }
92 
93 /*
94  * We're making the concious trade-off here to only send one message
95  * down the connection at a time.
96  *   Pro:
97  *      - tx queueing is a simple fifo list
98  *   	- reassembly is optional and easily done by transports per conn
99  *      - no per flow rx lookup at all, straight to the socket
100  *   	- less per-frag memory and wire overhead
101  *   Con:
102  *      - queued acks can be delayed behind large messages
103  *   Depends:
104  *      - small message latency is higher behind queued large messages
105  *      - large message latency isn't starved by intervening small sends
106  */
107 int rds_send_xmit(struct rds_connection *conn)
108 {
109 	struct rds_message *rm;
110 	unsigned long flags;
111 	unsigned int tmp;
112 	unsigned int send_quota = send_batch_count;
113 	struct scatterlist *sg;
114 	int ret = 0;
115 	int was_empty = 0;
116 	LIST_HEAD(to_be_dropped);
117 
118 	/*
119 	 * sendmsg calls here after having queued its message on the send
120 	 * queue.  We only have one task feeding the connection at a time.  If
121 	 * another thread is already feeding the queue then we back off.  This
122 	 * avoids blocking the caller and trading per-connection data between
123 	 * caches per message.
124 	 *
125 	 * The sem holder will issue a retry if they notice that someone queued
126 	 * a message after they stopped walking the send queue but before they
127 	 * dropped the sem.
128 	 */
129 	if (!mutex_trylock(&conn->c_send_lock)) {
130 		rds_stats_inc(s_send_sem_contention);
131 		ret = -ENOMEM;
132 		goto out;
133 	}
134 
135 	if (conn->c_trans->xmit_prepare)
136 		conn->c_trans->xmit_prepare(conn);
137 
138 	/*
139 	 * spin trying to push headers and data down the connection until
140 	 * the connection doens't make forward progress.
141 	 */
142 	while (--send_quota) {
143 		/*
144 		 * See if need to send a congestion map update if we're
145 		 * between sending messages.  The send_sem protects our sole
146 		 * use of c_map_offset and _bytes.
147 		 * Note this is used only by transports that define a special
148 		 * xmit_cong_map function. For all others, we create allocate
149 		 * a cong_map message and treat it just like any other send.
150 		 */
151 		if (conn->c_map_bytes) {
152 			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
153 						conn->c_map_offset);
154 			if (ret <= 0)
155 				break;
156 
157 			conn->c_map_offset += ret;
158 			conn->c_map_bytes -= ret;
159 			if (conn->c_map_bytes)
160 				continue;
161 		}
162 
163 		/* If we're done sending the current message, clear the
164 		 * offset and S/G temporaries.
165 		 */
166 		rm = conn->c_xmit_rm;
167 		if (rm != NULL &&
168 		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
169 		    conn->c_xmit_sg == rm->m_nents) {
170 			conn->c_xmit_rm = NULL;
171 			conn->c_xmit_sg = 0;
172 			conn->c_xmit_hdr_off = 0;
173 			conn->c_xmit_data_off = 0;
174 			conn->c_xmit_rdma_sent = 0;
175 
176 			/* Release the reference to the previous message. */
177 			rds_message_put(rm);
178 			rm = NULL;
179 		}
180 
181 		/* If we're asked to send a cong map update, do so.
182 		 */
183 		if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
184 			if (conn->c_trans->xmit_cong_map != NULL) {
185 				conn->c_map_offset = 0;
186 				conn->c_map_bytes = sizeof(struct rds_header) +
187 					RDS_CONG_MAP_BYTES;
188 				continue;
189 			}
190 
191 			rm = rds_cong_update_alloc(conn);
192 			if (IS_ERR(rm)) {
193 				ret = PTR_ERR(rm);
194 				break;
195 			}
196 
197 			conn->c_xmit_rm = rm;
198 		}
199 
200 		/*
201 		 * Grab the next message from the send queue, if there is one.
202 		 *
203 		 * c_xmit_rm holds a ref while we're sending this message down
204 		 * the connction.  We can use this ref while holding the
205 		 * send_sem.. rds_send_reset() is serialized with it.
206 		 */
207 		if (rm == NULL) {
208 			unsigned int len;
209 
210 			spin_lock_irqsave(&conn->c_lock, flags);
211 
212 			if (!list_empty(&conn->c_send_queue)) {
213 				rm = list_entry(conn->c_send_queue.next,
214 						struct rds_message,
215 						m_conn_item);
216 				rds_message_addref(rm);
217 
218 				/*
219 				 * Move the message from the send queue to the retransmit
220 				 * list right away.
221 				 */
222 				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
223 			}
224 
225 			spin_unlock_irqrestore(&conn->c_lock, flags);
226 
227 			if (rm == NULL) {
228 				was_empty = 1;
229 				break;
230 			}
231 
232 			/* Unfortunately, the way Infiniband deals with
233 			 * RDMA to a bad MR key is by moving the entire
234 			 * queue pair to error state. We cold possibly
235 			 * recover from that, but right now we drop the
236 			 * connection.
237 			 * Therefore, we never retransmit messages with RDMA ops.
238 			 */
239 			if (rm->m_rdma_op &&
240 			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
241 				spin_lock_irqsave(&conn->c_lock, flags);
242 				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
243 					list_move(&rm->m_conn_item, &to_be_dropped);
244 				spin_unlock_irqrestore(&conn->c_lock, flags);
245 				rds_message_put(rm);
246 				continue;
247 			}
248 
249 			/* Require an ACK every once in a while */
250 			len = ntohl(rm->m_inc.i_hdr.h_len);
251 			if (conn->c_unacked_packets == 0 ||
252 			    conn->c_unacked_bytes < len) {
253 				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
254 
255 				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
256 				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
257 				rds_stats_inc(s_send_ack_required);
258 			} else {
259 				conn->c_unacked_bytes -= len;
260 				conn->c_unacked_packets--;
261 			}
262 
263 			conn->c_xmit_rm = rm;
264 		}
265 
266 		/*
267 		 * Try and send an rdma message.  Let's see if we can
268 		 * keep this simple and require that the transport either
269 		 * send the whole rdma or none of it.
270 		 */
271 		if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
272 			ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
273 			if (ret)
274 				break;
275 			conn->c_xmit_rdma_sent = 1;
276 			/* The transport owns the mapped memory for now.
277 			 * You can't unmap it while it's on the send queue */
278 			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
279 		}
280 
281 		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
282 		    conn->c_xmit_sg < rm->m_nents) {
283 			ret = conn->c_trans->xmit(conn, rm,
284 						  conn->c_xmit_hdr_off,
285 						  conn->c_xmit_sg,
286 						  conn->c_xmit_data_off);
287 			if (ret <= 0)
288 				break;
289 
290 			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
291 				tmp = min_t(int, ret,
292 					    sizeof(struct rds_header) -
293 					    conn->c_xmit_hdr_off);
294 				conn->c_xmit_hdr_off += tmp;
295 				ret -= tmp;
296 			}
297 
298 			sg = &rm->m_sg[conn->c_xmit_sg];
299 			while (ret) {
300 				tmp = min_t(int, ret, sg->length -
301 						      conn->c_xmit_data_off);
302 				conn->c_xmit_data_off += tmp;
303 				ret -= tmp;
304 				if (conn->c_xmit_data_off == sg->length) {
305 					conn->c_xmit_data_off = 0;
306 					sg++;
307 					conn->c_xmit_sg++;
308 					BUG_ON(ret != 0 &&
309 					       conn->c_xmit_sg == rm->m_nents);
310 				}
311 			}
312 		}
313 	}
314 
315 	/* Nuke any messages we decided not to retransmit. */
316 	if (!list_empty(&to_be_dropped))
317 		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
318 
319 	if (conn->c_trans->xmit_complete)
320 		conn->c_trans->xmit_complete(conn);
321 
322 	/*
323 	 * We might be racing with another sender who queued a message but
324 	 * backed off on noticing that we held the c_send_lock.  If we check
325 	 * for queued messages after dropping the sem then either we'll
326 	 * see the queued message or the queuer will get the sem.  If we
327 	 * notice the queued message then we trigger an immediate retry.
328 	 *
329 	 * We need to be careful only to do this when we stopped processing
330 	 * the send queue because it was empty.  It's the only way we
331 	 * stop processing the loop when the transport hasn't taken
332 	 * responsibility for forward progress.
333 	 */
334 	mutex_unlock(&conn->c_send_lock);
335 
336 	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
337 		/* We exhausted the send quota, but there's work left to
338 		 * do. Return and (re-)schedule the send worker.
339 		 */
340 		ret = -EAGAIN;
341 	}
342 
343 	if (ret == 0 && was_empty) {
344 		/* A simple bit test would be way faster than taking the
345 		 * spin lock */
346 		spin_lock_irqsave(&conn->c_lock, flags);
347 		if (!list_empty(&conn->c_send_queue)) {
348 			rds_stats_inc(s_send_sem_queue_raced);
349 			ret = -EAGAIN;
350 		}
351 		spin_unlock_irqrestore(&conn->c_lock, flags);
352 	}
353 out:
354 	return ret;
355 }
356 
357 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
358 {
359 	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
360 
361 	assert_spin_locked(&rs->rs_lock);
362 
363 	BUG_ON(rs->rs_snd_bytes < len);
364 	rs->rs_snd_bytes -= len;
365 
366 	if (rs->rs_snd_bytes == 0)
367 		rds_stats_inc(s_send_queue_empty);
368 }
369 
370 static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
371 				    is_acked_func is_acked)
372 {
373 	if (is_acked)
374 		return is_acked(rm, ack);
375 	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
376 }
377 
378 /*
379  * Returns true if there are no messages on the send and retransmit queues
380  * which have a sequence number greater than or equal to the given sequence
381  * number.
382  */
383 int rds_send_acked_before(struct rds_connection *conn, u64 seq)
384 {
385 	struct rds_message *rm, *tmp;
386 	int ret = 1;
387 
388 	spin_lock(&conn->c_lock);
389 
390 	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
391 		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
392 			ret = 0;
393 		break;
394 	}
395 
396 	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
397 		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
398 			ret = 0;
399 		break;
400 	}
401 
402 	spin_unlock(&conn->c_lock);
403 
404 	return ret;
405 }
406 
407 /*
408  * This is pretty similar to what happens below in the ACK
409  * handling code - except that we call here as soon as we get
410  * the IB send completion on the RDMA op and the accompanying
411  * message.
412  */
413 void rds_rdma_send_complete(struct rds_message *rm, int status)
414 {
415 	struct rds_sock *rs = NULL;
416 	struct rds_rdma_op *ro;
417 	struct rds_notifier *notifier;
418 
419 	spin_lock(&rm->m_rs_lock);
420 
421 	ro = rm->m_rdma_op;
422 	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
423 	    ro && ro->r_notify && ro->r_notifier) {
424 		notifier = ro->r_notifier;
425 		rs = rm->m_rs;
426 		sock_hold(rds_rs_to_sk(rs));
427 
428 		notifier->n_status = status;
429 		spin_lock(&rs->rs_lock);
430 		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
431 		spin_unlock(&rs->rs_lock);
432 
433 		ro->r_notifier = NULL;
434 	}
435 
436 	spin_unlock(&rm->m_rs_lock);
437 
438 	if (rs) {
439 		rds_wake_sk_sleep(rs);
440 		sock_put(rds_rs_to_sk(rs));
441 	}
442 }
443 EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
444 
445 /*
446  * This is the same as rds_rdma_send_complete except we
447  * don't do any locking - we have all the ingredients (message,
448  * socket, socket lock) and can just move the notifier.
449  */
450 static inline void
451 __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
452 {
453 	struct rds_rdma_op *ro;
454 
455 	ro = rm->m_rdma_op;
456 	if (ro && ro->r_notify && ro->r_notifier) {
457 		ro->r_notifier->n_status = status;
458 		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
459 		ro->r_notifier = NULL;
460 	}
461 
462 	/* No need to wake the app - caller does this */
463 }
464 
465 /*
466  * This is called from the IB send completion when we detect
467  * a RDMA operation that failed with remote access error.
468  * So speed is not an issue here.
469  */
470 struct rds_message *rds_send_get_message(struct rds_connection *conn,
471 					 struct rds_rdma_op *op)
472 {
473 	struct rds_message *rm, *tmp, *found = NULL;
474 	unsigned long flags;
475 
476 	spin_lock_irqsave(&conn->c_lock, flags);
477 
478 	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
479 		if (rm->m_rdma_op == op) {
480 			atomic_inc(&rm->m_refcount);
481 			found = rm;
482 			goto out;
483 		}
484 	}
485 
486 	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
487 		if (rm->m_rdma_op == op) {
488 			atomic_inc(&rm->m_refcount);
489 			found = rm;
490 			break;
491 		}
492 	}
493 
494 out:
495 	spin_unlock_irqrestore(&conn->c_lock, flags);
496 
497 	return found;
498 }
499 EXPORT_SYMBOL_GPL(rds_send_get_message);
500 
501 /*
502  * This removes messages from the socket's list if they're on it.  The list
503  * argument must be private to the caller, we must be able to modify it
504  * without locks.  The messages must have a reference held for their
505  * position on the list.  This function will drop that reference after
506  * removing the messages from the 'messages' list regardless of if it found
507  * the messages on the socket list or not.
508  */
509 void rds_send_remove_from_sock(struct list_head *messages, int status)
510 {
511 	unsigned long flags;
512 	struct rds_sock *rs = NULL;
513 	struct rds_message *rm;
514 
515 	while (!list_empty(messages)) {
516 		int was_on_sock = 0;
517 
518 		rm = list_entry(messages->next, struct rds_message,
519 				m_conn_item);
520 		list_del_init(&rm->m_conn_item);
521 
522 		/*
523 		 * If we see this flag cleared then we're *sure* that someone
524 		 * else beat us to removing it from the sock.  If we race
525 		 * with their flag update we'll get the lock and then really
526 		 * see that the flag has been cleared.
527 		 *
528 		 * The message spinlock makes sure nobody clears rm->m_rs
529 		 * while we're messing with it. It does not prevent the
530 		 * message from being removed from the socket, though.
531 		 */
532 		spin_lock_irqsave(&rm->m_rs_lock, flags);
533 		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
534 			goto unlock_and_drop;
535 
536 		if (rs != rm->m_rs) {
537 			if (rs) {
538 				rds_wake_sk_sleep(rs);
539 				sock_put(rds_rs_to_sk(rs));
540 			}
541 			rs = rm->m_rs;
542 			sock_hold(rds_rs_to_sk(rs));
543 		}
544 		spin_lock(&rs->rs_lock);
545 
546 		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
547 			struct rds_rdma_op *ro = rm->m_rdma_op;
548 			struct rds_notifier *notifier;
549 
550 			list_del_init(&rm->m_sock_item);
551 			rds_send_sndbuf_remove(rs, rm);
552 
553 			if (ro && ro->r_notifier && (status || ro->r_notify)) {
554 				notifier = ro->r_notifier;
555 				list_add_tail(&notifier->n_list,
556 						&rs->rs_notify_queue);
557 				if (!notifier->n_status)
558 					notifier->n_status = status;
559 				rm->m_rdma_op->r_notifier = NULL;
560 			}
561 			was_on_sock = 1;
562 			rm->m_rs = NULL;
563 		}
564 		spin_unlock(&rs->rs_lock);
565 
566 unlock_and_drop:
567 		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
568 		rds_message_put(rm);
569 		if (was_on_sock)
570 			rds_message_put(rm);
571 	}
572 
573 	if (rs) {
574 		rds_wake_sk_sleep(rs);
575 		sock_put(rds_rs_to_sk(rs));
576 	}
577 }
578 
579 /*
580  * Transports call here when they've determined that the receiver queued
581  * messages up to, and including, the given sequence number.  Messages are
582  * moved to the retrans queue when rds_send_xmit picks them off the send
583  * queue. This means that in the TCP case, the message may not have been
584  * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
585  * checks the RDS_MSG_HAS_ACK_SEQ bit.
586  *
587  * XXX It's not clear to me how this is safely serialized with socket
588  * destruction.  Maybe it should bail if it sees SOCK_DEAD.
589  */
590 void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
591 			 is_acked_func is_acked)
592 {
593 	struct rds_message *rm, *tmp;
594 	unsigned long flags;
595 	LIST_HEAD(list);
596 
597 	spin_lock_irqsave(&conn->c_lock, flags);
598 
599 	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
600 		if (!rds_send_is_acked(rm, ack, is_acked))
601 			break;
602 
603 		list_move(&rm->m_conn_item, &list);
604 		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
605 	}
606 
607 	/* order flag updates with spin locks */
608 	if (!list_empty(&list))
609 		smp_mb__after_clear_bit();
610 
611 	spin_unlock_irqrestore(&conn->c_lock, flags);
612 
613 	/* now remove the messages from the sock list as needed */
614 	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
615 }
616 EXPORT_SYMBOL_GPL(rds_send_drop_acked);
617 
618 void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
619 {
620 	struct rds_message *rm, *tmp;
621 	struct rds_connection *conn;
622 	unsigned long flags, flags2;
623 	LIST_HEAD(list);
624 	int wake = 0;
625 
626 	/* get all the messages we're dropping under the rs lock */
627 	spin_lock_irqsave(&rs->rs_lock, flags);
628 
629 	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
630 		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
631 			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
632 			continue;
633 
634 		wake = 1;
635 		list_move(&rm->m_sock_item, &list);
636 		rds_send_sndbuf_remove(rs, rm);
637 		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
638 	}
639 
640 	/* order flag updates with the rs lock */
641 	if (wake)
642 		smp_mb__after_clear_bit();
643 
644 	spin_unlock_irqrestore(&rs->rs_lock, flags);
645 
646 	conn = NULL;
647 
648 	/* now remove the messages from the conn list as needed */
649 	list_for_each_entry(rm, &list, m_sock_item) {
650 		/* We do this here rather than in the loop above, so that
651 		 * we don't have to nest m_rs_lock under rs->rs_lock */
652 		spin_lock_irqsave(&rm->m_rs_lock, flags2);
653 		/* If this is a RDMA operation, notify the app. */
654 		spin_lock(&rs->rs_lock);
655 		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
656 		spin_unlock(&rs->rs_lock);
657 		rm->m_rs = NULL;
658 		spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
659 
660 		/*
661 		 * If we see this flag cleared then we're *sure* that someone
662 		 * else beat us to removing it from the conn.  If we race
663 		 * with their flag update we'll get the lock and then really
664 		 * see that the flag has been cleared.
665 		 */
666 		if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
667 			continue;
668 
669 		if (conn != rm->m_inc.i_conn) {
670 			if (conn)
671 				spin_unlock_irqrestore(&conn->c_lock, flags);
672 			conn = rm->m_inc.i_conn;
673 			spin_lock_irqsave(&conn->c_lock, flags);
674 		}
675 
676 		if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
677 			list_del_init(&rm->m_conn_item);
678 			rds_message_put(rm);
679 		}
680 	}
681 
682 	if (conn)
683 		spin_unlock_irqrestore(&conn->c_lock, flags);
684 
685 	if (wake)
686 		rds_wake_sk_sleep(rs);
687 
688 	while (!list_empty(&list)) {
689 		rm = list_entry(list.next, struct rds_message, m_sock_item);
690 		list_del_init(&rm->m_sock_item);
691 
692 		rds_message_wait(rm);
693 		rds_message_put(rm);
694 	}
695 }
696 
697 /*
698  * we only want this to fire once so we use the callers 'queued'.  It's
699  * possible that another thread can race with us and remove the
700  * message from the flow with RDS_CANCEL_SENT_TO.
701  */
702 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
703 			     struct rds_message *rm, __be16 sport,
704 			     __be16 dport, int *queued)
705 {
706 	unsigned long flags;
707 	u32 len;
708 
709 	if (*queued)
710 		goto out;
711 
712 	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
713 
714 	/* this is the only place which holds both the socket's rs_lock
715 	 * and the connection's c_lock */
716 	spin_lock_irqsave(&rs->rs_lock, flags);
717 
718 	/*
719 	 * If there is a little space in sndbuf, we don't queue anything,
720 	 * and userspace gets -EAGAIN. But poll() indicates there's send
721 	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
722 	 * freed up by incoming acks. So we check the *old* value of
723 	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
724 	 * and poll() now knows no more data can be sent.
725 	 */
726 	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
727 		rs->rs_snd_bytes += len;
728 
729 		/* let recv side know we are close to send space exhaustion.
730 		 * This is probably not the optimal way to do it, as this
731 		 * means we set the flag on *all* messages as soon as our
732 		 * throughput hits a certain threshold.
733 		 */
734 		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
735 			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
736 
737 		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
738 		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
739 		rds_message_addref(rm);
740 		rm->m_rs = rs;
741 
742 		/* The code ordering is a little weird, but we're
743 		   trying to minimize the time we hold c_lock */
744 		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
745 		rm->m_inc.i_conn = conn;
746 		rds_message_addref(rm);
747 
748 		spin_lock(&conn->c_lock);
749 		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
750 		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
751 		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
752 		spin_unlock(&conn->c_lock);
753 
754 		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
755 			 rm, len, rs, rs->rs_snd_bytes,
756 			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
757 
758 		*queued = 1;
759 	}
760 
761 	spin_unlock_irqrestore(&rs->rs_lock, flags);
762 out:
763 	return *queued;
764 }
765 
766 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
767 			 struct msghdr *msg, int *allocated_mr)
768 {
769 	struct cmsghdr *cmsg;
770 	int ret = 0;
771 
772 	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
773 		if (!CMSG_OK(msg, cmsg))
774 			return -EINVAL;
775 
776 		if (cmsg->cmsg_level != SOL_RDS)
777 			continue;
778 
779 		/* As a side effect, RDMA_DEST and RDMA_MAP will set
780 		 * rm->m_rdma_cookie and rm->m_rdma_mr.
781 		 */
782 		switch (cmsg->cmsg_type) {
783 		case RDS_CMSG_RDMA_ARGS:
784 			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
785 			break;
786 
787 		case RDS_CMSG_RDMA_DEST:
788 			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
789 			break;
790 
791 		case RDS_CMSG_RDMA_MAP:
792 			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
793 			if (!ret)
794 				*allocated_mr = 1;
795 			break;
796 
797 		default:
798 			return -EINVAL;
799 		}
800 
801 		if (ret)
802 			break;
803 	}
804 
805 	return ret;
806 }
807 
808 int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
809 		size_t payload_len)
810 {
811 	struct sock *sk = sock->sk;
812 	struct rds_sock *rs = rds_sk_to_rs(sk);
813 	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
814 	__be32 daddr;
815 	__be16 dport;
816 	struct rds_message *rm = NULL;
817 	struct rds_connection *conn;
818 	int ret = 0;
819 	int queued = 0, allocated_mr = 0;
820 	int nonblock = msg->msg_flags & MSG_DONTWAIT;
821 	long timeo = sock_sndtimeo(sk, nonblock);
822 
823 	/* Mirror Linux UDP mirror of BSD error message compatibility */
824 	/* XXX: Perhaps MSG_MORE someday */
825 	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
826 		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
827 		ret = -EOPNOTSUPP;
828 		goto out;
829 	}
830 
831 	if (msg->msg_namelen) {
832 		/* XXX fail non-unicast destination IPs? */
833 		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
834 			ret = -EINVAL;
835 			goto out;
836 		}
837 		daddr = usin->sin_addr.s_addr;
838 		dport = usin->sin_port;
839 	} else {
840 		/* We only care about consistency with ->connect() */
841 		lock_sock(sk);
842 		daddr = rs->rs_conn_addr;
843 		dport = rs->rs_conn_port;
844 		release_sock(sk);
845 	}
846 
847 	/* racing with another thread binding seems ok here */
848 	if (daddr == 0 || rs->rs_bound_addr == 0) {
849 		ret = -ENOTCONN; /* XXX not a great errno */
850 		goto out;
851 	}
852 
853 	rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
854 	if (IS_ERR(rm)) {
855 		ret = PTR_ERR(rm);
856 		rm = NULL;
857 		goto out;
858 	}
859 
860 	rm->m_daddr = daddr;
861 
862 	/* rds_conn_create has a spinlock that runs with IRQ off.
863 	 * Caching the conn in the socket helps a lot. */
864 	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
865 		conn = rs->rs_conn;
866 	else {
867 		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
868 					rs->rs_transport,
869 					sock->sk->sk_allocation);
870 		if (IS_ERR(conn)) {
871 			ret = PTR_ERR(conn);
872 			goto out;
873 		}
874 		rs->rs_conn = conn;
875 	}
876 
877 	/* Parse any control messages the user may have included. */
878 	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
879 	if (ret)
880 		goto out;
881 
882 	if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
883 	    conn->c_trans->xmit_rdma == NULL) {
884 		if (printk_ratelimit())
885 			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
886 				rm->m_rdma_op, conn->c_trans->xmit_rdma);
887 		ret = -EOPNOTSUPP;
888 		goto out;
889 	}
890 
891 	/* If the connection is down, trigger a connect. We may
892 	 * have scheduled a delayed reconnect however - in this case
893 	 * we should not interfere.
894 	 */
895 	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
896 	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
897 		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
898 
899 	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
900 	if (ret) {
901 		rs->rs_seen_congestion = 1;
902 		goto out;
903 	}
904 
905 	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
906 				  dport, &queued)) {
907 		rds_stats_inc(s_send_queue_full);
908 		/* XXX make sure this is reasonable */
909 		if (payload_len > rds_sk_sndbuf(rs)) {
910 			ret = -EMSGSIZE;
911 			goto out;
912 		}
913 		if (nonblock) {
914 			ret = -EAGAIN;
915 			goto out;
916 		}
917 
918 		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
919 					rds_send_queue_rm(rs, conn, rm,
920 							  rs->rs_bound_port,
921 							  dport,
922 							  &queued),
923 					timeo);
924 		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
925 		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
926 			continue;
927 
928 		ret = timeo;
929 		if (ret == 0)
930 			ret = -ETIMEDOUT;
931 		goto out;
932 	}
933 
934 	/*
935 	 * By now we've committed to the send.  We reuse rds_send_worker()
936 	 * to retry sends in the rds thread if the transport asks us to.
937 	 */
938 	rds_stats_inc(s_send_queued);
939 
940 	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
941 		rds_send_worker(&conn->c_send_w.work);
942 
943 	rds_message_put(rm);
944 	return payload_len;
945 
946 out:
947 	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
948 	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
949 	 * or in any other way, we need to destroy the MR again */
950 	if (allocated_mr)
951 		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
952 
953 	if (rm)
954 		rds_message_put(rm);
955 	return ret;
956 }
957 
958 /*
959  * Reply to a ping packet.
960  */
961 int
962 rds_send_pong(struct rds_connection *conn, __be16 dport)
963 {
964 	struct rds_message *rm;
965 	unsigned long flags;
966 	int ret = 0;
967 
968 	rm = rds_message_alloc(0, GFP_ATOMIC);
969 	if (rm == NULL) {
970 		ret = -ENOMEM;
971 		goto out;
972 	}
973 
974 	rm->m_daddr = conn->c_faddr;
975 
976 	/* If the connection is down, trigger a connect. We may
977 	 * have scheduled a delayed reconnect however - in this case
978 	 * we should not interfere.
979 	 */
980 	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
981 	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
982 		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
983 
984 	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
985 	if (ret)
986 		goto out;
987 
988 	spin_lock_irqsave(&conn->c_lock, flags);
989 	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
990 	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
991 	rds_message_addref(rm);
992 	rm->m_inc.i_conn = conn;
993 
994 	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
995 				    conn->c_next_tx_seq);
996 	conn->c_next_tx_seq++;
997 	spin_unlock_irqrestore(&conn->c_lock, flags);
998 
999 	rds_stats_inc(s_send_queued);
1000 	rds_stats_inc(s_send_pong);
1001 
1002 	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
1003 	rds_message_put(rm);
1004 	return 0;
1005 
1006 out:
1007 	if (rm)
1008 		rds_message_put(rm);
1009 	return ret;
1010 }
1011