xref: /openbmc/linux/net/rds/ib_send.c (revision 9d56dd3b)
1 /*
2  * Copyright (c) 2006 Oracle.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 #include <linux/kernel.h>
34 #include <linux/in.h>
35 #include <linux/device.h>
36 #include <linux/dmapool.h>
37 
38 #include "rds.h"
39 #include "rdma.h"
40 #include "ib.h"
41 
42 static void rds_ib_send_rdma_complete(struct rds_message *rm,
43 				      int wc_status)
44 {
45 	int notify_status;
46 
47 	switch (wc_status) {
48 	case IB_WC_WR_FLUSH_ERR:
49 		return;
50 
51 	case IB_WC_SUCCESS:
52 		notify_status = RDS_RDMA_SUCCESS;
53 		break;
54 
55 	case IB_WC_REM_ACCESS_ERR:
56 		notify_status = RDS_RDMA_REMOTE_ERROR;
57 		break;
58 
59 	default:
60 		notify_status = RDS_RDMA_OTHER_ERROR;
61 		break;
62 	}
63 	rds_rdma_send_complete(rm, notify_status);
64 }
65 
66 static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
67 				   struct rds_rdma_op *op)
68 {
69 	if (op->r_mapped) {
70 		ib_dma_unmap_sg(ic->i_cm_id->device,
71 			op->r_sg, op->r_nents,
72 			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
73 		op->r_mapped = 0;
74 	}
75 }
76 
77 static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
78 			  struct rds_ib_send_work *send,
79 			  int wc_status)
80 {
81 	struct rds_message *rm = send->s_rm;
82 
83 	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
84 
85 	ib_dma_unmap_sg(ic->i_cm_id->device,
86 		     rm->m_sg, rm->m_nents,
87 		     DMA_TO_DEVICE);
88 
89 	if (rm->m_rdma_op != NULL) {
90 		rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
91 
92 		/* If the user asked for a completion notification on this
93 		 * message, we can implement three different semantics:
94 		 *  1.	Notify when we received the ACK on the RDS message
95 		 *	that was queued with the RDMA. This provides reliable
96 		 *	notification of RDMA status at the expense of a one-way
97 		 *	packet delay.
98 		 *  2.	Notify when the IB stack gives us the completion event for
99 		 *	the RDMA operation.
100 		 *  3.	Notify when the IB stack gives us the completion event for
101 		 *	the accompanying RDS messages.
102 		 * Here, we implement approach #3. To implement approach #2,
103 		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
104 		 * don't call rds_rdma_send_complete at all, and fall back to the notify
105 		 * handling in the ACK processing code.
106 		 *
107 		 * Note: There's no need to explicitly sync any RDMA buffers using
108 		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
109 		 * operation itself unmapped the RDMA buffers, which takes care
110 		 * of synching.
111 		 */
112 		rds_ib_send_rdma_complete(rm, wc_status);
113 
114 		if (rm->m_rdma_op->r_write)
115 			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
116 		else
117 			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
118 	}
119 
120 	/* If anyone waited for this message to get flushed out, wake
121 	 * them up now */
122 	rds_message_unmapped(rm);
123 
124 	rds_message_put(rm);
125 	send->s_rm = NULL;
126 }
127 
128 void rds_ib_send_init_ring(struct rds_ib_connection *ic)
129 {
130 	struct rds_ib_send_work *send;
131 	u32 i;
132 
133 	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
134 		struct ib_sge *sge;
135 
136 		send->s_rm = NULL;
137 		send->s_op = NULL;
138 
139 		send->s_wr.wr_id = i;
140 		send->s_wr.sg_list = send->s_sge;
141 		send->s_wr.num_sge = 1;
142 		send->s_wr.opcode = IB_WR_SEND;
143 		send->s_wr.send_flags = 0;
144 		send->s_wr.ex.imm_data = 0;
145 
146 		sge = rds_ib_data_sge(ic, send->s_sge);
147 		sge->lkey = ic->i_mr->lkey;
148 
149 		sge = rds_ib_header_sge(ic, send->s_sge);
150 		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
151 		sge->length = sizeof(struct rds_header);
152 		sge->lkey = ic->i_mr->lkey;
153 	}
154 }
155 
156 void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
157 {
158 	struct rds_ib_send_work *send;
159 	u32 i;
160 
161 	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
162 		if (send->s_wr.opcode == 0xdead)
163 			continue;
164 		if (send->s_rm)
165 			rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
166 		if (send->s_op)
167 			rds_ib_send_unmap_rdma(ic, send->s_op);
168 	}
169 }
170 
171 /*
172  * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
173  * operations performed in the send path.  As the sender allocs and potentially
174  * unallocs the next free entry in the ring it doesn't alter which is
175  * the next to be freed, which is what this is concerned with.
176  */
177 void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
178 {
179 	struct rds_connection *conn = context;
180 	struct rds_ib_connection *ic = conn->c_transport_data;
181 	struct ib_wc wc;
182 	struct rds_ib_send_work *send;
183 	u32 completed;
184 	u32 oldest;
185 	u32 i = 0;
186 	int ret;
187 
188 	rdsdebug("cq %p conn %p\n", cq, conn);
189 	rds_ib_stats_inc(s_ib_tx_cq_call);
190 	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
191 	if (ret)
192 		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
193 
194 	while (ib_poll_cq(cq, 1, &wc) > 0) {
195 		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
196 			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
197 			 be32_to_cpu(wc.ex.imm_data));
198 		rds_ib_stats_inc(s_ib_tx_cq_event);
199 
200 		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
201 			if (ic->i_ack_queued + HZ/2 < jiffies)
202 				rds_ib_stats_inc(s_ib_tx_stalled);
203 			rds_ib_ack_send_complete(ic);
204 			continue;
205 		}
206 
207 		oldest = rds_ib_ring_oldest(&ic->i_send_ring);
208 
209 		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);
210 
211 		for (i = 0; i < completed; i++) {
212 			send = &ic->i_sends[oldest];
213 
214 			/* In the error case, wc.opcode sometimes contains garbage */
215 			switch (send->s_wr.opcode) {
216 			case IB_WR_SEND:
217 				if (send->s_rm)
218 					rds_ib_send_unmap_rm(ic, send, wc.status);
219 				break;
220 			case IB_WR_RDMA_WRITE:
221 			case IB_WR_RDMA_READ:
222 				/* Nothing to be done - the SG list will be unmapped
223 				 * when the SEND completes. */
224 				break;
225 			default:
226 				if (printk_ratelimit())
227 					printk(KERN_NOTICE
228 						"RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
229 						__func__, send->s_wr.opcode);
230 				break;
231 			}
232 
233 			send->s_wr.opcode = 0xdead;
234 			send->s_wr.num_sge = 1;
235 			if (send->s_queued + HZ/2 < jiffies)
236 				rds_ib_stats_inc(s_ib_tx_stalled);
237 
238 			/* If a RDMA operation produced an error, signal this right
239 			 * away. If we don't, the subsequent SEND that goes with this
240 			 * RDMA will be canceled with ERR_WFLUSH, and the application
241 			 * never learn that the RDMA failed. */
242 			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
243 				struct rds_message *rm;
244 
245 				rm = rds_send_get_message(conn, send->s_op);
246 				if (rm)
247 					rds_ib_send_rdma_complete(rm, wc.status);
248 			}
249 
250 			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
251 		}
252 
253 		rds_ib_ring_free(&ic->i_send_ring, completed);
254 
255 		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
256 		    test_bit(0, &conn->c_map_queued))
257 			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
258 
259 		/* We expect errors as the qp is drained during shutdown */
260 		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
261 			rds_ib_conn_error(conn,
262 				"send completion on %pI4 "
263 				"had status %u, disconnecting and reconnecting\n",
264 				&conn->c_faddr, wc.status);
265 		}
266 	}
267 }
268 
269 /*
270  * This is the main function for allocating credits when sending
271  * messages.
272  *
273  * Conceptually, we have two counters:
274  *  -	send credits: this tells us how many WRs we're allowed
275  *	to submit without overruning the reciever's queue. For
276  *	each SEND WR we post, we decrement this by one.
277  *
278  *  -	posted credits: this tells us how many WRs we recently
279  *	posted to the receive queue. This value is transferred
280  *	to the peer as a "credit update" in a RDS header field.
281  *	Every time we transmit credits to the peer, we subtract
282  *	the amount of transferred credits from this counter.
283  *
284  * It is essential that we avoid situations where both sides have
285  * exhausted their send credits, and are unable to send new credits
286  * to the peer. We achieve this by requiring that we send at least
287  * one credit update to the peer before exhausting our credits.
288  * When new credits arrive, we subtract one credit that is withheld
289  * until we've posted new buffers and are ready to transmit these
290  * credits (see rds_ib_send_add_credits below).
291  *
292  * The RDS send code is essentially single-threaded; rds_send_xmit
293  * grabs c_send_lock to ensure exclusive access to the send ring.
294  * However, the ACK sending code is independent and can race with
295  * message SENDs.
296  *
297  * In the send path, we need to update the counters for send credits
298  * and the counter of posted buffers atomically - when we use the
299  * last available credit, we cannot allow another thread to race us
300  * and grab the posted credits counter.  Hence, we have to use a
301  * spinlock to protect the credit counter, or use atomics.
302  *
303  * Spinlocks shared between the send and the receive path are bad,
304  * because they create unnecessary delays. An early implementation
305  * using a spinlock showed a 5% degradation in throughput at some
306  * loads.
307  *
308  * This implementation avoids spinlocks completely, putting both
309  * counters into a single atomic, and updating that atomic using
310  * atomic_add (in the receive path, when receiving fresh credits),
311  * and using atomic_cmpxchg when updating the two counters.
312  */
313 int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
314 			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
315 {
316 	unsigned int avail, posted, got = 0, advertise;
317 	long oldval, newval;
318 
319 	*adv_credits = 0;
320 	if (!ic->i_flowctl)
321 		return wanted;
322 
323 try_again:
324 	advertise = 0;
325 	oldval = newval = atomic_read(&ic->i_credits);
326 	posted = IB_GET_POST_CREDITS(oldval);
327 	avail = IB_GET_SEND_CREDITS(oldval);
328 
329 	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
330 			wanted, avail, posted);
331 
332 	/* The last credit must be used to send a credit update. */
333 	if (avail && !posted)
334 		avail--;
335 
336 	if (avail < wanted) {
337 		struct rds_connection *conn = ic->i_cm_id->context;
338 
339 		/* Oops, there aren't that many credits left! */
340 		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
341 		got = avail;
342 	} else {
343 		/* Sometimes you get what you want, lalala. */
344 		got = wanted;
345 	}
346 	newval -= IB_SET_SEND_CREDITS(got);
347 
348 	/*
349 	 * If need_posted is non-zero, then the caller wants
350 	 * the posted regardless of whether any send credits are
351 	 * available.
352 	 */
353 	if (posted && (got || need_posted)) {
354 		advertise = min_t(unsigned int, posted, max_posted);
355 		newval -= IB_SET_POST_CREDITS(advertise);
356 	}
357 
358 	/* Finally bill everything */
359 	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
360 		goto try_again;
361 
362 	*adv_credits = advertise;
363 	return got;
364 }
365 
366 void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
367 {
368 	struct rds_ib_connection *ic = conn->c_transport_data;
369 
370 	if (credits == 0)
371 		return;
372 
373 	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
374 			credits,
375 			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
376 			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
377 
378 	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
379 	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
380 		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
381 
382 	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
383 
384 	rds_ib_stats_inc(s_ib_rx_credit_updates);
385 }
386 
387 void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
388 {
389 	struct rds_ib_connection *ic = conn->c_transport_data;
390 
391 	if (posted == 0)
392 		return;
393 
394 	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
395 
396 	/* Decide whether to send an update to the peer now.
397 	 * If we would send a credit update for every single buffer we
398 	 * post, we would end up with an ACK storm (ACK arrives,
399 	 * consumes buffer, we refill the ring, send ACK to remote
400 	 * advertising the newly posted buffer... ad inf)
401 	 *
402 	 * Performance pretty much depends on how often we send
403 	 * credit updates - too frequent updates mean lots of ACKs.
404 	 * Too infrequent updates, and the peer will run out of
405 	 * credits and has to throttle.
406 	 * For the time being, 16 seems to be a good compromise.
407 	 */
408 	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
409 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
410 }
411 
412 static inline void
413 rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
414 		struct rds_ib_send_work *send, unsigned int pos,
415 		unsigned long buffer, unsigned int length,
416 		int send_flags)
417 {
418 	struct ib_sge *sge;
419 
420 	WARN_ON(pos != send - ic->i_sends);
421 
422 	send->s_wr.send_flags = send_flags;
423 	send->s_wr.opcode = IB_WR_SEND;
424 	send->s_wr.num_sge = 2;
425 	send->s_wr.next = NULL;
426 	send->s_queued = jiffies;
427 	send->s_op = NULL;
428 
429 	if (length != 0) {
430 		sge = rds_ib_data_sge(ic, send->s_sge);
431 		sge->addr = buffer;
432 		sge->length = length;
433 		sge->lkey = ic->i_mr->lkey;
434 
435 		sge = rds_ib_header_sge(ic, send->s_sge);
436 	} else {
437 		/* We're sending a packet with no payload. There is only
438 		 * one SGE */
439 		send->s_wr.num_sge = 1;
440 		sge = &send->s_sge[0];
441 	}
442 
443 	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
444 	sge->length = sizeof(struct rds_header);
445 	sge->lkey = ic->i_mr->lkey;
446 }
447 
448 /*
449  * This can be called multiple times for a given message.  The first time
450  * we see a message we map its scatterlist into the IB device so that
451  * we can provide that mapped address to the IB scatter gather entries
452  * in the IB work requests.  We translate the scatterlist into a series
453  * of work requests that fragment the message.  These work requests complete
454  * in order so we pass ownership of the message to the completion handler
455  * once we send the final fragment.
456  *
457  * The RDS core uses the c_send_lock to only enter this function once
458  * per connection.  This makes sure that the tx ring alloc/unalloc pairs
459  * don't get out of sync and confuse the ring.
460  */
461 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
462 		unsigned int hdr_off, unsigned int sg, unsigned int off)
463 {
464 	struct rds_ib_connection *ic = conn->c_transport_data;
465 	struct ib_device *dev = ic->i_cm_id->device;
466 	struct rds_ib_send_work *send = NULL;
467 	struct rds_ib_send_work *first;
468 	struct rds_ib_send_work *prev;
469 	struct ib_send_wr *failed_wr;
470 	struct scatterlist *scat;
471 	u32 pos;
472 	u32 i;
473 	u32 work_alloc;
474 	u32 credit_alloc;
475 	u32 posted;
476 	u32 adv_credits = 0;
477 	int send_flags = 0;
478 	int sent;
479 	int ret;
480 	int flow_controlled = 0;
481 
482 	BUG_ON(off % RDS_FRAG_SIZE);
483 	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
484 
485 	/* FIXME we may overallocate here */
486 	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
487 		i = 1;
488 	else
489 		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
490 
491 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
492 	if (work_alloc == 0) {
493 		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
494 		rds_ib_stats_inc(s_ib_tx_ring_full);
495 		ret = -ENOMEM;
496 		goto out;
497 	}
498 
499 	credit_alloc = work_alloc;
500 	if (ic->i_flowctl) {
501 		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
502 		adv_credits += posted;
503 		if (credit_alloc < work_alloc) {
504 			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
505 			work_alloc = credit_alloc;
506 			flow_controlled++;
507 		}
508 		if (work_alloc == 0) {
509 			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
510 			rds_ib_stats_inc(s_ib_tx_throttle);
511 			ret = -ENOMEM;
512 			goto out;
513 		}
514 	}
515 
516 	/* map the message the first time we see it */
517 	if (ic->i_rm == NULL) {
518 		/*
519 		printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n",
520 				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
521 				rm->m_inc.i_hdr.h_flags,
522 				be32_to_cpu(rm->m_inc.i_hdr.h_len));
523 		   */
524 		if (rm->m_nents) {
525 			rm->m_count = ib_dma_map_sg(dev,
526 					 rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
527 			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
528 			if (rm->m_count == 0) {
529 				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
530 				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
531 				ret = -ENOMEM; /* XXX ? */
532 				goto out;
533 			}
534 		} else {
535 			rm->m_count = 0;
536 		}
537 
538 		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
539 		ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
540 		rds_message_addref(rm);
541 		ic->i_rm = rm;
542 
543 		/* Finalize the header */
544 		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
545 			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
546 		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
547 			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
548 
549 		/* If it has a RDMA op, tell the peer we did it. This is
550 		 * used by the peer to release use-once RDMA MRs. */
551 		if (rm->m_rdma_op) {
552 			struct rds_ext_header_rdma ext_hdr;
553 
554 			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
555 			rds_message_add_extension(&rm->m_inc.i_hdr,
556 					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
557 		}
558 		if (rm->m_rdma_cookie) {
559 			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
560 					rds_rdma_cookie_key(rm->m_rdma_cookie),
561 					rds_rdma_cookie_offset(rm->m_rdma_cookie));
562 		}
563 
564 		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
565 		 * we should not do this unless we have a chance of at least
566 		 * sticking the header into the send ring. Which is why we
567 		 * should call rds_ib_ring_alloc first. */
568 		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
569 		rds_message_make_checksum(&rm->m_inc.i_hdr);
570 
571 		/*
572 		 * Update adv_credits since we reset the ACK_REQUIRED bit.
573 		 */
574 		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
575 		adv_credits += posted;
576 		BUG_ON(adv_credits > 255);
577 	} else if (ic->i_rm != rm)
578 		BUG();
579 
580 	send = &ic->i_sends[pos];
581 	first = send;
582 	prev = NULL;
583 	scat = &rm->m_sg[sg];
584 	sent = 0;
585 	i = 0;
586 
587 	/* Sometimes you want to put a fence between an RDMA
588 	 * READ and the following SEND.
589 	 * We could either do this all the time
590 	 * or when requested by the user. Right now, we let
591 	 * the application choose.
592 	 */
593 	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
594 		send_flags = IB_SEND_FENCE;
595 
596 	/*
597 	 * We could be copying the header into the unused tail of the page.
598 	 * That would need to be changed in the future when those pages might
599 	 * be mapped userspace pages or page cache pages.  So instead we always
600 	 * use a second sge and our long-lived ring of mapped headers.  We send
601 	 * the header after the data so that the data payload can be aligned on
602 	 * the receiver.
603 	 */
604 
605 	/* handle a 0-len message */
606 	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
607 		rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
608 		goto add_header;
609 	}
610 
611 	/* if there's data reference it with a chain of work reqs */
612 	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
613 		unsigned int len;
614 
615 		send = &ic->i_sends[pos];
616 
617 		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
618 		rds_ib_xmit_populate_wr(ic, send, pos,
619 				ib_sg_dma_address(dev, scat) + off, len,
620 				send_flags);
621 
622 		/*
623 		 * We want to delay signaling completions just enough to get
624 		 * the batching benefits but not so much that we create dead time
625 		 * on the wire.
626 		 */
627 		if (ic->i_unsignaled_wrs-- == 0) {
628 			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
629 			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
630 		}
631 
632 		ic->i_unsignaled_bytes -= len;
633 		if (ic->i_unsignaled_bytes <= 0) {
634 			ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
635 			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
636 		}
637 
638 		/*
639 		 * Always signal the last one if we're stopping due to flow control.
640 		 */
641 		if (flow_controlled && i == (work_alloc-1))
642 			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
643 
644 		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
645 			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
646 
647 		sent += len;
648 		off += len;
649 		if (off == ib_sg_dma_len(dev, scat)) {
650 			scat++;
651 			off = 0;
652 		}
653 
654 add_header:
655 		/* Tack on the header after the data. The header SGE should already
656 		 * have been set up to point to the right header buffer. */
657 		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
658 
659 		if (0) {
660 			struct rds_header *hdr = &ic->i_send_hdrs[pos];
661 
662 			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
663 				be16_to_cpu(hdr->h_dport),
664 				hdr->h_flags,
665 				be32_to_cpu(hdr->h_len));
666 		}
667 		if (adv_credits) {
668 			struct rds_header *hdr = &ic->i_send_hdrs[pos];
669 
670 			/* add credit and redo the header checksum */
671 			hdr->h_credit = adv_credits;
672 			rds_message_make_checksum(hdr);
673 			adv_credits = 0;
674 			rds_ib_stats_inc(s_ib_tx_credit_updates);
675 		}
676 
677 		if (prev)
678 			prev->s_wr.next = &send->s_wr;
679 		prev = send;
680 
681 		pos = (pos + 1) % ic->i_send_ring.w_nr;
682 	}
683 
684 	/* Account the RDS header in the number of bytes we sent, but just once.
685 	 * The caller has no concept of fragmentation. */
686 	if (hdr_off == 0)
687 		sent += sizeof(struct rds_header);
688 
689 	/* if we finished the message then send completion owns it */
690 	if (scat == &rm->m_sg[rm->m_count]) {
691 		prev->s_rm = ic->i_rm;
692 		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
693 		ic->i_rm = NULL;
694 	}
695 
696 	if (i < work_alloc) {
697 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
698 		work_alloc = i;
699 	}
700 	if (ic->i_flowctl && i < credit_alloc)
701 		rds_ib_send_add_credits(conn, credit_alloc - i);
702 
703 	/* XXX need to worry about failed_wr and partial sends. */
704 	failed_wr = &first->s_wr;
705 	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
706 	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
707 		 first, &first->s_wr, ret, failed_wr);
708 	BUG_ON(failed_wr != &first->s_wr);
709 	if (ret) {
710 		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
711 		       "returned %d\n", &conn->c_faddr, ret);
712 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
713 		if (prev->s_rm) {
714 			ic->i_rm = prev->s_rm;
715 			prev->s_rm = NULL;
716 		}
717 		/* Finesse this later */
718 		BUG();
719 		goto out;
720 	}
721 
722 	ret = sent;
723 out:
724 	BUG_ON(adv_credits);
725 	return ret;
726 }
727 
728 int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
729 {
730 	struct rds_ib_connection *ic = conn->c_transport_data;
731 	struct rds_ib_send_work *send = NULL;
732 	struct rds_ib_send_work *first;
733 	struct rds_ib_send_work *prev;
734 	struct ib_send_wr *failed_wr;
735 	struct rds_ib_device *rds_ibdev;
736 	struct scatterlist *scat;
737 	unsigned long len;
738 	u64 remote_addr = op->r_remote_addr;
739 	u32 pos;
740 	u32 work_alloc;
741 	u32 i;
742 	u32 j;
743 	int sent;
744 	int ret;
745 	int num_sge;
746 
747 	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
748 
749 	/* map the message the first time we see it */
750 	if (!op->r_mapped) {
751 		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
752 					op->r_sg, op->r_nents, (op->r_write) ?
753 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
754 		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
755 		if (op->r_count == 0) {
756 			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
757 			ret = -ENOMEM; /* XXX ? */
758 			goto out;
759 		}
760 
761 		op->r_mapped = 1;
762 	}
763 
764 	/*
765 	 * Instead of knowing how to return a partial rdma read/write we insist that there
766 	 * be enough work requests to send the entire message.
767 	 */
768 	i = ceil(op->r_count, rds_ibdev->max_sge);
769 
770 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
771 	if (work_alloc != i) {
772 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
773 		rds_ib_stats_inc(s_ib_tx_ring_full);
774 		ret = -ENOMEM;
775 		goto out;
776 	}
777 
778 	send = &ic->i_sends[pos];
779 	first = send;
780 	prev = NULL;
781 	scat = &op->r_sg[0];
782 	sent = 0;
783 	num_sge = op->r_count;
784 
785 	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
786 		send->s_wr.send_flags = 0;
787 		send->s_queued = jiffies;
788 		/*
789 		 * We want to delay signaling completions just enough to get
790 		 * the batching benefits but not so much that we create dead time on the wire.
791 		 */
792 		if (ic->i_unsignaled_wrs-- == 0) {
793 			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
794 			send->s_wr.send_flags = IB_SEND_SIGNALED;
795 		}
796 
797 		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
798 		send->s_wr.wr.rdma.remote_addr = remote_addr;
799 		send->s_wr.wr.rdma.rkey = op->r_key;
800 		send->s_op = op;
801 
802 		if (num_sge > rds_ibdev->max_sge) {
803 			send->s_wr.num_sge = rds_ibdev->max_sge;
804 			num_sge -= rds_ibdev->max_sge;
805 		} else {
806 			send->s_wr.num_sge = num_sge;
807 		}
808 
809 		send->s_wr.next = NULL;
810 
811 		if (prev)
812 			prev->s_wr.next = &send->s_wr;
813 
814 		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
815 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
816 			send->s_sge[j].addr =
817 				 ib_sg_dma_address(ic->i_cm_id->device, scat);
818 			send->s_sge[j].length = len;
819 			send->s_sge[j].lkey = ic->i_mr->lkey;
820 
821 			sent += len;
822 			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
823 
824 			remote_addr += len;
825 			scat++;
826 		}
827 
828 		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
829 			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
830 
831 		prev = send;
832 		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
833 			send = ic->i_sends;
834 	}
835 
836 	/* if we finished the message then send completion owns it */
837 	if (scat == &op->r_sg[op->r_count])
838 		prev->s_wr.send_flags = IB_SEND_SIGNALED;
839 
840 	if (i < work_alloc) {
841 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
842 		work_alloc = i;
843 	}
844 
845 	failed_wr = &first->s_wr;
846 	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
847 	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
848 		 first, &first->s_wr, ret, failed_wr);
849 	BUG_ON(failed_wr != &first->s_wr);
850 	if (ret) {
851 		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
852 		       "returned %d\n", &conn->c_faddr, ret);
853 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
854 		goto out;
855 	}
856 
857 	if (unlikely(failed_wr != &first->s_wr)) {
858 		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
859 		BUG_ON(failed_wr != &first->s_wr);
860 	}
861 
862 
863 out:
864 	return ret;
865 }
866 
867 void rds_ib_xmit_complete(struct rds_connection *conn)
868 {
869 	struct rds_ib_connection *ic = conn->c_transport_data;
870 
871 	/* We may have a pending ACK or window update we were unable
872 	 * to send previously (due to flow control). Try again. */
873 	rds_ib_attempt_ack(ic);
874 }
875