/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1.	Notify when we receive the ACK on the RDS message
	 *	that was queued with the RDMA. This provides reliable
	 *	notification of RDMA status at the expense of a one-way
	 *	packet delay.
	 *  2.	Notify when the IB stack gives us the completion event for
	 *	the RDMA operation.
	 *  3.	Notify when the IB stack gives us the completion event for
	 *	the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of syncing.
	 */
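	/* An illustrative reading of the three options (informal, not
	 * normative): under #3, rds_rdma_send_complete() is invoked when
	 * the HCA reports the local send completion of the RDS message
	 * that accompanied the RDMA - earlier than the remote ACK that
	 * #1 waits for, and without requiring a signaled completion on
	 * the rdma WR itself as #2 would.
	 */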
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct rds_ib_send_work.
 *
 * Returns the rm only because it is currently unobtainable except by
 * switching on wr.opcode, and the caller, the event handler, needs it.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
			       __func__, send->s_wr.opcode);
		break;
	}

	send->s_wr.opcode = 0xdead;

	return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma[i];
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_message *rm = NULL;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int nr_sig = 0;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));
	rds_ib_stats_inc(s_ib_tx_cq_event);

	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);
		rds_ib_ack_send_complete(ic);
		return;
	}

	oldest = rds_ib_ring_oldest(&ic->i_send_ring);

	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

	for (i = 0; i < completed; i++) {
		send = &ic->i_sends[oldest];
		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rm = rds_ib_send_unmap_op(ic, send, wc->status);

		if (time_after(jiffies, send->s_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);

		if (send->s_op) {
			if (send->s_op == rm->m_final_op) {
				/* If anyone waited for this message to get
				 * flushed out, wake them up now
				 */
				rds_message_unmapped(rm);
			}
			rds_message_put(rm);
			send->s_op = NULL;
		}

		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
	}

	rds_ib_ring_free(&ic->i_send_ring, completed);
	rds_ib_sub_signaled(ic, nr_sig);

	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	/* We expect errors as the qp is drained during shutdown */
	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
		rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
				  &conn->c_laddr, &conn->c_faddr,
				  conn->c_tos, wc->status,
				  ib_wc_status_msg(wc->status), wc->vendor_err);
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in an RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter.  Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
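/*
 * A worked example of the packed-counter scheme (a sketch, assuming the
 * 16-bit/16-bit split of the IB_*_CREDITS helpers in ib.h: send credits
 * in the low half, posted credits in the high half):
 *
 *	oldval = atomic_read(&ic->i_credits);	// e.g. 0x00030010
 *	avail  = IB_GET_SEND_CREDITS(oldval);	// 16 send credits
 *	posted = IB_GET_POST_CREDITS(oldval);	// 3 credits to advertise
 *
 * Grabbing 4 send credits and advertising all 3 posted credits builds
 * newval = oldval - IB_SET_SEND_CREDITS(4) - IB_SET_POST_CREDITS(3)
 *        = 0x0000000c, and the atomic_cmpxchg() in
 * rds_ib_send_grab_credits() retries the whole calculation if a racing
 * ACK or receive refill changed i_credits in the meantime.
 */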
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}

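/*
 * An informal sketch of the resulting cadence (not normative): with
 * rds_ib_sysctl_max_unsig_wrs == N, roughly one WR in every N + 1 is
 * posted with IB_SEND_SIGNALED, since the post-decrement above only
 * reaches zero after N unsignaled posts. Completion interrupts are thus
 * batched, while i_signaled_sends still counts every signaled WR for
 * the ring-empty accounting in rds_ib_sub_signaled().
 */
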
/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
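/*
 * For example (illustrative numbers only, assuming the usual 4 KB
 * RDS_FRAG_SIZE): a message with h_len == 10000 is carried by
 * DIV_ROUND_UP(10000, 4096) == 3 SEND work requests, each pointing at
 * one header sge plus at most one data sge, and only the WR carrying
 * the final fragment hands s_op ownership to the completion handler.
 */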
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time or when requested
	 * by the user. Right now, we let the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];
		send->s_sge[0].length = sizeof(struct rds_header);
		send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

		ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev,
					   ic->i_send_hdrs_dma[pos],
					   sizeof(struct rds_header),
					   DMA_TO_DEVICE);
		memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
		       sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE,
				  sg_dma_len(scat) - rm->data.op_dmaoff);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = sg_dma_address(scat);
			send->s_sge[1].addr += rm->data.op_dmaoff;
			send->s_sge[1].length = len;
			send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;

			bytes_sent += len;
			rm->data.op_dmaoff += len;
			if (rm->data.op_dmaoff == sg_dma_len(scat)) {
				scat++;
				rm->data.op_dmasg++;
				rm->data.op_dmaoff = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, false);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
			rds_ib_set_wr_signal_state(ic, send, true);
			send->s_wr.send_flags |= IB_SEND_SOLICITED;
		}

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}
		ib_dma_sync_single_for_device(ic->rds_ibdev->dev,
					      ic->i_send_hdrs_dma[pos],
					      sizeof(struct rds_header),
					      DMA_TO_DEVICE);

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
			nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue an atomic operation.
 * A simplified version of the rdma case: we always map one SG, of
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	const struct ib_send_wr *failed_wr;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
		send->s_atomic_wr.swap = op->op_m_cswp.swap;
		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
		send->s_atomic_wr.swap = 0;
		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_atomic_wr.swap_mask = 0;
	}
	send->s_wr.send_flags = 0;
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_atomic_wr.wr.num_sge = 1;
	send->s_atomic_wr.wr.next = NULL;
	send->s_atomic_wr.remote_addr = op->op_remote_addr;
	send->s_atomic_wr.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = sg_dma_address(op->op_sg);
	send->s_sge[0].length = sg_dma_len(op->op_sg);
	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_atomic_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_atomic_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;
	u64 odp_addr = op->op_odp_addr;
	u32 odp_lkey = 0;

	/* map the op the first time we see it */
	if (!op->op_odp_mr) {
		if (!op->op_mapped) {
			op->op_count =
				ib_dma_map_sg(ic->i_cm_id->device, op->op_sg,
					      op->op_nents,
					      (op->op_write) ? DMA_TO_DEVICE :
							       DMA_FROM_DEVICE);
			rdsdebug("ic %p mapping op %p: %d\n", ic, op,
				 op->op_count);
			if (op->op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
			op->op_mapped = 1;
		}
	} else {
		op->op_count = op->op_nents;
		odp_lkey = rds_ib_get_lkey(op->op_odp_mr->r_trans_private);
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write,
	 * we insist that there be enough work requests to send the
	 * entire message.
	 */
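	/* For example (illustrative numbers only): with op_count == 5
	 * mapped SGs and a device max_sge of 2, DIV_ROUND_UP(5, 2) == 3
	 * RDMA work requests are needed, carrying 2 + 2 + 1 sges; if the
	 * ring cannot supply all three WRs we give back whatever was
	 * allocated and return -ENOMEM rather than post a partial op.
	 */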
	i = DIV_ROUND_UP(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		if (!op->op_notify)
			nr_sig += rds_ib_set_wr_signal_state(ic, send,
							     op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_rdma_wr.remote_addr = remote_addr;
		send->s_rdma_wr.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_rdma_wr.wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_rdma_wr.wr.num_sge = num_sge;
		}

		send->s_rdma_wr.wr.next = NULL;

		if (prev)
			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;

		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
		     scat != &op->op_sg[op->op_count]; j++) {
			len = sg_dma_len(scat);
			if (!op->op_odp_mr) {
				send->s_sge[j].addr = sg_dma_address(scat);
				send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
			} else {
				send->s_sge[j].addr = odp_addr;
				send->s_sge[j].lkey = odp_lkey;
			}
			send->s_sge[j].length = len;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			odp_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_rdma_wr.wr,
			send->s_rdma_wr.wr.num_sge,
			send->s_rdma_wr.wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_rdma_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_rdma_wr.wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	}

out:
	return ret;
}

void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}
1018