xref: /openbmc/linux/net/smc/smc_cdc.c (revision c8ec3743)
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 * handles flow control
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

/********************************** send *************************************/

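/* Completion handling below leans on smc_curs_diff(), which returns the
 * number of bytes between two ring buffer cursors while honouring the
 * wrap counter. E.g. for a 16384 byte send buffer, old = {wrap, 16000}
 * and curr = {wrap + 1, 200} yield (16384 - 16000) + 200 = 584 bytes.
 */
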
/* handler for send/transmission completion of a CDC msg */
static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
	struct smc_connection *conn = cdcpend->conn;
	struct smc_sock *smc;
	int diff;

	if (!conn)
		/* already dismissed */
		return;

	smc = container_of(conn, struct smc_sock, conn);
	bh_lock_sock(&smc->sk);
	if (!wc_status) {
		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
				     &cdcpend->conn->tx_curs_fin,
				     &cdcpend->cursor);
		/* sndbuf_space is decreased in smc_sendmsg */
		smp_mb__before_atomic();
		atomic_add(diff, &cdcpend->conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
	}
	smc_tx_sndbuf_nonfull(smc);
	bh_unlock_sock(&smc->sk);
}

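/* reserve a send work request slot on the connection's single link
 * (SMC_SINGLE_LINK); returns -EPIPE instead when the connection is
 * already being terminated (alert_token_local cleared)
 */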
int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_wr_buf **wr_buf,
			  struct smc_rdma_wr **wr_rdma_buf,
			  struct smc_cdc_tx_pend **pend)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
				     wr_rdma_buf,
				     (struct smc_wr_tx_pend_priv **)pend);
	if (!conn->alert_token_local)
		/* abnormal termination */
		rc = -EPIPE;
	return rc;
}

static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
					    struct smc_cdc_tx_pend *pend)
{
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
	BUILD_BUG_ON_MSG(
		offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)");
	pend->conn = conn;
	pend->cursor = conn->tx_curs_sent;
	pend->p_cursor = conn->local_tx_ctrl.prod;
	pend->ctrl_seq = conn->tx_cdc_seq;
}

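/* send the CDC message that has been prepared in wr_buf; callers hold
 * conn->send_lock, cf. the typical sequence in
 * smcr_cdc_get_slot_and_msg_send() below:
 *	smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
 *	spin_lock_bh(&conn->send_lock);
 *	rc = smc_cdc_msg_send(conn, wr_buf, pend);
 *	spin_unlock_bh(&conn->send_lock);
 */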
int smc_cdc_msg_send(struct smc_connection *conn,
		     struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend)
{
	union smc_host_cursor cfed;
	struct smc_link *link;
	int rc;

	link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_cdc_add_pending_send(conn, pend);

	conn->tx_cdc_seq++;
	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
			    &conn->local_tx_ctrl, conn);
	smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn);
	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
	if (!rc)
		smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);

	return rc;
}

static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
	if (rc)
		return rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

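/* entry point for sending a CDC message on either transport: SMC-D
 * writes the header into the ISM device directly, SMC-R sends it through
 * a work request slot on the link
 */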
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	int rc;

	if (conn->lgr->is_smcd) {
		spin_lock_bh(&conn->send_lock);
		rc = smcd_cdc_msg_send(conn);
		spin_unlock_bh(&conn->send_lock);
	} else {
		rc = smcr_cdc_get_slot_and_msg_send(conn);
	}

	return rc;
}

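/* Pending CDC sends must not touch a dying connection any more.
 * smc_cdc_tx_dismiss_slots() walks the link's pending slots, using
 * smc_cdc_tx_filter() to select those belonging to this connection and
 * smc_cdc_tx_dismisser() to detach them, so that smc_cdc_tx_handler()
 * takes the "already dismissed" exit above.
 */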
static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
			      unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	return cdc_pend->conn == conn;
}

static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
{
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	cdc_pend->conn = NULL;
}

void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
				smc_cdc_tx_filter, smc_cdc_tx_dismisser,
				(unsigned long)conn);
}

/* Send an SMC-D CDC header.
 * This increments the free space available in our send buffer.
 * Also updates the confirmed receive buffer with what was sent to the peer.
 */
int smcd_cdc_msg_send(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	union smc_host_cursor curs;
	struct smcd_cdc_msg cdc;
	int rc, diff;

	memset(&cdc, 0, sizeof(cdc));
	cdc.common.type = SMC_CDC_MSG_TYPE;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
	cdc.prod.wrap = curs.wrap;
	cdc.prod.count = curs.count;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
	cdc.cons.wrap = curs.wrap;
	cdc.cons.count = curs.count;
	cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
	cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
	if (rc)
		return rc;
	smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
	/* Calculate transmitted data and increment free send buffer space */
	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
			     &conn->tx_curs_sent);
	/* increased by confirmed number of bytes */
	smp_mb__before_atomic();
	atomic_add(diff, &conn->sndbuf_space);
	/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
	smp_mb__after_atomic();
	smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);

	smc_tx_sndbuf_nonfull(smc);
	return rc;
}

/********************************* receive ***********************************/

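/* sequence number comparison with u16 wraparound, in the style of RFC 1982
 * serial number arithmetic: e.g. seq1 = 0xfffe and seq2 = 0x0003 give
 * (s16)(0xfffe - 0x0003) = -5 < 0, so 0xfffe counts as "before" 0x0003
 */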
static inline bool smc_cdc_before(u16 seq1, u16 seq2)
{
	return (s16)(seq1 - seq2) < 0;
}

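/* The producer cursor points just past the newest data, so the urgent
 * byte sits at offset count - 1 in the RMB; count == 0 means the cursor
 * has just wrapped and the urgent byte is the last byte of the buffer.
 */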
static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
					    int *diff_prod)
{
	struct smc_connection *conn = &smc->conn;
	char *base;

	/* new data included urgent business */
	smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
	conn->urg_state = SMC_URG_VALID;
	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
		/* we'll skip the urgent byte, so don't account for it */
		(*diff_prod)--;
	base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
	if (conn->urg_curs.count)
		conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
	else
		conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
	sk_send_sigurg(&smc->sk);
}

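/* apply a received CDC message to the local connection state:
 * - an advanced consumer cursor returns send credit (peer_rmbe_space)
 * - an advanced producer cursor announces new rx data (bytes_to_rcv)
 * - flag changes may demand an immediate cursor reply or close handling
 */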
static void smc_cdc_msg_recv_action(struct smc_sock *smc,
				    struct smc_cdc_msg *cdc)
{
	union smc_host_cursor cons_old, prod_old;
	struct smc_connection *conn = &smc->conn;
	int diff_cons, diff_prod;

	smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
	smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
	smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);

	diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
				  &conn->local_rx_ctrl.cons);
	if (diff_cons) {
		/* peer_rmbe_space is decreased during data transfer with RDMA
		 * write
		 */
		smp_mb__before_atomic();
		atomic_add(diff_cons, &conn->peer_rmbe_space);
		/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
		smp_mb__after_atomic();
	}

	diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
				  &conn->local_rx_ctrl.prod);
	if (diff_prod) {
		if (conn->local_rx_ctrl.prod_flags.urg_data_present)
			smc_cdc_handle_urg_data_arrival(smc, &diff_prod);
		/* bytes_to_rcv is decreased in smc_recvmsg */
		smp_mb__before_atomic();
		atomic_add(diff_prod, &conn->bytes_to_rcv);
		/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
		smp_mb__after_atomic();
		smc->sk.sk_data_ready(&smc->sk);
	} else {
		if (conn->local_rx_ctrl.prod_flags.write_blocked ||
		    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
		    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
			if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
				conn->urg_state = SMC_URG_NOTYET;
			/* force immediate tx of current consumer cursor, but
			 * under send_lock to guarantee arrival in seqno-order
			 */
			if (smc->sk.sk_state != SMC_INIT)
				smc_tx_sndbuf_nonempty(conn);
		}
	}

	/* piggybacked tx info */
	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
	if (diff_cons && smc_tx_prepared_sends(conn)) {
		smc_tx_sndbuf_nonempty(conn);
		/* trigger socket release if connection closed */
		smc_close_wake_tx_prepared(smc);
	}
	if (diff_cons && conn->urg_tx_pend &&
	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
		/* urg data confirmed by peer, indicate we're ready for more */
		conn->urg_tx_pend = false;
		smc->sk.sk_write_space(&smc->sk);
	}

	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		smc->sk.sk_err = ECONNRESET;
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	}
	if (smc_cdc_rxed_any_close_or_senddone(conn)) {
		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
		if (smc->clcsock && smc->clcsock->sk)
			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
		sock_set_flag(&smc->sk, SOCK_DONE);
		sock_hold(&smc->sk); /* sock_put in close_work */
		if (!schedule_work(&conn->close_work))
			sock_put(&smc->sk);
	}
}

/* called under tasklet context */
static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
{
	sock_hold(&smc->sk);
	bh_lock_sock(&smc->sk);
	smc_cdc_msg_recv_action(smc, cdc);
	bh_unlock_sock(&smc->sk);
	sock_put(&smc->sk); /* no free sk in softirq-context */
}

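/* The SMC-D CDC message lives at the start of the connection's receive
 * buffer and may be rewritten by the device at any time, so the cursors
 * are copied into a local snapshot before being processed.
 */
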
/* Tasklet handler for this connection's receive side. Scheduled by the
 * ISM device IRQ handler to indicate an update in the DMBE.
 *
 * Context:
 * - tasklet context
 */
static void smcd_cdc_rx_tsklet(unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smcd_cdc_msg *data_cdc;
	struct smcd_cdc_msg cdc;
	struct smc_sock *smc;

	if (!conn)
		return;

	data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
	smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
	smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
	smc = container_of(conn, struct smc_sock, conn);
	smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}

/* Initialize the receive tasklet. Called at connection setup to prepare
 * the receiver side.
 */
void smcd_cdc_rx_init(struct smc_connection *conn)
{
	tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
}

/***************************** init, exit, misc ******************************/

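/* receive completion handler for SMC-R CDC messages: validates the
 * message length, looks up the connection by its alert token and drops
 * stale sequence numbers unless failover validation is requested
 */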
static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_cdc_msg *cdc = buf;
	struct smc_connection *conn;
	struct smc_link_group *lgr;
	struct smc_sock *smc;

	if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
		return; /* short message */
	if (cdc->len != SMC_WR_TX_SIZE)
		return; /* invalid message */

	/* lookup connection */
	lgr = smc_get_lgr(link);
	read_lock_bh(&lgr->conns_lock);
	conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
	read_unlock_bh(&lgr->conns_lock);
	if (!conn)
		return;
	smc = container_of(conn, struct smc_sock, conn);

	if (!cdc->prod_flags.failover_validation) {
		if (smc_cdc_before(ntohs(cdc->seqno),
				   conn->local_rx_ctrl.seqno))
			/* received seqno is old */
			return;
	}
	smc_cdc_msg_recv(smc, cdc);
}

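/* NULL-terminated table of CDC receive handlers, registered with the
 * generic smc_wr receive path by smc_cdc_init()
 */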
static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = {
	{
		.handler	= smc_cdc_rx_handler,
		.type		= SMC_CDC_MSG_TYPE
	},
	{
		.handler	= NULL,
	}
};

int __init smc_cdc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_cdc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}