// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting Infiniband API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/smc.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_netlink.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)

struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

static void smc_link_down_work(struct work_struct *work);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_ibdev_cnt_inc(struct smc_link *lnk)
{
	atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_ibdev_cnt_dec(struct smc_link *lnk)
{
	atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
						SMC_LGR_FREE_DELAY_CLNT :
						SMC_LGR_FREE_DELAY_SERV);
	}
}
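
/* Editorial note (illustrative, not part of the upstream source): with the
 * delays above, a link group whose removal was scheduled at time T is freed
 * no earlier than
 *
 *	server:  T + 600 * HZ jiffies  (10 minutes)
 *	client:  T + 610 * HZ jiffies  (10 minutes + 10 seconds)
 *
 * The extra 10 seconds on the client side ensure the server, which drives
 * link group creation, always gives up its link group first, so the client
 * cannot keep a link group the server has already freed.
 */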

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}
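
/* Editorial sketch (illustrative, not part of the upstream source): the
 * matching lookup walks the same rbtree with the same ordering as the insert
 * core above; compare smc_lgr_find_conn(), used in smc_lgr_register_conn():
 *
 *	struct rb_node *node = lgr->conns_all.rb_node;
 *
 *	while (node) {
 *		struct smc_connection *cur =
 *			rb_entry(node, struct smc_connection, alert_node);
 *
 *		if (cur->alert_token_local > token)
 *			node = node->rb_left;
 *		else if (cur->alert_token_local < token)
 *			node = node->rb_right;
 *		else
 *			return cur;
 *	}
 *	return NULL;
 */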

/* assign an SMC-R link to the connection */
static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
{
	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
				       SMC_LNK_ACTIVE;
	int i, j;

	/* do link balancing */
	conn->lnk = NULL;	/* reset conn->lnk first */
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &conn->lgr->lnk[i];

		if (lnk->state != expected || lnk->link_is_asym)
			continue;
		if (conn->lgr->role == SMC_CLNT) {
			conn->lnk = lnk; /* temporary, SMC server assigns link*/
			break;
		}
		if (conn->lgr->conns_num % 2) {
			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
				struct smc_link *lnk2;

				lnk2 = &conn->lgr->lnk[j];
				if (lnk2->state == expected &&
				    !lnk2->link_is_asym) {
					conn->lnk = lnk2;
					break;
				}
			}
		}
		if (!conn->lnk)
			conn->lnk = lnk;
		break;
	}
	if (!conn->lnk)
		return SMC_CLC_DECL_NOACTLINK;
	atomic_inc(&conn->lnk->conn_cnt);
	return 0;
}
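
/* Editorial note (illustrative, not part of the upstream source): on the
 * server, the balancing above alternates by parity of conns_num.  With two
 * usable links lnk[0] and lnk[1]:
 *
 *	conns_num even -> first matching link, i.e. lnk[0]
 *	conns_num odd  -> the next matching link after it, i.e. lnk[1]
 *
 * A client only picks a provisional link here; the link assigned by the SMC
 * server via LLC is authoritative.
 */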

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);
	int rc;

	if (!conn->lgr->is_smcd) {
		rc = smcr_lgr_conn_assign_link(conn, first);
		if (rc) {
			conn->lgr = NULL;
			return rc;
		}
	}
	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
	return 0;
}
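
/* Editorial note (illustrative, not part of the upstream source): alert
 * tokens come from a single global counter, so the while loop above retries
 * only in two rare cases after the u32 counter wraps: atomic_inc_return()
 * yielding 0 (reserved for "no token assigned") or yielding a value still in
 * use by another connection of this link group.
 */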

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	if (conn->lnk)
		atomic_dec(&conn->lnk->conn_cnt);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!smc_conn_lgr_valid(conn))
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local)
		__smc_lgr_unregister_conn(conn);
	write_unlock_bh(&lgr->conns_lock);
}

int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	char hostname[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_seid[SMC_MAX_EID_LEN + 1];
	struct nlattr *attrs;
	u8 *seid = NULL;
	u8 *host = NULL;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_SYS_INFO);
	if (!nlh)
		goto errmsg;
	if (cb_ctx->pos[0])
		goto errout;
	attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO);
	if (!attrs)
		goto errout;
	if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable()))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_SMCR_V2, true))
		goto errattr;
	smc_clc_get_hostname(&host);
	if (host) {
		memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
		hostname[SMC_MAX_HOSTNAME_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
			goto errattr;
	}
	if (smc_ism_is_v2_capable()) {
		smc_ism_get_system_eid(&seid);
		memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
		smc_seid[SMC_MAX_EID_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	cb_ctx->pos[0] = 1;
	return skb->len;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return skb->len;
}
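
/* Editorial sketch (illustrative, not part of the upstream source): the dump
 * callbacks in this file share one unwind idiom, backing out exactly what was
 * already started:
 *
 *	genlmsg_put() fails     -> errmsg  (nothing to undo)
 *	nla_nest_start() fails  -> errout  (genlmsg_cancel only)
 *	any nla_put_*() fails   -> errattr (nla_nest_cancel, then errout)
 *
 * smc_nl_get_sys_info() returns skb->len even on failure so netlink delivers
 * whatever fit, and cb_ctx->pos[0] marks this one-shot message as done for
 * subsequent invocations of the dump.
 */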

/* Fill SMC_NLA_LGR_D_V2_COMMON/SMC_NLA_LGR_R_V2_COMMON nested attributes */
static int smc_nl_fill_lgr_v2_common(struct smc_link_group *lgr,
				     struct sk_buff *skb,
				     struct netlink_callback *cb,
				     struct nlattr *v2_attrs)
{
	char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_eid[SMC_MAX_EID_LEN + 1];

	if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
		goto errv2attr;
	memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
	smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
		goto errv2attr;
	memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
	smc_eid[SMC_MAX_EID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
	return -EMSGSIZE;
}

static int smc_nl_fill_smcr_lgr_v2(struct smc_link_group *lgr,
				   struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct nlattr *v2_attrs;

	v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2);
	if (!v2_attrs)
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_DIRECT, !lgr->uses_gateway))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_CONNS, lgr->max_conns))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_LINKS, lgr->max_links))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
errattr:
	return -EMSGSIZE;
}

static int smc_nl_fill_lgr(struct smc_link_group *lgr,
			   struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	char smc_target[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *attrs, *v2_attrs;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id)))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_BUF_TYPE, lgr->buf_type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_R_NET_COOKIE,
			      lgr->net->net_cookie, SMC_NLA_LGR_R_PAD))
		goto errattr;
	memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
	smc_target[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
		goto errattr;
	if (lgr->smc_version > SMC_V1) {
		v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON);
		if (!v2_attrs)
			goto errattr;
		if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
			goto errattr;
		if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb))
			goto errattr;
	}

	nla_nest_end(skb, attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	return -EMSGSIZE;
}

static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
				struct smc_link *link,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	u8 smc_gid_target[41];
	struct nlattr *attrs;
	u32 link_uid = 0;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LINK_SMCR);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_LINK_ID, link->link_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT,
			atomic_read(&link->conn_cnt)))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx))
		goto errattr;
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname);
	if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname))
		goto errattr;
	memcpy(&link_uid, link->link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid))
		goto errattr;
	memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->gid);
	if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->peer_gid);
	if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target))
		goto errattr;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_lgr(struct smc_link_group *lgr,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     bool list_links)
{
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCR);
	if (!nlh)
		goto errmsg;
	if (smc_nl_fill_lgr(lgr, skb, cb))
		goto errout;

	genlmsg_end(skb, nlh);
	if (!list_links)
		goto out;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&lgr->lnk[i]))
			continue;
		if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
			goto errout;
	}
out:
	return 0;

errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 bool list_links)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[0];
	int num = 0;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_lgr(lgr, skb, cb, list_links))
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&smc_lgr->lock);
	cb_ctx->pos[0] = num;
}
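
/* Editorial note (an assumption-level reading, not part of the upstream
 * source): cb_ctx->pos[0] implements dump resumption.  If one pass managed to
 * fit link groups 0..2 before the skb filled up, pos[0] ends as 3 and the
 * next invocation skips num < 3 and resumes with the fourth entry.  Link
 * groups appearing or vanishing between passes can shift the count; that is
 * the usual best-effort semantic of netlink dumps.
 */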

static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct smcd_dev *smcd = lgr->smcd;
	struct smcd_gid smcd_gid;
	struct nlattr *attrs;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCD);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
		goto errattr;
	smcd->ops->get_local_gid(smcd, &smcd_gid);
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID,
			      smcd_gid.gid, SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid.gid,
			      SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
		goto errattr;
	memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
		goto errattr;
	if (lgr->smc_version > SMC_V1) {
		struct nlattr *v2_attrs;

		v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_D_V2_COMMON);
		if (!v2_attrs)
			goto errattr;
		if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[1];
	int rc = 0, num = 0;

	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry(lgr, &dev->lgr_list, list) {
		if (!lgr->is_smcd)
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_fill_smcd_lgr(lgr, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&dev->lgr_lock);
	cb_ctx->pos[1] = num;
	return rc;
}

static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smcd_dev *smcd_dev;
	int snum = cb_ctx->pos[0];
	int rc = 0, num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcd_dev, &dev_list->list, list) {
		if (list_empty(&smcd_dev->lgr_list))
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
	return rc;
}

int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = false;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = true;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb);
	return skb->len;
}

void smc_lgr_cleanup_early(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	if (!lgr)
		return;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, true);
}

static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (smc_link_sendable(lnk))
			lnk->state = SMC_LNK_INACTIVE;
	}
	wake_up_all(&lgr->llc_msg_waiter);
	wake_up_all(&lgr->llc_flow_waiter);
}

static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && !lgr->terminating)
		smc_llc_send_link_delete_all(lgr, true,
					     SMC_LLC_DEL_PROG_INIT_TERM);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd)
		smcr_lgr_link_deactivate_all(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
again:
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (smc_link_usable(&lgr->lnk[i]) &&
			    lgr->lnk[i].link_id == link_id)
				goto again;
		}
		break;
	}
	return link_id;
}
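
/* Editorial note (illustrative, not part of the upstream source): link ids
 * are u8 and wrap.  E.g. with next_link_id == 255, the first increment yields
 * 0, which is skipped as the reserved "no link" value, and the second yields
 * 1; the inner loop then restarts the search in case id 1 is still taken by a
 * usable link of this group.
 */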

static void smcr_copy_dev_info_to_link(struct smc_link *link)
{
	struct smc_ib_device *smcibdev = link->smcibdev;

	snprintf(link->ibname, sizeof(link->ibname), "%s",
		 smcibdev->ibdev->name);
	link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1];
}

int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini)
{
	struct smc_ib_device *smcibdev;
	u8 rndvec[3];
	int rc;

	if (lgr->smc_version == SMC_V2) {
		lnk->smcibdev = ini->smcrv2.ib_dev_v2;
		lnk->ibport = ini->smcrv2.ib_port_v2;
	} else {
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
	}
	get_device(&lnk->smcibdev->ibdev->dev);
	atomic_inc(&lnk->smcibdev->lnk_cnt);
	refcount_set(&lnk->refcnt, 1); /* link refcnt is set to 1 */
	lnk->clearing = 0;
	lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
	lnk->link_idx = link_idx;
	lnk->wr_rx_id_compl = 0;
	smc_ibdev_cnt_inc(lnk);
	smcr_copy_dev_info_to_link(lnk);
	atomic_set(&lnk->conn_cnt, 0);
	smc_llc_link_set_uid(lnk);
	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
	if (!lnk->smcibdev->initialized) {
		rc = smc_ib_setup_per_ibdev(lnk->smcibdev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index,
				  lgr->smc_version == SMC_V2 ?
						  &ini->smcrv2 : NULL);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	lnk->state = SMC_LNK_ACTIVATING;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk, false);
out:
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
	smc_lgr_put(lgr); /* lgr_hold above */
	return rc;
}
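
/* Editorial note (illustrative, not part of the upstream source): the labels
 * above form the usual unwind ladder, releasing in reverse order of
 * acquisition, with each label falling through to the next:
 *
 *	smc_wr_alloc_link_mem() fails           -> clear_llc_lnk
 *	smc_ib_create_protection_domain() fails -> free_link_mem
 *	smc_ib_create_queue_pair() fails        -> dealloc_pd
 *	smc_wr_create_link() fails              -> destroy_qp
 *
 * and "out" finally drops the ibdev and lgr references taken at entry.
 */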

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smcd_dev *smcd;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
				     ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
				     SMC_LGR_ID_SIZE, &lgr->id);
	if (!lgr->tx_wq) {
		rc = -ENOMEM;
		goto free_lgr;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */
	init_rwsem(&lgr->sndbufs_lock);
	init_rwsem(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		smcd = ini->ism_dev[ini->ism_selected];
		get_device(smcd->ops->get_dev(smcd));
		lgr->peer_gid.gid =
			ini->ism_peer_gid[ini->ism_selected].gid;
		lgr->peer_gid.gid_ext =
			ini->ism_peer_gid[ini->ism_selected].gid_ext;
		lgr->smcd = ini->ism_dev[ini->ism_selected];
		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->smc_version = ini->smcd_version;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		struct smc_ib_device *ibdev;
		int ibport;

		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		lgr->smc_version = ini->smcr_version;
		memcpy(lgr->peer_systemid, ini->peer_systemid,
		       SMC_SYSTEMID_LEN);
		if (lgr->smc_version == SMC_V2) {
			ibdev = ini->smcrv2.ib_dev_v2;
			ibport = ini->smcrv2.ib_port_v2;
			lgr->saddr = ini->smcrv2.saddr;
			lgr->uses_gateway = ini->smcrv2.uses_gateway;
			memcpy(lgr->nexthop_mac, ini->smcrv2.nexthop_mac,
			       ETH_ALEN);
			lgr->max_conns = ini->max_conns;
			lgr->max_links = ini->max_links;
		} else {
			ibdev = ini->ib_dev;
			ibport = ini->ib_port;
			lgr->max_conns = SMC_CONN_PER_LGR_MAX;
			lgr->max_links = SMC_LINKS_ADD_LNK_MAX;
		}
		memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1],
		       SMC_MAX_PNETID_LEN);
		rc = smc_wr_alloc_lgr_mem(lgr);
		if (rc)
			goto free_wq;
		smc_llc_lgr_init(lgr, smc);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc) {
			smc_wr_free_lgr_mem(lgr);
			goto free_wq;
		}
		lgr->net = smc_ib_net(lnk->smcibdev);
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		lgr->buf_type = lgr->net->smc.sysctl_smcr_buf_type;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add_tail(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_wq:
	destroy_workqueue(lgr->tx_wq);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}
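
/* Editorial note (illustrative, not part of the upstream source): the 4-byte
 * lgr->id is a copy of smc_lgr_list.num, which is bumped by SMC_LGR_NUM_INCR
 * (256) per link group, so consecutive link groups get ids 256 apart.  That
 * spacing leaves room to derive distinct per-link uids from the group id plus
 * the small link_id (see smc_llc_link_set_uid(), called from
 * smcr_link_init()).
 */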

static int smc_write_space(struct smc_connection *conn)
{
	int buffer_len = conn->peer_rmbe_size;
	union smc_host_cursor prod;
	union smc_host_cursor cons;
	int space;

	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
	/* determine rx_buf space */
	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
	return space;
}
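
/* Worked example (illustrative, not part of the upstream source), assuming a
 * peer RMBE of 65536 bytes: if prod has run 10000 bytes ahead of what cons
 * confirms as consumed, smc_curs_diff() yields 10000 and
 *
 *	space = 65536 - 10000 = 55536
 *
 * bytes may still be RDMA-written without overwriting unconsumed data.
 */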

static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
			     struct smc_wr_buf *wr_buf)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons, fin;
	int rc = 0;
	int diff;

	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
	/* set prod cursor to old state, enforce tx_rdma_writes() */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
		/* cons cursor advanced more than fin, and prod was set
		 * equal to fin above, so now prod is smaller than cons.
		 * Fix that.
		 */
		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_sent, diff);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_fin, diff);

		smp_mb__before_atomic();
		atomic_add(diff, &conn->sndbuf_space);
		smp_mb__after_atomic();

		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl.prod, diff);
		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl_fin, diff);
	}
	/* recalculate, value is used by tx_rdma_writes() */
	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));

	if (smc->sk.sk_state != SMC_INIT &&
	    smc->sk.sk_state != SMC_CLOSED) {
		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
		if (!rc) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
			smc->sk.sk_data_ready(&smc->sk);
		}
	} else {
		smc_wr_tx_put_slot(conn->lnk,
				   (struct smc_wr_tx_pend_priv *)pend);
	}
	return rc;
}

void smc_switch_link_and_count(struct smc_connection *conn,
			       struct smc_link *to_lnk)
{
	atomic_dec(&conn->lnk->conn_cnt);
	/* link_hold in smc_conn_create() */
	smcr_link_put(conn->lnk);
	conn->lnk = to_lnk;
	atomic_inc(&conn->lnk->conn_cnt);
	/* link_put in smc_conn_free() */
	smcr_link_hold(conn->lnk);
}

struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err)
{
	struct smc_link *to_lnk = NULL;
	struct smc_cdc_tx_pend *pend;
	struct smc_connection *conn;
	struct smc_wr_buf *wr_buf;
	struct smc_sock *smc;
	struct rb_node *node;
	int i, rc = 0;

	/* link is inactive, wake up tx waiters */
	smc_wr_wakeup_tx_wait(from_lnk);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
			continue;
		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
		    from_lnk->ibport == lgr->lnk[i].ibport)
			continue;
		to_lnk = &lgr->lnk[i];
		break;
	}
	if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
		smc_lgr_terminate_sched(lgr);
		return NULL;
	}
again:
	read_lock_bh(&lgr->conns_lock);
	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		if (conn->lnk != from_lnk)
			continue;
		smc = container_of(conn, struct smc_sock, conn);
		/* conn->lnk not yet set in SMC_INIT state */
		if (smc->sk.sk_state == SMC_INIT)
			continue;
		if (smc->sk.sk_state == SMC_CLOSED ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
		    smc->sk.sk_state == SMC_PROCESSABORT) {
			spin_lock_bh(&conn->send_lock);
			smc_switch_link_and_count(conn, to_lnk);
			spin_unlock_bh(&conn->send_lock);
			continue;
		}
		sock_hold(&smc->sk);
		read_unlock_bh(&lgr->conns_lock);
		/* pre-fetch buffer outside of send_lock, might sleep */
		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
		if (rc)
			goto err_out;
		/* avoid race with smcr_tx_sndbuf_nonempty() */
		spin_lock_bh(&conn->send_lock);
		smc_switch_link_and_count(conn, to_lnk);
		rc = smc_switch_cursor(smc, pend, wr_buf);
		spin_unlock_bh(&conn->send_lock);
		sock_put(&smc->sk);
		if (rc)
			goto err_out;
		goto again;
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_wr_tx_link_put(to_lnk);
	return to_lnk;

err_out:
	smcr_link_down_cond_sched(to_lnk);
	smc_wr_tx_link_put(to_lnk);
	return NULL;
}

static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
			   struct smc_link_group *lgr)
{
	struct rw_semaphore *lock;	/* lock buffer list */
	int rc;

	if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
		if (!rc) {
			/* protect against smc_llc_cli_rkey_exchange() */
			down_read(&lgr->llc_conf_mutex);
			smc_llc_do_delete_rkey(lgr, buf_desc);
			buf_desc->is_conf_rkey = false;
			up_read(&lgr->llc_conf_mutex);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
		}
	}

	if (buf_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		lock = is_rmb ? &lgr->rmbs_lock :
				&lgr->sndbufs_lock;
		down_write(lock);
		list_del(&buf_desc->list);
		up_write(lock);

		smc_buf_free(lgr, is_rmb, buf_desc);
	} else {
		/* memzero_explicit provides potential memory barrier semantics */
		memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
		WRITE_ONCE(buf_desc->used, 0);
	}
}

static void smcd_buf_detach(struct smc_connection *conn)
{
	struct smcd_dev *smcd = conn->lgr->smcd;
	u64 peer_token = conn->peer_token;

	if (!conn->sndbuf_desc)
		return;

	smc_ism_detach_dmb(smcd, peer_token);

	kfree(conn->sndbuf_desc);
	conn->sndbuf_desc = NULL;
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc) {
		if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) {
			smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
		} else {
			memzero_explicit(conn->sndbuf_desc->cpu_addr, conn->sndbuf_desc->len);
			WRITE_ONCE(conn->sndbuf_desc->used, 0);
		}
	}
	if (conn->rmb_desc) {
		if (!lgr->is_smcd) {
			smcr_buf_unuse(conn->rmb_desc, true, lgr);
		} else {
			memzero_explicit(conn->rmb_desc->cpu_addr,
					 conn->rmb_desc->len + sizeof(struct smcd_cdc_msg));
			WRITE_ONCE(conn->rmb_desc->used, 0);
		}
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr || conn->freed)
		/* Connection has never been registered in a
		 * link group, or has already been freed.
		 */
		return;

	conn->freed = 1;
	if (!smc_conn_lgr_valid(conn))
		/* Connection has already unregistered from
		 * link group.
		 */
		goto lgr_put;

	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		if (smc_ism_support_dmb_nocopy(lgr->smcd))
			smcd_buf_detach(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_wait_pend_tx_wr(conn);
		if (current_work() != &conn->abort_work)
			cancel_work_sync(&conn->abort_work);
	}
	if (!list_empty(&lgr->list)) {
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
		smc_lgr_unregister_conn(conn);
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
lgr_put:
	if (!lgr->is_smcd)
		smcr_link_put(conn->lnk); /* link_hold in smc_conn_create() */
	smc_lgr_put(lgr); /* lgr_hold in smc_conn_create() */
}

/* unregister a link from a buf_desc */
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
				struct smc_link *lnk)
{
	if (is_rmb || buf_desc->is_vm)
		buf_desc->is_reg_mr[lnk->link_idx] = false;
	if (!buf_desc->is_map_ib[lnk->link_idx])
		return;

	if ((is_rmb || buf_desc->is_vm) &&
	    buf_desc->mr[lnk->link_idx]) {
		smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);
		buf_desc->mr[lnk->link_idx] = NULL;
	}
	if (is_rmb)
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	else
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);

	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	buf_desc->is_map_ib[lnk->link_idx] = false;
}

/* unmap all buffers of lgr for a deleted link */
static void smcr_buf_unmap_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		down_write(&lgr->rmbs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
			smcr_buf_unmap_link(buf_desc, true, lnk);
		up_write(&lgr->rmbs_lock);

		down_write(&lgr->sndbufs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
					 list)
			smcr_buf_unmap_link(buf_desc, false, lnk);
		up_write(&lgr->sndbufs_lock);
	}
}

static void smcr_rtoken_clear_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		lgr->rtokens[i][lnk->link_idx].rkey = 0;
		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
	}
}

static void __smcr_link_clear(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_ib_device *smcibdev;

	smc_wr_free_link_mem(lnk);
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
	smc_lgr_put(lgr); /* lgr_hold in smcr_link_init() */
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_clear(struct smc_link *lnk, bool log)
{
	if (!lnk->lgr || lnk->clearing ||
	    lnk->state == SMC_LNK_UNUSED)
		return;
	lnk->clearing = 1;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk, log);
	smcr_buf_unmap_lgr(lnk);
	smcr_rtoken_clear_link(lnk);
	smc_ib_modify_qp_error(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smcr_link_put(lnk); /* theoretically last link_put */
}

void smcr_link_hold(struct smc_link *lnk)
{
	refcount_inc(&lnk->refcnt);
}

void smcr_link_put(struct smc_link *lnk)
{
	if (refcount_dec_and_test(&lnk->refcnt))
		__smcr_link_clear(lnk);
}
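
/* Editorial note (illustrative, not part of the upstream source): link
 * references pair up as usual for kernel refcounting.  smcr_link_init() takes
 * the initial reference, every connection using the link holds another one
 * (see smc_switch_link_and_count()), and smcr_link_clear() drops what is
 * typically the last one; only the final smcr_link_put() runs
 * __smcr_link_clear() to free the link and wake ibdev waiters.
 */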

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

	if (!buf_desc->is_vm && buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	else if (buf_desc->is_vm && buf_desc->cpu_addr)
		vfree(buf_desc->cpu_addr);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* won't be freed until no one accesses the lgr anymore */
1389 static void __smc_lgr_free(struct smc_link_group *lgr)
1390 {
1391 	smc_lgr_free_bufs(lgr);
1392 	if (lgr->is_smcd) {
1393 		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
1394 			wake_up(&lgr->smcd->lgrs_deleted);
1395 	} else {
1396 		smc_wr_free_lgr_mem(lgr);
1397 		if (!atomic_dec_return(&lgr_cnt))
1398 			wake_up(&lgrs_deleted);
1399 	}
1400 	kfree(lgr);
1401 }
1402 
1403 /* remove a link group */
1404 static void smc_lgr_free(struct smc_link_group *lgr)
1405 {
1406 	int i;
1407 
1408 	if (!lgr->is_smcd) {
1409 		down_write(&lgr->llc_conf_mutex);
1410 		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1411 			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
1412 				smcr_link_clear(&lgr->lnk[i], false);
1413 		}
1414 		up_write(&lgr->llc_conf_mutex);
1415 		smc_llc_lgr_clear(lgr);
1416 	}
1417 
1418 	destroy_workqueue(lgr->tx_wq);
1419 	if (lgr->is_smcd) {
1420 		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
1421 		put_device(lgr->smcd->ops->get_dev(lgr->smcd));
1422 	}
1423 	smc_lgr_put(lgr); /* theoretically last lgr_put */
1424 }
1425 
1426 void smc_lgr_hold(struct smc_link_group *lgr)
1427 {
1428 	refcount_inc(&lgr->refcnt);
1429 }
1430 
1431 void smc_lgr_put(struct smc_link_group *lgr)
1432 {
1433 	if (refcount_dec_and_test(&lgr->refcnt))
1434 		__smc_lgr_free(lgr);
1435 }
1436 
1437 static void smc_sk_wake_ups(struct smc_sock *smc)
1438 {
1439 	smc->sk.sk_write_space(&smc->sk);
1440 	smc->sk.sk_data_ready(&smc->sk);
1441 	smc->sk.sk_state_change(&smc->sk);
1442 }
1443 
1444 /* kill a connection */
1445 static void smc_conn_kill(struct smc_connection *conn, bool soft)
1446 {
1447 	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
1448 
1449 	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
1450 		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
1451 	else
1452 		smc_close_abort(conn);
1453 	conn->killed = 1;
1454 	smc->sk.sk_err = ECONNABORTED;
1455 	smc_sk_wake_ups(smc);
1456 	if (conn->lgr->is_smcd) {
1457 		smc_ism_unset_conn(conn);
1458 		if (smc_ism_support_dmb_nocopy(conn->lgr->smcd))
1459 			smcd_buf_detach(conn);
1460 		if (soft)
1461 			tasklet_kill(&conn->rx_tsklet);
1462 		else
1463 			tasklet_unlock_wait(&conn->rx_tsklet);
1464 	} else {
1465 		smc_cdc_wait_pend_tx_wr(conn);
1466 	}
1467 	smc_lgr_unregister_conn(conn);
1468 	smc_close_active_abort(smc);
1469 }
1470 
1471 static void smc_lgr_cleanup(struct smc_link_group *lgr)
1472 {
1473 	if (lgr->is_smcd) {
1474 		smc_ism_signal_shutdown(lgr);
1475 	} else {
1476 		u32 rsn = lgr->llc_termination_rsn;
1477 
1478 		if (!rsn)
1479 			rsn = SMC_LLC_DEL_PROG_INIT_TERM;
1480 		smc_llc_send_link_delete_all(lgr, false, rsn);
1481 		smcr_lgr_link_deactivate_all(lgr);
1482 	}
1483 }
1484 
1485 /* terminate link group
1486  * @soft: true if link group shutdown can take its time
1487  *	  false if immediate link group shutdown is required
1488  */
1489 static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
1490 {
1491 	struct smc_connection *conn;
1492 	struct smc_sock *smc;
1493 	struct rb_node *node;
1494 
1495 	if (lgr->terminating)
1496 		return;	/* lgr already terminating */
1497 	/* cancel free_work sync, will terminate when lgr->freeing is set */
1498 	cancel_delayed_work(&lgr->free_work);
1499 	lgr->terminating = 1;
1500 
1501 	/* kill remaining link group connections */
1502 	read_lock_bh(&lgr->conns_lock);
1503 	node = rb_first(&lgr->conns_all);
1504 	while (node) {
1505 		read_unlock_bh(&lgr->conns_lock);
1506 		conn = rb_entry(node, struct smc_connection, alert_node);
1507 		smc = container_of(conn, struct smc_sock, conn);
1508 		sock_hold(&smc->sk); /* sock_put below */
1509 		lock_sock(&smc->sk);
1510 		smc_conn_kill(conn, soft);
1511 		release_sock(&smc->sk);
1512 		sock_put(&smc->sk); /* sock_hold above */
1513 		read_lock_bh(&lgr->conns_lock);
1514 		node = rb_first(&lgr->conns_all);
1515 	}
1516 	read_unlock_bh(&lgr->conns_lock);
1517 	smc_lgr_cleanup(lgr);
1518 	smc_lgr_free(lgr);
1519 }
1520 
1521 /* unlink link group and schedule termination */
1522 void smc_lgr_terminate_sched(struct smc_link_group *lgr)
1523 {
1524 	spinlock_t *lgr_lock;
1525 
1526 	smc_lgr_list_head(lgr, &lgr_lock);
1527 	spin_lock_bh(lgr_lock);
1528 	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
1529 		spin_unlock_bh(lgr_lock);
1530 		return;	/* lgr already terminating */
1531 	}
1532 	list_del_init(&lgr->list);
1533 	lgr->freeing = 1;
1534 	spin_unlock_bh(lgr_lock);
1535 	schedule_work(&lgr->terminate_work);
1536 }
1537 
1538 /* Called when peer lgr shutdown (regularly or abnormally) is received */
1539 void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
1540 			unsigned short vlan)
1541 {
1542 	struct smc_link_group *lgr, *l;
1543 	LIST_HEAD(lgr_free_list);
1544 
1545 	/* run common cleanup function and build free list */
1546 	spin_lock_bh(&dev->lgr_lock);
1547 	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
1548 		if ((!peer_gid->gid ||
1549 		     (lgr->peer_gid.gid == peer_gid->gid &&
1550 		      !smc_ism_is_virtual(dev) ? 1 :
1551 		      lgr->peer_gid.gid_ext == peer_gid->gid_ext)) &&
1552 		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
1553 			if (peer_gid->gid) /* peer triggered termination */
1554 				lgr->peer_shutdown = 1;
1555 			list_move(&lgr->list, &lgr_free_list);
1556 			lgr->freeing = 1;
1557 		}
1558 	}
1559 	spin_unlock_bh(&dev->lgr_lock);
1560 
1561 	/* cancel the regular free workers and actually free lgrs */
1562 	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
1563 		list_del_init(&lgr->list);
1564 		schedule_work(&lgr->terminate_work);
1565 	}
1566 }
1567 
1568 /* Called when an SMCD device is removed or the smc module is unloaded */
1569 void smc_smcd_terminate_all(struct smcd_dev *smcd)
1570 {
1571 	struct smc_link_group *lgr, *lg;
1572 	LIST_HEAD(lgr_free_list);
1573 
1574 	spin_lock_bh(&smcd->lgr_lock);
1575 	list_splice_init(&smcd->lgr_list, &lgr_free_list);
1576 	list_for_each_entry(lgr, &lgr_free_list, list)
1577 		lgr->freeing = 1;
1578 	spin_unlock_bh(&smcd->lgr_lock);
1579 
1580 	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
1581 		list_del_init(&lgr->list);
1582 		__smc_lgr_terminate(lgr, false);
1583 	}
1584 
1585 	if (atomic_read(&smcd->lgr_cnt))
1586 		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
1587 }
1588 
1589 /* Called when an SMCR device is removed or the smc module is unloaded.
1590  * If smcibdev is given, all SMCR link groups using this device are terminated.
1591  * If smcibdev is NULL, all SMCR link groups are terminated.
1592  */
1593 void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
1594 {
1595 	struct smc_link_group *lgr, *lg;
1596 	LIST_HEAD(lgr_free_list);
1597 	int i;
1598 
1599 	spin_lock_bh(&smc_lgr_list.lock);
1600 	if (!smcibdev) {
1601 		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
1602 		list_for_each_entry(lgr, &lgr_free_list, list)
1603 			lgr->freeing = 1;
1604 	} else {
1605 		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
1606 			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1607 				if (lgr->lnk[i].smcibdev == smcibdev)
1608 					smcr_link_down_cond_sched(&lgr->lnk[i]);
1609 			}
1610 		}
1611 	}
1612 	spin_unlock_bh(&smc_lgr_list.lock);
1613 
1614 	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
1615 		list_del_init(&lgr->list);
1616 		smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
1617 		__smc_lgr_terminate(lgr, false);
1618 	}
1619 
1620 	if (smcibdev) {
1621 		if (atomic_read(&smcibdev->lnk_cnt))
1622 			wait_event(smcibdev->lnks_deleted,
1623 				   !atomic_read(&smcibdev->lnk_cnt));
1624 	} else {
1625 		if (atomic_read(&lgr_cnt))
1626 			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
1627 	}
1628 }
1629 
1630 /* set new lgr type and clear all asymmetric link tagging */
1631 void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
1632 {
1633 	char *lgr_type = "";
1634 	int i;
1635 
1636 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
1637 		if (smc_link_usable(&lgr->lnk[i]))
1638 			lgr->lnk[i].link_is_asym = false;
1639 	if (lgr->type == new_type)
1640 		return;
1641 	lgr->type = new_type;
1642 
1643 	switch (lgr->type) {
1644 	case SMC_LGR_NONE:
1645 		lgr_type = "NONE";
1646 		break;
1647 	case SMC_LGR_SINGLE:
1648 		lgr_type = "SINGLE";
1649 		break;
1650 	case SMC_LGR_SYMMETRIC:
1651 		lgr_type = "SYMMETRIC";
1652 		break;
1653 	case SMC_LGR_ASYMMETRIC_PEER:
1654 		lgr_type = "ASYMMETRIC_PEER";
1655 		break;
1656 	case SMC_LGR_ASYMMETRIC_LOCAL:
1657 		lgr_type = "ASYMMETRIC_LOCAL";
1658 		break;
1659 	}
1660 	pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu state changed: "
1661 			    "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
1662 			    lgr->net->net_cookie, lgr_type, lgr->pnet_id);
1663 }
1664 
1665 /* set new lgr type and tag a link as asymmetric */
1666 void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
1667 			    enum smc_lgr_type new_type, int asym_lnk_idx)
1668 {
1669 	smcr_lgr_set_type(lgr, new_type);
1670 	lgr->lnk[asym_lnk_idx].link_is_asym = true;
1671 }
1672 
1673 /* abort connection, abort_work scheduled from tasklet context */
1674 static void smc_conn_abort_work(struct work_struct *work)
1675 {
1676 	struct smc_connection *conn = container_of(work,
1677 						   struct smc_connection,
1678 						   abort_work);
1679 	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
1680 
1681 	lock_sock(&smc->sk);
1682 	smc_conn_kill(conn, true);
1683 	release_sock(&smc->sk);
1684 	sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
1685 }
1686 
1687 void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
1688 {
1689 	struct smc_link_group *lgr, *n;
1690 
1691 	spin_lock_bh(&smc_lgr_list.lock);
1692 	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
1693 		struct smc_link *link;
1694 
1695 		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
1696 			    SMC_MAX_PNETID_LEN) ||
1697 		    lgr->type == SMC_LGR_SYMMETRIC ||
1698 		    lgr->type == SMC_LGR_ASYMMETRIC_PEER ||
1699 		    !rdma_dev_access_netns(smcibdev->ibdev, lgr->net))
1700 			continue;
1701 
1702 		if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1)
1703 			continue;
1704 
1705 		/* trigger local add link processing */
1706 		link = smc_llc_usable_link(lgr);
1707 		if (link)
1708 			smc_llc_add_link_local(link);
1709 	}
1710 	spin_unlock_bh(&smc_lgr_list.lock);
1711 }
1712 
1713 /* link is down - switch connections to alternate link,
1714  * must be called under lgr->llc_conf_mutex lock
1715  */
1716 static void smcr_link_down(struct smc_link *lnk)
1717 {
1718 	struct smc_link_group *lgr = lnk->lgr;
1719 	struct smc_link *to_lnk;
1720 	int del_link_id;
1721 
1722 	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
1723 		return;
1724 
1725 	to_lnk = smc_switch_conns(lgr, lnk, true);
1726 	if (!to_lnk) { /* no backup link available */
1727 		smcr_link_clear(lnk, true);
1728 		return;
1729 	}
1730 	smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
1731 	del_link_id = lnk->link_id;
1732 
1733 	if (lgr->role == SMC_SERV) {
1734 		/* trigger local delete link processing */
1735 		smc_llc_srv_delete_link_local(to_lnk, del_link_id);
1736 	} else {
1737 		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
1738 			/* another llc task is ongoing */
1739 			up_write(&lgr->llc_conf_mutex);
1740 			wait_event_timeout(lgr->llc_flow_waiter,
1741 				(list_empty(&lgr->list) ||
1742 				 lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
1743 				SMC_LLC_WAIT_TIME);
1744 			down_write(&lgr->llc_conf_mutex);
1745 		}
1746 		if (!list_empty(&lgr->list)) {
1747 			smc_llc_send_delete_link(to_lnk, del_link_id,
1748 						 SMC_LLC_REQ, true,
1749 						 SMC_LLC_DEL_LOST_PATH);
1750 			smcr_link_clear(lnk, true);
1751 		}
1752 		wake_up(&lgr->llc_flow_waiter);	/* wake up next waiter */
1753 	}
1754 }
1755 
1756 /* must be called under lgr->llc_conf_mutex lock */
1757 void smcr_link_down_cond(struct smc_link *lnk)
1758 {
1759 	if (smc_link_downing(&lnk->state)) {
1760 		trace_smcr_link_down(lnk, __builtin_return_address(0));
1761 		smcr_link_down(lnk);
1762 	}
1763 }
1764 
1765 /* will get the lgr->llc_conf_mutex lock */
1766 void smcr_link_down_cond_sched(struct smc_link *lnk)
1767 {
1768 	if (smc_link_downing(&lnk->state)) {
1769 		trace_smcr_link_down(lnk, __builtin_return_address(0));
1770 		schedule_work(&lnk->link_down_wrk);
1771 	}
1772 }
1773 
1774 void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
1775 {
1776 	struct smc_link_group *lgr, *n;
1777 	int i;
1778 
1779 	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
1780 		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
1781 			    SMC_MAX_PNETID_LEN))
1782 			continue; /* lgr is not affected */
1783 		if (list_empty(&lgr->list))
1784 			continue;
1785 		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1786 			struct smc_link *lnk = &lgr->lnk[i];
1787 
1788 			if (smc_link_usable(lnk) &&
1789 			    lnk->smcibdev == smcibdev && lnk->ibport == ibport)
1790 				smcr_link_down_cond_sched(lnk);
1791 		}
1792 	}
1793 }
1794 
1795 static void smc_link_down_work(struct work_struct *work)
1796 {
1797 	struct smc_link *link = container_of(work, struct smc_link,
1798 					     link_down_wrk);
1799 	struct smc_link_group *lgr = link->lgr;
1800 
1801 	if (list_empty(&lgr->list))
1802 		return;
1803 	wake_up_all(&lgr->llc_msg_waiter);
1804 	down_write(&lgr->llc_conf_mutex);
1805 	smcr_link_down(link);
1806 	up_write(&lgr->llc_conf_mutex);
1807 }
1808 
1809 static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
1810 				  struct netdev_nested_priv *priv)
1811 {
1812 	unsigned short *vlan_id = (unsigned short *)priv->data;
1813 
1814 	if (is_vlan_dev(lower_dev)) {
1815 		*vlan_id = vlan_dev_vlan_id(lower_dev);
1816 		return 1;
1817 	}
1818 
1819 	return 0;
1820 }
1821 
1822 /* Determine vlan of internal TCP socket. */
1823 int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
1824 {
1825 	struct dst_entry *dst = sk_dst_get(clcsock->sk);
1826 	struct netdev_nested_priv priv;
1827 	struct net_device *ndev;
1828 	int rc = 0;
1829 
1830 	ini->vlan_id = 0;
1831 	if (!dst) {
1832 		rc = -ENOTCONN;
1833 		goto out;
1834 	}
1835 	if (!dst->dev) {
1836 		rc = -ENODEV;
1837 		goto out_rel;
1838 	}
1839 
1840 	ndev = dst->dev;
1841 	if (is_vlan_dev(ndev)) {
1842 		ini->vlan_id = vlan_dev_vlan_id(ndev);
1843 		goto out_rel;
1844 	}
1845 
1846 	priv.data = (void *)&ini->vlan_id;
1847 	rtnl_lock();
1848 	netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
1849 	rtnl_unlock();
1850 
1851 out_rel:
1852 	dst_release(dst);
1853 out:
1854 	return rc;
1855 }
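
/* Usage sketch (hypothetical caller, not part of the original flow):
 * resolve the VLAN id before pnet/device selection. ini->vlan_id stays
 * 0 for untagged traffic; -ENOTCONN means the CLC socket has no cached
 * route yet.
 */
static int smc_vlan_example(struct smc_sock *smc, struct smc_init_info *ini)
{
	int rc = smc_vlan_by_tcpsk(smc->clcsock, ini);

	if (rc)
		return rc;		/* no dst or no device */
	return ini->vlan_id;		/* 0 == untagged */
}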
1856 
1857 static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version,
1858 			   u8 peer_systemid[],
1859 			   u8 peer_gid[],
1860 			   u8 peer_mac_v1[],
1861 			   enum smc_lgr_role role, u32 clcqpn,
1862 			   struct net *net)
1863 {
1864 	struct smc_link *lnk;
1865 	int i;
1866 
1867 	if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) ||
1868 	    lgr->role != role)
1869 		return false;
1870 
1871 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1872 		lnk = &lgr->lnk[i];
1873 
1874 		if (!smc_link_active(lnk))
1875 			continue;
1876 		/* use verbs API to check netns, instead of lgr->net */
1877 		if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net))
1878 			return false;
1879 		if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) &&
1880 		    !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) &&
1881 		    (smcr_version == SMC_V2 ||
1882 		     !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN)))
1883 			return true;
1884 	}
1885 	return false;
1886 }
1887 
1888 static bool smcd_lgr_match(struct smc_link_group *lgr,
1889 			   struct smcd_dev *smcismdev,
1890 			   struct smcd_gid *peer_gid)
1891 {
1892 	return lgr->peer_gid.gid == peer_gid->gid && lgr->smcd == smcismdev &&
1893 		(smc_ism_is_virtual(smcismdev) ?
1894 		 (lgr->peer_gid.gid_ext == peer_gid->gid_ext) : 1);
1895 }
1896 
1897 /* create a new SMC connection (and a new link group if necessary) */
1898 int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
1899 {
1900 	struct smc_connection *conn = &smc->conn;
1901 	struct net *net = sock_net(&smc->sk);
1902 	struct list_head *lgr_list;
1903 	struct smc_link_group *lgr;
1904 	enum smc_lgr_role role;
1905 	spinlock_t *lgr_lock;
1906 	int rc = 0;
1907 
1908 	lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
1909 				  &smc_lgr_list.list;
1910 	lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
1911 				  &smc_lgr_list.lock;
1912 	ini->first_contact_local = 1;
1913 	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
1914 	if (role == SMC_CLNT && ini->first_contact_peer)
1915 		/* create new link group as well */
1916 		goto create;
1917 
1918 	/* determine if an existing link group can be reused */
1919 	spin_lock_bh(lgr_lock);
1920 	list_for_each_entry(lgr, lgr_list, list) {
1921 		write_lock_bh(&lgr->conns_lock);
1922 		if ((ini->is_smcd ?
1923 		     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
1924 				    &ini->ism_peer_gid[ini->ism_selected]) :
1925 		     smcr_lgr_match(lgr, ini->smcr_version,
1926 				    ini->peer_systemid,
1927 				    ini->peer_gid, ini->peer_mac, role,
1928 				    ini->ib_clcqpn, net)) &&
1929 		    !lgr->sync_err &&
1930 		    (ini->smcd_version == SMC_V2 ||
1931 		     lgr->vlan_id == ini->vlan_id) &&
1932 		    (role == SMC_CLNT || ini->is_smcd ||
1933 		    (lgr->conns_num < lgr->max_conns &&
1934 		      !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
1935 			/* link group found */
1936 			ini->first_contact_local = 0;
1937 			conn->lgr = lgr;
1938 			rc = smc_lgr_register_conn(conn, false);
1939 			write_unlock_bh(&lgr->conns_lock);
1940 			if (!rc && delayed_work_pending(&lgr->free_work))
1941 				cancel_delayed_work(&lgr->free_work);
1942 			break;
1943 		}
1944 		write_unlock_bh(&lgr->conns_lock);
1945 	}
1946 	spin_unlock_bh(lgr_lock);
1947 	if (rc)
1948 		return rc;
1949 
1950 	if (role == SMC_CLNT && !ini->first_contact_peer &&
1951 	    ini->first_contact_local) {
1952 		/* Server reuses a link group, but Client wants to start
1953 		 * a new one; send an out_of_sync decline,
1954 		 * reason: synchronization error
1955 		 */
1956 		return SMC_CLC_DECL_SYNCERR;
1957 	}
1958 
1959 create:
1960 	if (ini->first_contact_local) {
1961 		rc = smc_lgr_create(smc, ini);
1962 		if (rc)
1963 			goto out;
1964 		lgr = conn->lgr;
1965 		write_lock_bh(&lgr->conns_lock);
1966 		rc = smc_lgr_register_conn(conn, true);
1967 		write_unlock_bh(&lgr->conns_lock);
1968 		if (rc) {
1969 			smc_lgr_cleanup_early(lgr);
1970 			goto out;
1971 		}
1972 	}
1973 	smc_lgr_hold(conn->lgr); /* lgr_put in smc_conn_free() */
1974 	if (!conn->lgr->is_smcd)
1975 		smcr_link_hold(conn->lnk); /* link_put in smc_conn_free() */
1976 	conn->freed = 0;
1977 	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
1978 	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
1979 	conn->urg_state = SMC_URG_READ;
1980 	init_waitqueue_head(&conn->cdc_pend_tx_wq);
1981 	INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
1982 	if (ini->is_smcd) {
1983 		conn->rx_off = sizeof(struct smcd_cdc_msg);
1984 		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
1985 	} else {
1986 		conn->rx_off = 0;
1987 	}
1988 #ifndef KERNEL_HAS_ATOMIC64
1989 	spin_lock_init(&conn->acurs_lock);
1990 #endif
1991 
1992 out:
1993 	return rc;
1994 }
1995 
1996 #define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
1997 #define SMCR_RMBE_SIZES		5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */
1998 
1999 /* convert the RMB size into the compressed notation (minimum 16K, see
2000  * SMCD_DMBE_SIZES / SMCR_RMBE_SIZES).
2001  * In contrast to plain ilog2, this rounds towards the next power of 2,
2002  * so the socket application gets at least its desired sndbuf / rcvbuf size.
2003  */
2004 static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
2005 {
2006 	u8 compressed;
2007 
2008 	if (size <= SMC_BUF_MIN_SIZE)
2009 		return 0;
2010 
2011 	size = (size - 1) >> 14;  /* convert to 16K multiple */
2012 	compressed = min_t(u8, ilog2(size) + 1,
2013 			   is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);
2014 
2015 #ifdef CONFIG_ARCH_NO_SG_CHAIN
2016 	if (!is_smcd && is_rmb)
2017 		/* RMBs are backed by & limited to max size of scatterlists */
2018 		compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14));
2019 #endif
2020 
2021 	return compressed;
2022 }
2023 
2024 /* convert the RMB size from compressed notation into integer */
2025 int smc_uncompress_bufsize(u8 compressed)
2026 {
2027 	u32 size;
2028 
2029 	size = 0x00000001 << (((int)compressed) + 14);
2030 	return (int)size;
2031 }
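
/* Worked example for the two helpers above (illustrative): a request of
 * 100000 bytes gives (100000 - 1) >> 14 == 6 and ilog2(6) + 1 == 3, so
 * the compressed value is 3 and smc_uncompress_bufsize(3) yields
 * 1 << (3 + 14) == 128KB - the next 16K-based power of 2 at or above
 * the request. A hypothetical round-up helper built on them:
 */
static inline int smc_bufsize_roundup(int size, bool is_smcd, bool is_rmb)
{
	return smc_uncompress_bufsize(smc_compress_bufsize(size, is_smcd,
							   is_rmb));
}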
2032 
2033 /* try to reuse a sndbuf or rmb description slot for a certain
2034  * buffer size; if not available, return NULL
2035  */
2036 static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
2037 					     struct rw_semaphore *lock,
2038 					     struct list_head *buf_list)
2039 {
2040 	struct smc_buf_desc *buf_slot;
2041 
2042 	down_read(lock);
2043 	list_for_each_entry(buf_slot, buf_list, list) {
2044 		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
2045 			up_read(lock);
2046 			return buf_slot;
2047 		}
2048 	}
2049 	up_read(lock);
2050 	return NULL;
2051 }
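
/* Counterpart sketch (illustrative; the real release path runs through
 * smcr_buf_unuse() and smc_buf_free()): only the 0 -> 1 claim above has
 * to be atomic against concurrent getters, so releasing a slot can be a
 * plain store.
 */
static inline void smc_buf_put_slot(struct smc_buf_desc *buf_slot)
{
	WRITE_ONCE(buf_slot->used, 0);
}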
2052 
2053 /* one of the conditions for announcing a receiver's current window size is
2054  * that it "results in a minimum increase in the window size of 10% of the
2055  * receive buffer space" [RFC7609]
2056  */
2057 static inline int smc_rmb_wnd_update_limit(int rmbe_size)
2058 {
2059 	return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
2060 }
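
/* Numeric sketch (hypothetical helper): for a 64KB RMB the limit is
 * max(65536 / 10, SOCK_MIN_SNDBUF / 2) == 6553 bytes on typical builds,
 * i.e. a window update is only worth announcing once at least ~10% of
 * the receive buffer was consumed.
 */
static inline bool smc_rmb_update_due(int consumed, int rmbe_size)
{
	return consumed >= smc_rmb_wnd_update_limit(rmbe_size);
}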
2061 
2062 /* map a buf to a link */
2063 static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
2064 			     struct smc_link *lnk)
2065 {
2066 	int rc, i, nents, offset, buf_size, size, access_flags;
2067 	struct scatterlist *sg;
2068 	void *buf;
2069 
2070 	if (buf_desc->is_map_ib[lnk->link_idx])
2071 		return 0;
2072 
2073 	if (buf_desc->is_vm) {
2074 		buf = buf_desc->cpu_addr;
2075 		buf_size = buf_desc->len;
2076 		offset = offset_in_page(buf_desc->cpu_addr);
2077 		nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE;
2078 	} else {
2079 		nents = 1;
2080 	}
2081 
2082 	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL);
2083 	if (rc)
2084 		return rc;
2085 
2086 	if (buf_desc->is_vm) {
2087 		/* virtually contiguous buffer */
2088 		for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) {
2089 			size = min_t(int, PAGE_SIZE - offset, buf_size);
2090 			sg_set_page(sg, vmalloc_to_page(buf), size, offset);
2091 			buf += size / sizeof(*buf);
2092 			buf_size -= size;
2093 			offset = 0;
2094 		}
2095 	} else {
2096 		/* physically contiguous buffer */
2097 		sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
2098 			   buf_desc->cpu_addr, buf_desc->len);
2099 	}
2100 
2101 	/* map sg table to DMA address */
2102 	rc = smc_ib_buf_map_sg(lnk, buf_desc,
2103 			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
2104 	/* SMC protocol depends on mapping to one DMA address only */
2105 	if (rc != nents) {
2106 		rc = -EAGAIN;
2107 		goto free_table;
2108 	}
2109 
2110 	buf_desc->is_dma_need_sync |=
2111 		smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;
2112 
2113 	if (is_rmb || buf_desc->is_vm) {
2114 		/* create a new memory region for the RMB or vzalloced sndbuf */
2115 		access_flags = is_rmb ?
2116 			       IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
2117 			       IB_ACCESS_LOCAL_WRITE;
2118 
2119 		rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags,
2120 					      buf_desc, lnk->link_idx);
2121 		if (rc)
2122 			goto buf_unmap;
2123 		smc_ib_sync_sg_for_device(lnk, buf_desc,
2124 					  is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
2125 	}
2126 	buf_desc->is_map_ib[lnk->link_idx] = true;
2127 	return 0;
2128 
2129 buf_unmap:
2130 	smc_ib_buf_unmap_sg(lnk, buf_desc,
2131 			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
2132 free_table:
2133 	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
2134 	return rc;
2135 }
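
/* Worked example for the nents computation above (illustrative): a
 * 6000-byte virtually contiguous buffer starting at page offset 3000
 * spans PAGE_ALIGN(6000 + 3000) / PAGE_SIZE == 3 pages on 4K-page
 * systems and thus needs three scatterlist entries; a physically
 * contiguous buffer always maps with the single entry set up in the
 * else branch.
 */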
2136 
2137 /* register a new buf on IB device, rmb or vzalloced sndbuf
2138  * must be called under lgr->llc_conf_mutex lock
2139  */
2140 int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
2141 {
2142 	if (list_empty(&link->lgr->list))
2143 		return -ENOLINK;
2144 	if (!buf_desc->is_reg_mr[link->link_idx]) {
2145 		/* register memory region for new buf */
2146 		if (buf_desc->is_vm)
2147 			buf_desc->mr[link->link_idx]->iova =
2148 				(uintptr_t)buf_desc->cpu_addr;
2149 		if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) {
2150 			buf_desc->is_reg_err = true;
2151 			return -EFAULT;
2152 		}
2153 		buf_desc->is_reg_mr[link->link_idx] = true;
2154 	}
2155 	return 0;
2156 }
2157 
2158 static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
2159 			     struct list_head *lst, bool is_rmb)
2160 {
2161 	struct smc_buf_desc *buf_desc, *bf;
2162 	int rc = 0;
2163 
2164 	down_write(lock);
2165 	list_for_each_entry_safe(buf_desc, bf, lst, list) {
2166 		if (!buf_desc->used)
2167 			continue;
2168 		rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
2169 		if (rc)
2170 			goto out;
2171 	}
2172 out:
2173 	up_write(lock);
2174 	return rc;
2175 }
2176 
2177 /* map all used buffers of lgr for a new link */
2178 int smcr_buf_map_lgr(struct smc_link *lnk)
2179 {
2180 	struct smc_link_group *lgr = lnk->lgr;
2181 	int i, rc = 0;
2182 
2183 	for (i = 0; i < SMC_RMBE_SIZES; i++) {
2184 		rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
2185 				       &lgr->rmbs[i], true);
2186 		if (rc)
2187 			return rc;
2188 		rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
2189 				       &lgr->sndbufs[i], false);
2190 		if (rc)
2191 			return rc;
2192 	}
2193 	return 0;
2194 }
2195 
2196 /* register all used buffers of lgr for a new link,
2197  * must be called under lgr->llc_conf_mutex lock
2198  */
2199 int smcr_buf_reg_lgr(struct smc_link *lnk)
2200 {
2201 	struct smc_link_group *lgr = lnk->lgr;
2202 	struct smc_buf_desc *buf_desc, *bf;
2203 	int i, rc = 0;
2204 
2205 	/* reg all RMBs for a new link */
2206 	down_write(&lgr->rmbs_lock);
2207 	for (i = 0; i < SMC_RMBE_SIZES; i++) {
2208 		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
2209 			if (!buf_desc->used)
2210 				continue;
2211 			rc = smcr_link_reg_buf(lnk, buf_desc);
2212 			if (rc) {
2213 				up_write(&lgr->rmbs_lock);
2214 				return rc;
2215 			}
2216 		}
2217 	}
2218 	up_write(&lgr->rmbs_lock);
2219 
2220 	if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
2221 		return rc;
2222 
2223 	/* reg all vzalloced sndbufs for a new link */
2224 	down_write(&lgr->sndbufs_lock);
2225 	for (i = 0; i < SMC_RMBE_SIZES; i++) {
2226 		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
2227 			if (!buf_desc->used || !buf_desc->is_vm)
2228 				continue;
2229 			rc = smcr_link_reg_buf(lnk, buf_desc);
2230 			if (rc) {
2231 				up_write(&lgr->sndbufs_lock);
2232 				return rc;
2233 			}
2234 		}
2235 	}
2236 	up_write(&lgr->sndbufs_lock);
2237 	return rc;
2238 }
2239 
2240 static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
2241 						bool is_rmb, int bufsize)
2242 {
2243 	struct smc_buf_desc *buf_desc;
2244 
2245 	/* try to alloc a new buffer */
2246 	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
2247 	if (!buf_desc)
2248 		return ERR_PTR(-ENOMEM);
2249 
2250 	switch (lgr->buf_type) {
2251 	case SMCR_PHYS_CONT_BUFS:
2252 	case SMCR_MIXED_BUFS:
2253 		buf_desc->order = get_order(bufsize);
2254 		buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
2255 					      __GFP_NOMEMALLOC | __GFP_COMP |
2256 					      __GFP_NORETRY | __GFP_ZERO,
2257 					      buf_desc->order);
2258 		if (buf_desc->pages) {
2259 			buf_desc->cpu_addr =
2260 				(void *)page_address(buf_desc->pages);
2261 			buf_desc->len = bufsize;
2262 			buf_desc->is_vm = false;
2263 			break;
2264 		}
2265 		if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
2266 			goto out;
2267 		fallthrough;	// try virtually contiguous buf
2268 	case SMCR_VIRT_CONT_BUFS:
2269 		buf_desc->order = get_order(bufsize);
2270 		buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
2271 		if (!buf_desc->cpu_addr)
2272 			goto out;
2273 		buf_desc->pages = NULL;
2274 		buf_desc->len = bufsize;
2275 		buf_desc->is_vm = true;
2276 		break;
2277 	}
2278 	return buf_desc;
2279 
2280 out:
2281 	kfree(buf_desc);
2282 	return ERR_PTR(-EAGAIN);
2283 }
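
/* Illustrative numbers for the allocator above: on 4K-page systems a
 * 128KB buffer has get_order(131072) == 5, so alloc_pages() must find
 * 32 physically contiguous pages; with SMCR_MIXED_BUFS the fallthrough
 * lets vzalloc() satisfy the same request from pages that are only
 * virtually contiguous when physical contiguity is not available.
 */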
2284 
2285 /* map buf_desc on all usable links,
2286  * unused buffers stay mapped as long as the link is up
2287  */
2288 static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
2289 				     struct smc_buf_desc *buf_desc, bool is_rmb)
2290 {
2291 	int i, rc = 0, cnt = 0;
2292 
2293 	/* protect against parallel link reconfiguration */
2294 	down_read(&lgr->llc_conf_mutex);
2295 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2296 		struct smc_link *lnk = &lgr->lnk[i];
2297 
2298 		if (!smc_link_usable(lnk))
2299 			continue;
2300 		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
2301 			rc = -ENOMEM;
2302 			goto out;
2303 		}
2304 		cnt++;
2305 	}
2306 out:
2307 	up_read(&lgr->llc_conf_mutex);
2308 	if (!rc && !cnt)
2309 		rc = -EINVAL;
2310 	return rc;
2311 }
2312 
2313 static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
2314 						bool is_dmb, int bufsize)
2315 {
2316 	struct smc_buf_desc *buf_desc;
2317 	int rc;
2318 
2319 	/* try to alloc a new DMB */
2320 	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
2321 	if (!buf_desc)
2322 		return ERR_PTR(-ENOMEM);
2323 	if (is_dmb) {
2324 		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
2325 		if (rc) {
2326 			kfree(buf_desc);
2327 			if (rc == -ENOMEM)
2328 				return ERR_PTR(-EAGAIN);
2329 			if (rc == -ENOSPC)
2330 				return ERR_PTR(-ENOSPC);
2331 			return ERR_PTR(-EIO);
2332 		}
2333 		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
2334 		/* CDC header is stored in the buf, so pretend it is smaller */
2335 		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
2336 	} else {
2337 		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
2338 					     __GFP_NOWARN | __GFP_NORETRY |
2339 					     __GFP_NOMEMALLOC);
2340 		if (!buf_desc->cpu_addr) {
2341 			kfree(buf_desc);
2342 			return ERR_PTR(-EAGAIN);
2343 		}
2344 		buf_desc->len = bufsize;
2345 	}
2346 	return buf_desc;
2347 }
2348 
2349 static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
2350 {
2351 	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
2352 	struct smc_connection *conn = &smc->conn;
2353 	struct smc_link_group *lgr = conn->lgr;
2354 	struct list_head *buf_list;
2355 	int bufsize, bufsize_comp;
2356 	struct rw_semaphore *lock;	/* lock buffer list */
2357 	bool is_dgraded = false;
2358 
2359 	if (is_rmb)
2360 		/* use socket recv buffer size (w/o overhead) as start value */
2361 		bufsize = smc->sk.sk_rcvbuf / 2;
2362 	else
2363 		/* use socket send buffer size (w/o overhead) as start value */
2364 		bufsize = smc->sk.sk_sndbuf / 2;
2365 
2366 	for (bufsize_comp = smc_compress_bufsize(bufsize, is_smcd, is_rmb);
2367 	     bufsize_comp >= 0; bufsize_comp--) {
2368 		if (is_rmb) {
2369 			lock = &lgr->rmbs_lock;
2370 			buf_list = &lgr->rmbs[bufsize_comp];
2371 		} else {
2372 			lock = &lgr->sndbufs_lock;
2373 			buf_list = &lgr->sndbufs[bufsize_comp];
2374 		}
2375 		bufsize = smc_uncompress_bufsize(bufsize_comp);
2376 
2377 		/* check for reusable slot in the link group */
2378 		buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list);
2379 		if (buf_desc) {
2380 			buf_desc->is_dma_need_sync = 0;
2381 			SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
2382 			SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
2383 			break; /* found reusable slot */
2384 		}
2385 
2386 		if (is_smcd)
2387 			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
2388 		else
2389 			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
2390 
2391 		if (PTR_ERR(buf_desc) == -ENOMEM)
2392 			break;
2393 		if (IS_ERR(buf_desc)) {
2394 			if (!is_dgraded) {
2395 				is_dgraded = true;
2396 				SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
2397 			}
2398 			continue;
2399 		}
2400 
2401 		SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
2402 		SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
2403 		buf_desc->used = 1;
2404 		down_write(lock);
2405 		list_add(&buf_desc->list, buf_list);
2406 		up_write(lock);
2407 		break; /* found */
2408 	}
2409 
2410 	if (IS_ERR(buf_desc))
2411 		return PTR_ERR(buf_desc);
2412 
2413 	if (!is_smcd) {
2414 		if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
2415 			smcr_buf_unuse(buf_desc, is_rmb, lgr);
2416 			return -ENOMEM;
2417 		}
2418 	}
2419 
2420 	if (is_rmb) {
2421 		conn->rmb_desc = buf_desc;
2422 		conn->rmbe_size_comp = bufsize_comp;
2423 		smc->sk.sk_rcvbuf = bufsize * 2;
2424 		atomic_set(&conn->bytes_to_rcv, 0);
2425 		conn->rmbe_update_limit =
2426 			smc_rmb_wnd_update_limit(buf_desc->len);
2427 		if (is_smcd)
2428 			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
2429 	} else {
2430 		conn->sndbuf_desc = buf_desc;
2431 		smc->sk.sk_sndbuf = bufsize * 2;
2432 		atomic_set(&conn->sndbuf_space, bufsize);
2433 	}
2434 	return 0;
2435 }
2436 
2437 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
2438 {
2439 	if (!conn->sndbuf_desc->is_dma_need_sync)
2440 		return;
2441 	if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd ||
2442 	    !smc_link_active(conn->lnk))
2443 		return;
2444 	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
2445 }
2446 
2447 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
2448 {
2449 	int i;
2450 
2451 	if (!conn->rmb_desc->is_dma_need_sync)
2452 		return;
2453 	if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd)
2454 		return;
2455 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2456 		if (!smc_link_active(&conn->lgr->lnk[i]))
2457 			continue;
2458 		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
2459 				       DMA_FROM_DEVICE);
2460 	}
2461 }
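
/* Pairing sketch (hypothetical call sites): the two sync helpers above
 * bracket the DMA transfers - publish sndbuf contents to the device
 * before posting the RDMA write, and make device-written RMB data
 * visible to the CPU before copying it out to user space.
 */
static inline void smc_dma_sync_example(struct smc_connection *conn)
{
	smc_sndbuf_sync_sg_for_device(conn);	/* tx: before posting WR */
	smc_rmb_sync_sg_for_cpu(conn);		/* rx: before copy to user */
}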
2462 
2463 /* create the send and receive buffers for an SMC socket;
2464  * receive buffers are called RMBs.
2465  * (Even though the SMC protocol allows more than one RMB-element per RMB,
2466  * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
2467  * extra RMB for every connection in a link group.)
2468  */
2469 int smc_buf_create(struct smc_sock *smc, bool is_smcd)
2470 {
2471 	int rc;
2472 
2473 	/* create send buffer */
2474 	if (is_smcd &&
2475 	    smc_ism_support_dmb_nocopy(smc->conn.lgr->smcd))
2476 		goto create_rmb;
2477 
2478 	rc = __smc_buf_create(smc, is_smcd, false);
2479 	if (rc)
2480 		return rc;
2481 
2482 create_rmb:
2483 	/* create rmb */
2484 	rc = __smc_buf_create(smc, is_smcd, true);
2485 	if (rc && smc->conn.sndbuf_desc) {
2486 		down_write(&smc->conn.lgr->sndbufs_lock);
2487 		list_del(&smc->conn.sndbuf_desc->list);
2488 		up_write(&smc->conn.lgr->sndbufs_lock);
2489 		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
2490 		smc->conn.sndbuf_desc = NULL;
2491 	}
2492 	return rc;
2493 }
2494 
2495 int smcd_buf_attach(struct smc_sock *smc)
2496 {
2497 	struct smc_connection *conn = &smc->conn;
2498 	struct smcd_dev *smcd = conn->lgr->smcd;
2499 	u64 peer_token = conn->peer_token;
2500 	struct smc_buf_desc *buf_desc;
2501 	int rc;
2502 
2503 	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
2504 	if (!buf_desc)
2505 		return -ENOMEM;
2506 
2507 	/* The ghost sndbuf_desc describes the same memory region as
2508 	 * the peer RMB. Its lifecycle is consistent with the connection's
2509 	 * and it will be freed with the connection instead of the
2510 	 * link group.
2511 	 */
2512 	rc = smc_ism_attach_dmb(smcd, peer_token, buf_desc);
2513 	if (rc)
2514 		goto free;
2515 
2516 	smc->sk.sk_sndbuf = buf_desc->len;
2517 	buf_desc->cpu_addr =
2518 		(u8 *)buf_desc->cpu_addr + sizeof(struct smcd_cdc_msg);
2519 	buf_desc->len -= sizeof(struct smcd_cdc_msg);
2520 	conn->sndbuf_desc = buf_desc;
2521 	conn->sndbuf_desc->used = 1;
2522 	atomic_set(&conn->sndbuf_space, conn->sndbuf_desc->len);
2523 	return 0;
2524 
2525 free:
2526 	kfree(buf_desc);
2527 	return rc;
2528 }
2529 
2530 static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
2531 {
2532 	int i;
2533 
2534 	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
2535 		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
2536 			return i;
2537 	}
2538 	return -ENOSPC;
2539 }
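
/* Note on the pattern above: for_each_clear_bit() itself is not atomic,
 * so a bit observed clear may be grabbed by a racing CPU in between;
 * the test_and_set_bit() re-check makes the actual reservation atomic
 * and the loop simply moves on to the next clear bit if it lost.
 */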
2540 
2541 static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
2542 				   u32 rkey)
2543 {
2544 	int i;
2545 
2546 	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2547 		if (test_bit(i, lgr->rtokens_used_mask) &&
2548 		    lgr->rtokens[i][lnk_idx].rkey == rkey)
2549 			return i;
2550 	}
2551 	return -ENOENT;
2552 }
2553 
2554 /* set rtoken for a new link to an existing rmb */
2555 void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
2556 		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
2557 {
2558 	int rtok_idx;
2559 
2560 	rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
2561 	if (rtok_idx == -ENOENT)
2562 		return;
2563 	lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
2564 	lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
2565 }
2566 
2567 /* set rtoken for a new link whose link_id is given */
2568 void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
2569 		     __be64 nw_vaddr, __be32 nw_rkey)
2570 {
2571 	u64 dma_addr = be64_to_cpu(nw_vaddr);
2572 	u32 rkey = ntohl(nw_rkey);
2573 	bool found = false;
2574 	int link_idx;
2575 
2576 	for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
2577 		if (lgr->lnk[link_idx].link_id == link_id) {
2578 			found = true;
2579 			break;
2580 		}
2581 	}
2582 	if (!found)
2583 		return;
2584 	lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
2585 	lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
2586 }
2587 
2588 /* add a new rtoken from peer */
2589 int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
2590 {
2591 	struct smc_link_group *lgr = smc_get_lgr(lnk);
2592 	u64 dma_addr = be64_to_cpu(nw_vaddr);
2593 	u32 rkey = ntohl(nw_rkey);
2594 	int i;
2595 
2596 	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2597 		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2598 		    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
2599 		    test_bit(i, lgr->rtokens_used_mask)) {
2600 			/* already in list */
2601 			return i;
2602 		}
2603 	}
2604 	i = smc_rmb_reserve_rtoken_idx(lgr);
2605 	if (i < 0)
2606 		return i;
2607 	lgr->rtokens[i][lnk->link_idx].rkey = rkey;
2608 	lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
2609 	return i;
2610 }
2611 
2612 /* delete an rtoken from all links */
2613 int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
2614 {
2615 	struct smc_link_group *lgr = smc_get_lgr(lnk);
2616 	u32 rkey = ntohl(nw_rkey);
2617 	int i, j;
2618 
2619 	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2620 		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2621 		    test_bit(i, lgr->rtokens_used_mask)) {
2622 			for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
2623 				lgr->rtokens[i][j].rkey = 0;
2624 				lgr->rtokens[i][j].dma_addr = 0;
2625 			}
2626 			clear_bit(i, lgr->rtokens_used_mask);
2627 			return 0;
2628 		}
2629 	}
2630 	return -ENOENT;
2631 }
2632 
2633 /* save rkey and dma_addr received from peer during clc handshake */
2634 int smc_rmb_rtoken_handling(struct smc_connection *conn,
2635 			    struct smc_link *lnk,
2636 			    struct smc_clc_msg_accept_confirm *clc)
2637 {
2638 	conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
2639 					  clc->r0.rmb_rkey);
2640 	if (conn->rtoken_idx < 0)
2641 		return conn->rtoken_idx;
2642 	return 0;
2643 }
2644 
2645 static void smc_core_going_away(void)
2646 {
2647 	struct smc_ib_device *smcibdev;
2648 	struct smcd_dev *smcd;
2649 
2650 	mutex_lock(&smc_ib_devices.mutex);
2651 	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
2652 		int i;
2653 
2654 		for (i = 0; i < SMC_MAX_PORTS; i++)
2655 			set_bit(i, smcibdev->ports_going_away);
2656 	}
2657 	mutex_unlock(&smc_ib_devices.mutex);
2658 
2659 	mutex_lock(&smcd_dev_list.mutex);
2660 	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
2661 		smcd->going_away = 1;
2662 	}
2663 	mutex_unlock(&smcd_dev_list.mutex);
2664 }
2665 
2666 /* Clean up all SMC link groups */
2667 static void smc_lgrs_shutdown(void)
2668 {
2669 	struct smcd_dev *smcd;
2670 
2671 	smc_core_going_away();
2672 
2673 	smc_smcr_terminate_all(NULL);
2674 
2675 	mutex_lock(&smcd_dev_list.mutex);
2676 	list_for_each_entry(smcd, &smcd_dev_list.list, list)
2677 		smc_smcd_terminate_all(smcd);
2678 	mutex_unlock(&smcd_dev_list.mutex);
2679 }
2680 
2681 static int smc_core_reboot_event(struct notifier_block *this,
2682 				 unsigned long event, void *ptr)
2683 {
2684 	smc_lgrs_shutdown();
2685 	smc_ib_unregister_client();
2686 	smc_ism_exit();
2687 	return 0;
2688 }
2689 
2690 static struct notifier_block smc_reboot_notifier = {
2691 	.notifier_call = smc_core_reboot_event,
2692 };
2693 
2694 int __init smc_core_init(void)
2695 {
2696 	return register_reboot_notifier(&smc_reboot_notifier);
2697 }
2698 
2699 /* Called (from smc_exit) when module is removed */
2700 void smc_core_exit(void)
2701 {
2702 	unregister_reboot_notifier(&smc_reboot_notifier);
2703 	smc_lgrs_shutdown();
2704 }
2705