// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  IB infrastructure:
 *  Establish SMC-R as an InfiniBand client to be notified about added and
 *  removed IB devices of type RDMA.
 *  Determine device and port characteristics for these IB devices.
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"

#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 ns * 2^timeout = ~134 ms */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

u8 local_systemid[SMC_SYSTEMID_LEN];		/* unique system identifier */

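/* A link's RC QP moves through the usual IB verbs state machine:
 * RESET -> INIT -> RTR -> RTS. The smc_ib_modify_qp_*() helpers below
 * perform one transition each via ib_modify_qp().
 */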
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

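/* transition the QP to Ready-To-Receive; this consumes the peer's MAC,
 * GID, QP number and starting packet sequence number, all learned from
 * the peer during connection setup (outside this file)
 */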
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

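/* transition the QP to Ready-To-Send; from here on the link may post send
 * work requests, with retry/timeout behavior fixed by the SMC_QP_*
 * constants above
 */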
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

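/* bring a new link's QP from INIT via RTR towards RTS: the receive CQ is
 * armed and receive buffers are posted before the peer may send; only the
 * server role moves the QP to RTS here, the client role does so later in
 * connection setup via smc_ib_modify_qp_rts()
 */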
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

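/* cache the MAC address taken from the L2 fields of the port's GID
 * entry 0
 */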
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * A random 2-byte number in bytes 0-1 plus the MAC address of the first
 * active registered IB device in bytes 2-7 make up this identifier.
 * It is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
}

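/* the systemid counts as defined once the MAC part in bytes 2-7 is nonzero */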
bool smc_ib_is_valid_local_systemid(void)
{
	return !is_zero_ether_addr(&local_systemid[2]);
}

static void smc_ib_init_local_systemid(void)
{
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

/* determine the GID and sgid_index for an ib device port and vlan id:
 * take the first RoCE(v1) GID entry whose netdevice matches the requested
 * vlan id (or is no vlan device if vlan_id is 0)
 */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(attr->ndev)) ||
		     (vlan_id && is_vlan_dev(attr->ndev) &&
		      vlan_dev_vlan_id(attr->ndev) == vlan_id)) &&
		    attr->gid_type == IB_GID_TYPE_ROCE) {
			rcu_read_unlock();
			if (gid)
				memcpy(gid, &attr->gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = attr->index;
			rdma_put_gid_attr(attr);
			return 0;
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}

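/* (re)read the attributes of an ib device port and cache them; this also
 * refreshes the cached MAC and, for the first active port, defines the
 * local systemid
 */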
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!smc_ib_is_valid_local_systemid() &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}

/* process-context wrapper for smc_ib_remember_port_attr(), which may sleep */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			set_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_err(smcibdev, port_idx + 1);
		} else {
			clear_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_add(smcibdev, port_idx + 1);
		}
	}
}

/* can be called in IRQ context; only sets event bits and schedules the
 * port event worker
 */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	bool schedule = false;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule = true;
		}
		if (schedule)
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

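/* each link owns one protection domain; the link's QP and the memory
 * regions registered for it live within that domain
 */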
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

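/* QP event callback: fatal and access errors are treated like a port
 * error and handed to the port event worker
 */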
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited RDMA_WRITEs as well;
				 * there are at most 2 RDMA_WRITEs per WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}


void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

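/* the buffer behind buf_slot is allocated elsewhere as one physically
 * contiguous higher-order block, so the caller expects ib_map_mr_sg() to
 * merge the whole SG list into a single region, i.e. to return 1
 */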
static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a DMA-mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[link_idx],
			      buf_slot->sgt[link_idx].sgl,
			      buf_slot->sgt[link_idx].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}

/* Allocate a memory region and map the DMA-mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx)
{
	if (buf_slot->mr_rx[link_idx])
		return 0; /* already done */

	buf_slot->mr_rx[link_idx] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[link_idx])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[link_idx]);
		buf_slot->mr_rx[link_idx] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot, link_idx) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}

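/* create the send and receive completion queues used by all links of this
 * device and register the device with the work request layer; the CQE
 * count is capped so that the resulting mlx5 CQ buffer still fits into an
 * allocation of page order MAX_ORDER - 1
 */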
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = {
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* make the calculated number of cq entries fit mlx5's cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smcibdev->initialized = 0;
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
	smc_wr_remove_dev(smcibdev);
}

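/* forward declaration, needed by ib_set_client_data() in smc_ib_add_dev() */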
static struct ib_client smc_ib_client;

/* callback function for ib_register_client() */
static int smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return -EOPNOTSUPP;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return -ENOMEM;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
	atomic_set(&smcibdev->lnk_cnt, 0);
	init_waitqueue_head(&smcibdev->lnks_deleted);
	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	pr_warn_ratelimited("smc: adding ib device %s with port count %d\n",
			    smcibdev->ibdev->name, port_cnt);
	for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		if (smc_pnetid_by_dev_port(ibdev->dev.parent, i,
					   smcibdev->pnetid[i]))
			smc_pnetid_by_table_ib(smcibdev, i + 1);
		pr_warn_ratelimited("smc:    ib device %s port %d has pnetid %.16s%s\n",
				    smcibdev->ibdev->name, i + 1,
				    smcibdev->pnetid[i],
				    smcibdev->pnetid_by_user[i] ?
				     " (user defined)" : "");
	}
	schedule_work(&smcibdev->port_event_work);
	return 0;
}

/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev = client_data;

	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	pr_warn_ratelimited("smc: removing ib device %s\n",
			    smcibdev->ibdev->name);
	smc_smcr_terminate_all(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	cancel_work_sync(&smcibdev->port_event_work);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove = smc_ib_remove_dev,
};

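/* called once at module initialization: seed the random part of the local
 * systemid before any IB device can be added
 */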
int __init smc_ib_register_client(void)
{
	smc_ib_init_local_systemid();
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}
633