// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efclib.h"
#include "../libefc_sli/sli4.h"
#include "efc_cmds.h"
#include "efc_sm.h"

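/*
 * Release an nport's resources: clear the attached flag, free the service
 * parameters DMA buffer and the VPI resource, then report @evt through
 * efc_nport_cb().
 */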
static void
efc_nport_free_resources(struct efc_nport *nport, int evt, void *data)
{
	struct efc *efc = nport->efc;

	/* Clear the nport attached flag */
	nport->attached = false;

	/* Free the service parameters buffer */
	if (nport->dma.virt) {
		dma_free_coherent(&efc->pci->dev, nport->dma.size,
				  nport->dma.virt, nport->dma.phys);
		memset(&nport->dma, 0, sizeof(struct efc_dma));
	}

	/* Free the SLI resources */
	sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);

	efc_nport_cb(efc, evt, nport);
}

static int
efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status)
{
	struct efc *efc = nport->efc;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n",
			      nport->indicator, status, le16_to_cpu(hdr->status));
		return -EIO;
	}

	return 0;
}

static int
efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;
	int evt = EFC_EVT_NPORT_FREE_OK;
	int rc;

	rc = efc_nport_get_mbox_status(nport, mqe, status);
	if (rc)
		evt = EFC_EVT_NPORT_FREE_FAIL;

	efc_nport_free_resources(nport, evt, mqe);
	return rc;
}

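/*
 * Free the nport's VPI by issuing an UNREG_VPI mailbox command; any format
 * or submission failure releases the nport with EFC_EVT_NPORT_FREE_FAIL.
 */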
static void
efc_nport_free_unreg_vpi(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	int rc;
	u8 data[SLI4_BMBX_SIZE];

	rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator,
			       SLI4_UNREG_TYPE_PORT);
	if (rc) {
		efc_log_err(efc, "UNREG_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_free_unreg_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "UNREG_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
	}
}

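/*
 * Deliver @evt to the registered callbacks, mark the nport attached on
 * EFC_EVT_NPORT_ATTACH_OK, and service any pending free request.
 */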
static void
efc_nport_send_evt(struct efc_nport *nport, int evt, void *data)
{
	struct efc *efc = nport->efc;

	/* Now inform the registered callbacks */
	efc_nport_cb(efc, evt, nport);

	/* Set the nport attached flag */
	if (evt == EFC_EVT_NPORT_ATTACH_OK)
		nport->attached = true;

	/* If there is a pending free request, then handle it now */
	if (nport->free_req_pending)
		efc_nport_free_unreg_vpi(nport);
}

static int
efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe);
	return 0;
}

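/*
 * Continue nport allocation by issuing an INIT_VPI mailbox command for the
 * allocated VPI. A pending free request short-circuits the allocation and
 * releases the nport instead.
 */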
static void
efc_nport_alloc_init_vpi(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/* If there is a pending free request, then handle it now */
	if (nport->free_req_pending) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data);
		return;
	}

	rc = sli_cmd_init_vpi(efc->sli, data,
			      nport->indicator, nport->domain->indicator);
	if (rc) {
		efc_log_err(efc, "INIT_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
			efc_nport_alloc_init_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "INIT_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
	}
}

static int
efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;
	u8 *payload = NULL;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
		return -EIO;
	}

	payload = nport->dma.virt;

	memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
	       sizeof(nport->sli_wwpn));
	memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
	       sizeof(nport->sli_wwnn));

	dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt,
			  nport->dma.phys);
	memset(&nport->dma, 0, sizeof(struct efc_dma));
	efc_nport_alloc_init_vpi(nport);
	return 0;
}

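/*
 * Fetch the default service parameters: allocate a DMA buffer and issue a
 * READ_SPARM64 mailbox command. The completion callback copies the WWPN and
 * WWNN and then continues with INIT_VPI.
 */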
static void
efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
{
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/* Allocate memory for the service parameters */
	nport->dma.size = EFC_SPARAM_DMA_SZ;
	nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
					     nport->dma.size, &nport->dma.phys,
					     GFP_DMA);
	if (!nport->dma.virt) {
		efc_log_err(efc, "Failed to allocate DMA memory\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = sli_cmd_read_sparm64(efc->sli, data,
				  &nport->dma, nport->indicator);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_alloc_read_sparm64_cb, nport);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
	}
}

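/*
 * Allocate a VPI for @nport. With a domain present, either read the default
 * service parameters (no WWPN supplied) or go straight to INIT_VPI; without
 * a domain, a caller-supplied WWPN is required for the physical port.
 * Returns 0 on success, -EIO on failure.
 */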
int
efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
		    struct efc_domain *domain, u8 *wwpn)
{
	u32 index;

	nport->indicator = U32_MAX;
	nport->free_req_pending = false;

	if (wwpn)
		memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn));

	/*
	 * Allocate a VPI resource for the port and store it in the
	 * indicator field of the port object.
	 */
	if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI,
			       &nport->indicator, &index)) {
		efc_log_err(efc, "VPI allocation failure\n");
		return -EIO;
	}

	if (domain) {
		/*
		 * If the WWPN is NULL, fetch the default
		 * WWPN and WWNN before initializing the VPI
		 */
		if (!wwpn)
			efc_nport_alloc_read_sparm64(efc, nport);
		else
			efc_nport_alloc_init_vpi(nport);
	} else if (!wwpn) {
		/* domain NULL and wwpn NULL: a physical port needs a WWN */
		efc_log_err(efc, "need WWN for physical port\n");
		sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
		return -EIO;
	}

	return 0;
}

static int
efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
			    void *arg)
{
	struct efc_nport *nport = arg;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
		return -EIO;
	}

	efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe);
	return 0;
}

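/*
 * Attach @nport to @fc_id by registering the previously allocated VPI with
 * a REG_VPI mailbox command; failures release the nport with
 * EFC_EVT_NPORT_ATTACH_FAIL.
 */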
int
efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = 0;

	if (!nport) {
		efc_log_err(efc, "bad param(s) nport=%p\n", nport);
		return -EIO;
	}

	nport->fc_id = fc_id;

	/* register previously-allocated VPI with the device */
	rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id,
			     nport->sli_wwpn, nport->indicator,
			     nport->domain->indicator, false);
	if (rc) {
		efc_log_err(efc, "REG_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
		return rc;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, buf,
				     efc_nport_attach_reg_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "REG_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
	}

	return rc;
}

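/*
 * Free @nport: an attached port is unregistered immediately via UNREG_VPI,
 * otherwise the free is deferred (free_req_pending) until the outstanding
 * allocation or attach completes.
 */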
int
efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
{
	if (!nport) {
		efc_log_err(efc, "bad parameter(s) nport=%p\n", nport);
		return -EIO;
	}

	/* Issue the UNREG_VPI command to free the assigned VPI context */
	if (nport->attached)
		efc_nport_free_unreg_vpi(nport);
	else
		nport->free_req_pending = true;

	return 0;
}

static int
efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status)
{
	struct efc *efc = domain->efc;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n",
			      domain->indicator, status,
			      le16_to_cpu(hdr->status));
		return -EIO;
	}

	return 0;
}

static void
efc_domain_free_resources(struct efc_domain *domain, int evt, void *data)
{
	struct efc *efc = domain->efc;

	/* Free the service parameters buffer */
	if (domain->dma.virt) {
		dma_free_coherent(&efc->pci->dev,
				  domain->dma.size, domain->dma.virt,
				  domain->dma.phys);
		memset(&domain->dma, 0, sizeof(struct efc_dma));
	}

	/* Free the SLI resources */
	sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator);

	efc_domain_cb(efc, evt, domain);
}

static void
efc_domain_send_nport_evt(struct efc_domain *domain,
			  int port_evt, int domain_evt, void *data)
{
	struct efc *efc = domain->efc;

	/* Send alloc/attach ok to the physical nport */
	efc_nport_send_evt(domain->nport, port_evt, NULL);

	/* Now inform the registered callbacks */
	efc_domain_cb(efc, domain_evt, domain);
}

static int
efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe,
				 void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK,
				  EFC_HW_DOMAIN_ALLOC_OK, mqe);
	return 0;
}

static void
efc_domain_alloc_read_sparm64(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 format failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_alloc_read_sparm64_cb, domain);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 command failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
	}
}

static int
efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe,
			     void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_domain_alloc_read_sparm64(domain);
	return 0;
}

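/*
 * Initialize the allocated VFI against the FCFI already registered by the
 * hardware (INIT_VFI); the completion callback continues domain allocation
 * with READ_SPARM64.
 */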
static void
efc_domain_alloc_init_vfi(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	struct efc_nport *nport = domain->nport;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/*
	 * For FC, the HW already registered an FCFI.
	 * Copy FCF information into the domain and jump to INIT_VFI.
	 */
	domain->fcf_indicator = efc->fcfi;
	rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator,
			      domain->fcf_indicator, nport->indicator);
	if (rc) {
		efc_log_err(efc, "INIT_VFI format failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
		return;
	}

	efc_log_err(efc, "%s issue mbox\n", __func__);
	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_alloc_init_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "INIT_VFI command failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
	}
}

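/*
 * Allocate a domain: reserve a service parameters DMA buffer and a VFI
 * resource, then start the INIT_VFI/READ_SPARM64 mailbox sequence.
 * Returns 0 on success, -EIO on failure.
 */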
int
efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
{
	u32 index;

	if (!domain || !domain->nport) {
		efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n",
			    domain, domain ? domain->nport : NULL);
		return -EIO;
	}

	/* allocate memory for the service parameters */
	domain->dma.size = EFC_SPARAM_DMA_SZ;
	domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
					      domain->dma.size,
					      &domain->dma.phys, GFP_DMA);
	if (!domain->dma.virt) {
		efc_log_err(efc, "Failed to allocate DMA memory\n");
		return -EIO;
	}

	domain->fcf = fcf;
	domain->fcf_indicator = U32_MAX;
	domain->indicator = U32_MAX;

	if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator,
			       &index)) {
		efc_log_err(efc, "VFI allocation failure\n");

		dma_free_coherent(&efc->pci->dev,
				  domain->dma.size, domain->dma.virt,
				  domain->dma.phys);
		memset(&domain->dma, 0, sizeof(struct efc_dma));

		return -EIO;
	}

	efc_domain_alloc_init_vfi(domain);
	return 0;
}

static int
efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe,
			     void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ATTACH_FAIL, mqe);
		return -EIO;
	}

	efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK,
				  EFC_HW_DOMAIN_ATTACH_OK, mqe);
	return 0;
}

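/*
 * Attach the domain to @fc_id by registering its VFI (REG_VFI); any failure
 * releases the domain with EFC_HW_DOMAIN_ATTACH_FAIL.
 */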
int
efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = 0;

	if (!domain) {
		efc_log_err(efc, "bad param(s) domain=%p\n", domain);
		return -EIO;
	}

	domain->nport->fc_id = fc_id;

	rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator,
			     domain->fcf_indicator, domain->dma,
			     domain->nport->indicator, domain->nport->sli_wwpn,
			     domain->nport->fc_id);
	if (rc) {
		efc_log_err(efc, "REG_VFI format failure\n");
		goto cleanup;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, buf,
				     efc_domain_attach_reg_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "REG_VFI command failure\n");
		goto cleanup;
	}

	return rc;

cleanup:
	efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf);

	return rc;
}

static int
efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_domain *domain = arg;
	int evt = EFC_HW_DOMAIN_FREE_OK;
	int rc;

	rc = efc_domain_get_mbox_status(domain, mqe, status);
	if (rc) {
		evt = EFC_HW_DOMAIN_FREE_FAIL;
		rc = -EIO;
	}

	efc_domain_free_resources(domain, evt, mqe);
	return rc;
}

static void
efc_domain_free_unreg_vfi(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	int rc;
	u8 data[SLI4_BMBX_SIZE];

	rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator,
			       SLI4_UNREG_TYPE_DOMAIN);
	if (rc) {
		efc_log_err(efc, "UNREG_VFI format failure\n");
		goto cleanup;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_free_unreg_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "UNREG_VFI command failure\n");
		goto cleanup;
	}

	return;

cleanup:
	efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data);
}

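/*
 * Free the domain by unregistering its VFI (UNREG_VFI); the completion
 * callback releases the remaining domain resources.
 */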
int
efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain)
{
	if (!domain) {
		efc_log_err(efc, "bad parameter(s) domain=%p\n", domain);
		return -EIO;
	}

	efc_domain_free_unreg_vfi(domain);
	return 0;
}

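/*
 * Allocate an RPI for the remote node at @fc_addr on @nport. The node must
 * not already own an RPI (indicator == U32_MAX). Returns 0 on success,
 * -EIO on failure.
 */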
int
efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
		   struct efc_nport *nport)
{
	/* Check for invalid indicator */
	if (rnode->indicator != U32_MAX) {
		efc_log_err(efc,
			    "RPI allocation failure addr=%#x rpi=%#x\n",
			    fc_addr, rnode->indicator);
		return -EIO;
	}

	/* NULL SLI port indicates an unallocated remote node */
	rnode->nport = NULL;

	if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI,
			       &rnode->indicator, &rnode->index)) {
		efc_log_err(efc, "RPI allocation failure addr=%#x\n",
			    fc_addr);
		return -EIO;
	}

	rnode->fc_id = fc_addr;
	rnode->nport = nport;

	return 0;
}

static int
efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_remote_node *rnode = arg;
	struct sli4_mbox_command_header *hdr =
				(struct sli4_mbox_command_header *)mqe;
	int evt = 0;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
			      le16_to_cpu(hdr->status));
		rnode->attached = false;
		evt = EFC_EVT_NODE_ATTACH_FAIL;
	} else {
		rnode->attached = true;
		evt = EFC_EVT_NODE_ATTACH_OK;
	}

	efc_remote_node_cb(efc, evt, rnode);

	return 0;
}

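/*
 * Attach the remote node by registering its RPI (REG_RPI) with the remote
 * port's service parameters in @sparms. Returns the mailbox submission
 * result, or -EIO on bad parameters or a REG_RPI format failure.
 */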
int
efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
		    struct efc_dma *sparms)
{
	int rc = -EIO;
	u8 buf[SLI4_BMBX_SIZE];

	if (!rnode || !sparms) {
		efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n",
			    rnode, sparms);
		return -EIO;
	}

	/*
	 * The RPI index must have been set by a successful RPI allocation
	 * before the RPI can be registered.
	 */
	if (rnode->index == U32_MAX) {
		efc_log_err(efc, "bad parameter rnode->index invalid\n");
		return -EIO;
	}

	/* Update a remote node object with the remote port's service params */
	if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator,
			     rnode->nport->indicator, rnode->fc_id, sparms, 0, 0))
		rc = efc->tt.issue_mbox_rqst(efc->base, buf,
					     efc_cmd_node_attach_cb, rnode);

	return rc;
}

int
efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode)
{
	int rc = 0;

	if (!rnode) {
		efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
		return -EIO;
	}

	if (rnode->nport) {
		if (rnode->attached) {
			efc_log_err(efc, "rnode is still attached\n");
			return -EIO;
		}
		if (rnode->indicator != U32_MAX) {
			if (sli_resource_free(efc->sli, SLI4_RSRC_RPI,
					      rnode->indicator)) {
				efc_log_err(efc,
					    "RPI free fail RPI %d addr=%#x\n",
					    rnode->indicator, rnode->fc_id);
				rc = -EIO;
			} else {
				rnode->indicator = U32_MAX;
				rnode->index = U32_MAX;
			}
		}
	}

	return rc;
}

static int
efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_remote_node *rnode = arg;
	struct sli4_mbox_command_header *hdr =
				(struct sli4_mbox_command_header *)mqe;
	int evt = EFC_EVT_NODE_FREE_FAIL;
	int rc = 0;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
			      le16_to_cpu(hdr->status));

		/*
		 * In certain cases, a non-zero MQE status is OK (all must be
		 * true):
		 *   - node is attached
		 *   - status is 0x1400 (SLI4_MBX_STATUS_RPI_NOT_REG)
		 */
		if (!rnode->attached ||
		    (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG))
			rc = -EIO;
	}

	if (!rc) {
		rnode->attached = false;
		evt = EFC_EVT_NODE_FREE_OK;
	}

	efc_remote_node_cb(efc, evt, rnode);

	return rc;
}

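/*
 * Detach an attached remote node by unregistering its RPI (UNREG_RPI); the
 * completion callback clears the attached flag and invokes the remote node
 * callback.
 */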
int
efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = -EIO;

	if (!rnode) {
		efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
		return -EIO;
	}

	if (rnode->nport) {
		if (!rnode->attached)
			return -EIO;

		rc = -EIO;

		if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator,
				       SLI4_RSRC_RPI, U32_MAX))
			rc = efc->tt.issue_mbox_rqst(efc->base, buf,
					efc_cmd_node_free_cb, rnode);

		if (rc != 0) {
			efc_log_err(efc, "UNREG_RPI failed\n");
			rc = -EIO;
		}
	}

	return rc;
}
778