/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sched/signal.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"

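/*
 * lpfc_vport_set_state - Mirror an fc_vport state change into the driver.
 *
 * Updates the transport fc_vport state (saving the previous state for any
 * transition other than FC_VPORT_INITIALIZING) and maps the fabric/failure
 * states to the driver's internal LPFC_VPORT_FAILED port_state, and
 * FC_VPORT_LINKDOWN to LPFC_VPORT_UNKNOWN.
 */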
inline void lpfc_vport_set_state(struct lpfc_vport *vport,
                                 enum fc_vport_state new_state)
{
        struct fc_vport *fc_vport = vport->fc_vport;

        if (fc_vport) {
                /*
                 * When the transport defines fc_vport_set_state we will
                 * replace this code with the following line
                 */
                /* fc_vport_set_state(fc_vport, new_state); */
                if (new_state != FC_VPORT_INITIALIZING)
                        fc_vport->vport_last_state = fc_vport->vport_state;
                fc_vport->vport_state = new_state;
        }

        /* for all the error states we will set the internal state to FAILED */
        switch (new_state) {
        case FC_VPORT_NO_FABRIC_SUPP:
        case FC_VPORT_NO_FABRIC_RSCS:
        case FC_VPORT_FABRIC_LOGOUT:
        case FC_VPORT_FABRIC_REJ_WWN:
        case FC_VPORT_FAILED:
                vport->port_state = LPFC_VPORT_FAILED;
                break;
        case FC_VPORT_LINKDOWN:
                vport->port_state = LPFC_VPORT_UNKNOWN;
                break;
        default:
                /* do nothing */
                break;
        }
}

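/*
 * lpfc_alloc_vpi - Reserve a free VPI from the HBA's VPI bitmask.
 *
 * Returns the allocated VPI number, or 0 if no VPI is available. VPI 0 is
 * reserved for the physical port and is never handed out here.
 */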
int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
        unsigned long vpi;

        spin_lock_irq(&phba->hbalock);
        /* Start at bit 1 because vpi zero is reserved for the physical port */
        vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
        if (vpi > phba->max_vpi)
                vpi = 0;
        else
                set_bit(vpi, phba->vpi_bmask);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->sli4_hba.max_cfg_param.vpi_used++;
        spin_unlock_irq(&phba->hbalock);
        return vpi;
}

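/*
 * lpfc_free_vpi - Return a VPI to the HBA's VPI bitmask.
 *
 * VPI 0 belongs to the physical port and is never freed.
 */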
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
        if (vpi == 0)
                return;
        spin_lock_irq(&phba->hbalock);
        clear_bit(vpi, phba->vpi_bmask);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->sli4_hba.max_cfg_param.vpi_used--;
        spin_unlock_irq(&phba->hbalock);
}

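/*
 * lpfc_vport_sparm - Read the service parameters for a vport.
 *
 * Issues a READ_SPARAM mailbox command, waits for its completion, and copies
 * the returned service parameters, node name, and port name into the vport.
 * Returns 0 on success or a negative errno on failure.
 */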
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                return -ENOMEM;
        }
        mb = &pmb->u.mb;

        rc = lpfc_read_sparam(phba, pmb, vport->vpi);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        /*
         * Wait for the read_sparams mailbox to complete. Driver needs
         * this per vport to start the FDISC. If the mailbox fails,
         * just cleanup and return an error unless the failure is a
         * mailbox timeout. For MBX_TIMEOUT, allow the default
         * mbox completion handler to take care of the cleanup. This
         * is safe as the mailbox command isn't one that triggers
         * another mailbox.
         */
        pmb->vport = vport;
        rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
        if (rc != MBX_SUCCESS) {
                if (signal_pending(current)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1830 Signal aborted mbxCmd x%x\n",
                                         mb->mbxCommand);
                        if (rc != MBX_TIMEOUT)
                                lpfc_mbox_rsrc_cleanup(phba, pmb,
                                                       MBOX_THD_UNLOCKED);
                        return -EINTR;
                } else {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1818 VPort failed init, mbxCmd x%x "
                                         "READ_SPARM mbxStatus x%x, rc = x%x\n",
                                         mb->mbxCommand, mb->mbxStatus, rc);
                        if (rc != MBX_TIMEOUT)
                                lpfc_mbox_rsrc_cleanup(phba, pmb,
                                                       MBOX_THD_UNLOCKED);
                        return -EIO;
                }
        }

        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof(struct lpfc_name));
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name));
        lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
        return 0;
}

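/*
 * lpfc_valid_wwn_format - Sanity check a WWNN/WWPN.
 *
 * Per the check below, an IEEE format 1 (NAA 1) name must carry zeros in its
 * reserved bits. Returns 1 if the name format is acceptable; otherwise logs
 * the offending name and returns 0.
 */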
static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
                      const char *name_type)
{
        /* ensure that IEEE format 1 addresses
         * contain zeros in bits 59-48
         */
        if (!((wwn->u.wwn[0] >> 4) == 1 &&
              ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
                return 1;

        lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                        "1822 Invalid %s: %02x:%02x:%02x:%02x:"
                        "%02x:%02x:%02x:%02x\n",
                        name_type,
                        wwn->u.wwn[0], wwn->u.wwn[1],
                        wwn->u.wwn[2], wwn->u.wwn[3],
                        wwn->u.wwn[4], wwn->u.wwn[5],
                        wwn->u.wwn[6], wwn->u.wwn[7]);
        return 0;
}

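/*
 * lpfc_unique_wwpn - Check that a new vport's WWPN is unique on the HBA.
 *
 * Walks the HBA port list under port_list_lock and returns 0 if any other
 * vport already uses the same port name, 1 if the WWPN is unique.
 */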
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
        struct lpfc_vport *vport;
        unsigned long flags;

        spin_lock_irqsave(&phba->port_list_lock, flags);
        list_for_each_entry(vport, &phba->port_list, listentry) {
                if (vport == new_vport)
                        continue;
                /* If they match, return not unique */
                if (memcmp(&vport->fc_sparam.portName,
                           &new_vport->fc_sparam.portName,
                           sizeof(struct lpfc_name)) == 0) {
                        spin_unlock_irqrestore(&phba->port_list_lock, flags);
                        return 0;
                }
        }
        spin_unlock_irqrestore(&phba->port_list_lock, flags);
        return 1;
}

/**
 * lpfc_discovery_wait - Wait for driver discovery to quiesce
 * @vport: The virtual port for which this call is being executed.
 *
 * The driver calls this routine from lpfc_vport_delete to enforce a
 * synchronous execution of the vport delete relative to discovery
 * activities. The lpfc_vport_delete routine should not return until it
 * can reasonably guarantee that discovery has quiesced. Post FDISC LOGO,
 * the driver must wait until its SAN teardown is complete and all
 * resources are recovered before allowing cleanup.
 *
 * This routine does not require any locks held.
 **/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        uint32_t wait_flags = 0;
        unsigned long wait_time_max;
        unsigned long start_time;

        wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
                     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;

        /*
         * The time constraint on this loop is a balance between the
         * fabric RA_TOV value and dev_loss tmo. The driver's
         * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
         */
        wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
        wait_time_max += jiffies;
        start_time = jiffies;
        while (time_before(jiffies, wait_time_max)) {
                if ((vport->num_disc_nodes > 0) ||
                    (vport->fc_flag & wait_flags) ||
                    ((vport->port_state > LPFC_VPORT_FAILED) &&
                     (vport->port_state < LPFC_VPORT_READY))) {
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                                         "1833 Vport discovery quiesce Wait:"
                                         " state x%x fc_flags x%x"
                                         " num_nodes x%x, waiting 1000 msecs"
                                         " total wait msecs x%x\n",
                                         vport->port_state, vport->fc_flag,
                                         vport->num_disc_nodes,
                                         jiffies_to_msecs(jiffies - start_time));
                        msleep(1000);
                } else {
                        /* Base case. Wait variants satisfied. Break out */
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                                         "1834 Vport discovery quiesced:"
                                         " state x%x fc_flags x%x"
                                         " wait msecs x%x\n",
                                         vport->port_state, vport->fc_flag,
                                         jiffies_to_msecs(jiffies
                                                          - start_time));
                        break;
                }
        }

        if (time_after(jiffies, wait_time_max))
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1835 Vport discovery quiesce failed:"
                                 " state x%x fc_flags x%x wait msecs x%x\n",
                                 vport->port_state, vport->fc_flag,
                                 jiffies_to_msecs(jiffies - start_time));
}

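/*
 * lpfc_vport_create - fc_transport entry point for NPIV vport creation.
 *
 * Allocates a VPI and an instance number, creates the SCSI host for the new
 * vport, reads its service parameters, validates the WWNN/WWPN, and then
 * either starts FDISC discovery or leaves the vport in a linkdown or
 * disabled state. Returns VPORT_OK on success or a VPORT_* error code.
 */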
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost = fc_vport->shost;
        struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba *phba = pport->phba;
        struct lpfc_vport *vport = NULL;
        int instance;
        int vpi;
        int rc = VPORT_ERROR;
        int status;

        if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1808 Create VPORT failed: "
                                "NPIV is not enabled: SLImode:%d\n",
                                phba->sli_rev);
                rc = VPORT_INVAL;
                goto error_out;
        }

        /* NPIV is not supported if HBA has NVME Target enabled */
        if (phba->nvmet_support) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3189 Create VPORT failed: "
                                "NPIV is not supported on NVME Target\n");
                rc = VPORT_INVAL;
                goto error_out;
        }

        vpi = lpfc_alloc_vpi(phba);
        if (vpi == 0) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1809 Create VPORT failed: "
                                "Max VPORTs (%d) exceeded\n",
                                phba->max_vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        /* Assign an unused board number */
        if ((instance = lpfc_get_instance()) < 0) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1810 Create VPORT failed: Cannot get "
                                "instance number\n");
                lpfc_free_vpi(phba, vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        vport = lpfc_create_port(phba, instance, &fc_vport->dev);
        if (!vport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1811 Create VPORT failed: vpi x%x\n", vpi);
                lpfc_free_vpi(phba, vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        vport->vpi = vpi;
        lpfc_debugfs_initialize(vport);

        if ((status = lpfc_vport_sparm(phba, vport))) {
                if (status == -EINTR) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1831 Create VPORT Interrupted.\n");
                        rc = VPORT_ERROR;
                } else {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1813 Create VPORT failed. "
                                         "Cannot get sparam\n");
                        rc = VPORT_NORESOURCES;
                }
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                goto error_out;
        }

        u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
        u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

        memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
        memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

        if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
            !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1821 Create VPORT failed. "
                                 "Invalid WWN format\n");
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_INVAL;
                goto error_out;
        }

        if (!lpfc_unique_wwpn(phba, vport)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1823 Create VPORT failed. "
                                 "Duplicate WWN on HBA\n");
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_INVAL;
                goto error_out;
        }

        /* Create binary sysfs attribute for vport */
        lpfc_alloc_sysfs_attr(vport);

        /* Set the DFT_LUN_Q_DEPTH accordingly */
        vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;

        /* Only the physical port can support NVME for now */
        vport->cfg_enable_fc4_type = LPFC_ENABLE_FCP;

        *(struct lpfc_vport **)fc_vport->dd_data = vport;
        vport->fc_vport = fc_vport;

        /* At this point we are fully registered with SCSI Layer. */
        vport->load_flag |= FC_ALLOW_FDMI;
        if (phba->cfg_enable_SmartSAN ||
            (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
                /* Setup appropriate attribute masks */
                vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
                vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
        }

        /*
         * In SLI4, the vpi must be activated before it can be used
         * by the port.
         */
        if ((phba->sli_rev == LPFC_SLI_REV4) &&
            (pport->fc_flag & FC_VFI_REGISTERED)) {
                rc = lpfc_sli4_init_vpi(vport);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "1838 Failed to INIT_VPI on vpi %d "
                                        "status %d\n", vpi, rc);
                        rc = VPORT_NORESOURCES;
                        lpfc_free_vpi(phba, vpi);
                        goto error_out;
                }
        } else if (phba->sli_rev == LPFC_SLI_REV4) {
                /*
                 * Driver cannot INIT_VPI now. Set the flags to
                 * init_vpi when reg_vfi complete.
                 */
                vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                rc = VPORT_OK;
                goto out;
        }

        if ((phba->link_state < LPFC_LINK_UP) ||
            (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
            (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                rc = VPORT_OK;
                goto out;
        }

        if (disable) {
                lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
                rc = VPORT_OK;
                goto out;
        }

        /* Use the Physical nodes Fabric NDLP to determine if the link is
         * up and ready to FDISC.
         */
        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp &&
            ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
                        lpfc_set_disctmo(vport);
                        lpfc_initial_fdisc(vport);
                } else {
                        lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "0262 No NPIV Fabric support\n");
                }
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }
        rc = VPORT_OK;

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1825 Vport Created.\n");
        lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
        return rc;
}

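/*
 * lpfc_send_npiv_logo - Issue an NPIV fabric LOGO and wait for completion.
 *
 * Marks the node as waiting for a LOGO, issues the ELS LOGO, and waits up to
 * 2 * fc_ratov for the completion to clear NLP_WAIT_FOR_LOGO. Returns 0 on
 * success, -EINTR if the wait timed out, or -EIO if the LOGO could not be
 * issued.
 */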
static int
lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        int rc;
        struct lpfc_hba *phba = vport->phba;

        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

        spin_lock_irq(&ndlp->lock);
        if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
            !ndlp->logo_waitq) {
                ndlp->logo_waitq = &waitq;
                ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
                ndlp->nlp_flag |= NLP_ISSUE_LOGO;
                ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
        }
        spin_unlock_irq(&ndlp->lock);
        rc = lpfc_issue_els_npiv_logo(vport, ndlp);
        if (!rc) {
                wait_event_timeout(waitq,
                                   (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
                                   msecs_to_jiffies(phba->fc_ratov * 2000));

                if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
                        goto logo_cmpl;
                /* LOGO wait failed. Correct status. */
                rc = -EINTR;
        } else {
                rc = -EIO;
        }

        /* Error - clean up node flags. */
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
        ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
        spin_unlock_irq(&ndlp->lock);

logo_cmpl:
        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                         "1824 Issue LOGO completes with status %d\n",
                         rc);
        spin_lock_irq(&ndlp->lock);
        ndlp->logo_waitq = NULL;
        spin_unlock_irq(&ndlp->lock);
        return rc;
}

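/*
 * disable_vport - Quiesce and disable an NPIV vport.
 *
 * Sends a fabric LOGO when the link is up, tears down the vport's RPIs and
 * timers, unregisters the VPI, and moves the vport to FC_VPORT_DISABLED.
 */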
static int
disable_vport(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        /* Can't disable during an outstanding delete. */
        if (vport->load_flag & FC_UNLOADING)
                return 0;

        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (ndlp && phba->link_state >= LPFC_LINK_UP)
                (void)lpfc_send_npiv_logo(vport, ndlp);

        lpfc_sli_host_down(vport);
        lpfc_cleanup_rpis(vport, 0);

        lpfc_stop_vport_timers(vport);
        lpfc_unreg_all_rpis(vport);
        lpfc_unreg_default_rpis(vport);
        /*
         * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
         * scsi_host_put() to release the vport.
         */
        lpfc_mbx_unreg_vpi(vport);
        if (phba->sli_rev == LPFC_SLI_REV4) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                spin_unlock_irq(shost->host_lock);
        }

        lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1826 Vport Disabled.\n");
        return VPORT_OK;
}

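/*
 * enable_vport - Re-enable a previously disabled NPIV vport.
 *
 * If the link is down or in loop topology the vport is simply marked
 * FC_VPORT_LINKDOWN. Otherwise the VPI is (re)initialized or flagged for
 * registration and FDISC discovery is restarted when the fabric supports
 * NPIV.
 */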
static int
enable_vport(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if ((phba->link_state < LPFC_LINK_UP) ||
            (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                return VPORT_OK;
        }

        spin_lock_irq(shost->host_lock);
        vport->load_flag |= FC_LOADING;
        if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
                spin_unlock_irq(shost->host_lock);
                lpfc_issue_init_vpi(vport);
                goto out;
        }

        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
        spin_unlock_irq(shost->host_lock);

        /* Use the Physical nodes Fabric NDLP to determine if the link is
         * up and ready to FDISC.
         */
        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
                        lpfc_set_disctmo(vport);
                        lpfc_initial_fdisc(vport);
                } else {
                        lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "0264 No NPIV Fabric support\n");
                }
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1827 Vport Enabled.\n");
        return VPORT_OK;
}

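/*
 * lpfc_vport_disable - fc_transport entry point to enable or disable a vport.
 */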
int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
        if (disable)
                return disable_vport(fc_vport);
        else
                return enable_vport(fc_vport);
}

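/*
 * lpfc_vport_delete - fc_transport entry point for NPIV vport deletion.
 *
 * Waits for discovery to settle, sends DA_ID and a fabric LOGO when
 * appropriate, removes the SCSI host, unregisters the VPI, and releases the
 * vport's resources. Returns VPORT_OK on success or an error if the vport
 * cannot be deleted (e.g. the physical port or a static vport).
 */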
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
        struct lpfc_nodelist *ndlp = NULL;
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;
        int rc;
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1812 vport_delete failed: Cannot delete "
                                 "physical host\n");
                return VPORT_ERROR;
        }

        /* If the vport is a static vport fail the deletion. */
        if ((vport->vport_flag & STATIC_VPORT) &&
            !(phba->pport->load_flag & FC_UNLOADING)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1837 vport_delete failed: Cannot delete "
                                 "static vport.\n");
                return VPORT_ERROR;
        }

        spin_lock_irq(&phba->hbalock);
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(&phba->hbalock);

        /*
         * If we are not unloading the driver then prevent the vport_delete
         * from happening until after this vport's discovery is finished.
         */
        if (!(phba->pport->load_flag & FC_UNLOADING)) {
                int check_count = 0;
                while (check_count < ((phba->fc_ratov * 3) + 3) &&
                       vport->port_state > LPFC_VPORT_FAILED &&
                       vport->port_state < LPFC_VPORT_READY) {
                        check_count++;
                        msleep(1000);
                }
                if (vport->port_state > LPFC_VPORT_FAILED &&
                    vport->port_state < LPFC_VPORT_READY)
                        return -EAGAIN;
        }

        /*
         * Take early refcount for outstanding I/O requests we schedule during
         * delete processing for unreg_vpi. Always keep this before
         * scsi_remove_host() as we can no longer obtain a reference through
         * scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL.
         */
        if (!scsi_host_get(shost))
                return VPORT_INVAL;

        lpfc_free_sysfs_attr(vport);
        lpfc_debugfs_terminate(vport);

        /* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */
        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (!ndlp)
                goto skip_logo;

        /* Send the DA_ID and Fabric LOGO to cleanup the NPIV fabric entries. */
        if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
            phba->link_state >= LPFC_LINK_UP &&
            phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
                if (vport->cfg_enable_da_id) {
                        /* Send DA_ID and wait for a completion. This is best
                         * effort. If the DA_ID fails, likely the fabric will
                         * "leak" NportIDs but at least the driver issued the
                         * command.
                         */
                        ndlp = lpfc_findnode_did(vport, NameServer_DID);
                        if (!ndlp)
                                goto issue_logo;

                        spin_lock_irq(&ndlp->lock);
                        ndlp->da_id_waitq = &waitq;
                        ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
                        spin_unlock_irq(&ndlp->lock);

                        rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
                        if (!rc) {
                                wait_event_timeout(waitq,
                                        !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
                                        msecs_to_jiffies(phba->fc_ratov * 2000));
                        }

                        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
                                         "1829 DA_ID issue status %d. "
                                         "SFlag x%x NState x%x, NFlag x%x "
                                         "Rpi x%x\n",
                                         rc, ndlp->save_flags, ndlp->nlp_state,
                                         ndlp->nlp_flag, ndlp->nlp_rpi);

                        /* Remove the waitq and save_flags. It no
                         * longer matters if the wake happened.
                         */
                        spin_lock_irq(&ndlp->lock);
                        ndlp->da_id_waitq = NULL;
                        ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
                        spin_unlock_irq(&ndlp->lock);
                }

issue_logo:
                /*
                 * If the vpi is not registered, then a valid FDISC doesn't
                 * exist and there is no need for an ELS LOGO. Just cleanup
                 * the ndlp.
                 */
                if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
                        goto skip_logo;

                /* Issue a Fabric LOGO to cleanup fabric resources. */
                ndlp = lpfc_findnode_did(vport, Fabric_DID);
                if (!ndlp)
                        goto skip_logo;

                rc = lpfc_send_npiv_logo(vport, ndlp);
                if (rc)
                        goto skip_logo;
        }

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_discovery_wait(vport);

skip_logo:

        /* Remove FC host to break driver binding. */
        fc_remove_host(shost);
        scsi_remove_host(shost);

        lpfc_cleanup(vport);

        /* Remove scsi host now. The nodes are cleaned up. */
        lpfc_sli_host_down(vport);
        lpfc_stop_vport_timers(vport);

        if (!(phba->pport->load_flag & FC_UNLOADING)) {
                lpfc_unreg_all_rpis(vport);
                lpfc_unreg_default_rpis(vport);
                /*
                 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
                 * does the scsi_host_put() to release the vport.
                 */
                if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
                    lpfc_mbx_unreg_vpi(vport))
                        scsi_host_put(shost);
        } else {
                scsi_host_put(shost);
        }

        lpfc_free_vpi(phba, vport->vpi);
        vport->work_port_events = 0;
        spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->port_list_lock);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1828 Vport Deleted.\n");
        scsi_host_put(shost);
        return VPORT_OK;
}

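/*
 * lpfc_create_vport_work_array - Snapshot the HBA's active vports.
 *
 * Returns a kcalloc'd array (max_vports + 1 entries) of vport pointers, each
 * with a SCSI host reference held; ports that are unloading or whose host
 * reference cannot be taken are skipped. The caller must release the array
 * with lpfc_destroy_vport_work_array(). Returns NULL on allocation failure.
 */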
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
        struct lpfc_vport *port_iterator;
        struct lpfc_vport **vports;
        int index = 0;

        vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *),
                         GFP_KERNEL);
        if (vports == NULL)
                return NULL;
        spin_lock_irq(&phba->port_list_lock);
        list_for_each_entry(port_iterator, &phba->port_list, listentry) {
                if (port_iterator->load_flag & FC_UNLOADING)
                        continue;
                if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
                        lpfc_printf_vlog(port_iterator, KERN_ERR,
                                         LOG_TRACE_EVENT,
                                         "1801 Create vport work array FAILED: "
                                         "cannot do scsi_host_get\n");
                        continue;
                }
                vports[index++] = port_iterator;
        }
        spin_unlock_irq(&phba->port_list_lock);
        return vports;
}

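/*
 * lpfc_destroy_vport_work_array - Release a vport work array.
 *
 * Drops the SCSI host reference taken for each entry and frees the array.
 */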
void
lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
        int i;

        if (vports == NULL)
                return;
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                scsi_host_put(lpfc_shost_from_vport(vports[i]));
        kfree(vports);
}