xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_attr.c (revision 2d68bb26)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14 
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
16 
17 /* SYSFS attributes --------------------------------------------------------- */
18 
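/*
 * The binary attributes defined below are registered on the SCSI host's
 * shost_gendev kobject by qla2x00_alloc_sysfs_attr() near the end of
 * this block, so they show up alongside the host device in sysfs
 * (typically something like /sys/class/scsi_host/hostN/device/<name>;
 * the exact path is platform dependent and given here only as an
 * illustration).
 */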
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 			   struct bin_attribute *bin_attr,
22 			   char *buf, loff_t off, size_t count)
23 {
24 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 	    struct device, kobj)));
26 	struct qla_hw_data *ha = vha->hw;
27 	int rval = 0;
28 
29 	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 		return 0;
31 
32 	mutex_lock(&ha->optrom_mutex);
33 	if (IS_P3P_TYPE(ha)) {
34 		if (off < ha->md_template_size) {
35 			rval = memory_read_from_buffer(buf, count,
36 			    &off, ha->md_tmplt_hdr, ha->md_template_size);
37 		} else {
38 			off -= ha->md_template_size;
39 			rval = memory_read_from_buffer(buf, count,
40 			    &off, ha->md_dump, ha->md_dump_size);
41 		}
42 	} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
43 		rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 		    MCTP_DUMP_SIZE);
45 	} else if (ha->fw_dump_reading) {
46 		rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47 					ha->fw_dump_len);
48 	} else {
49 		rval = 0;
50 	}
51 	mutex_unlock(&ha->optrom_mutex);
52 	return rval;
53 }
54 
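/*
 * Rough summary of the control values accepted by writes to "fw_dump",
 * as handled by the switch below:
 *
 *   0 - release a previously exposed firmware dump
 *   1 - expose an existing raw firmware dump for reading
 *   2 - allocate the firmware dump buffer
 *   3 - trigger a dump (set the reset owner on ISP82xx/ISP8044,
 *       otherwise raise a firmware system error)
 *   4 - log whether MiniDump is supported (P3P parts only)
 *   5 - request an ISP abort (P3P parts only)
 *   6 - release an MCTP dump
 *   7 - expose an MCTP dump for reading
 *
 * Illustrative usage from user space (host number and paths are
 * examples only):
 *
 *   echo 1 > /sys/class/scsi_host/host3/device/fw_dump
 *   cat /sys/class/scsi_host/host3/device/fw_dump > /tmp/fw.dump
 *   echo 0 > /sys/class/scsi_host/host3/device/fw_dump
 */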
55 static ssize_t
56 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
57 			    struct bin_attribute *bin_attr,
58 			    char *buf, loff_t off, size_t count)
59 {
60 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
61 	    struct device, kobj)));
62 	struct qla_hw_data *ha = vha->hw;
63 	int reading;
64 
65 	if (off != 0)
66 		return (0);
67 
68 	reading = simple_strtol(buf, NULL, 10);
69 	switch (reading) {
70 	case 0:
71 		if (!ha->fw_dump_reading)
72 			break;
73 
74 		ql_log(ql_log_info, vha, 0x705d,
75 		    "Firmware dump cleared on (%ld).\n", vha->host_no);
76 
77 		if (IS_P3P_TYPE(ha)) {
78 			qla82xx_md_free(vha);
79 			qla82xx_md_prep(vha);
80 		}
81 		ha->fw_dump_reading = 0;
82 		ha->fw_dumped = 0;
83 		break;
84 	case 1:
85 		if (ha->fw_dumped && !ha->fw_dump_reading) {
86 			ha->fw_dump_reading = 1;
87 
88 			ql_log(ql_log_info, vha, 0x705e,
89 			    "Raw firmware dump ready for read on (%ld).\n",
90 			    vha->host_no);
91 		}
92 		break;
93 	case 2:
94 		qla2x00_alloc_fw_dump(vha);
95 		break;
96 	case 3:
97 		if (IS_QLA82XX(ha)) {
98 			qla82xx_idc_lock(ha);
99 			qla82xx_set_reset_owner(vha);
100 			qla82xx_idc_unlock(ha);
101 		} else if (IS_QLA8044(ha)) {
102 			qla8044_idc_lock(ha);
103 			qla82xx_set_reset_owner(vha);
104 			qla8044_idc_unlock(ha);
105 		} else {
106 			ha->fw_dump_mpi = 1;
107 			qla2x00_system_error(vha);
108 		}
109 		break;
110 	case 4:
111 		if (IS_P3P_TYPE(ha)) {
112 			if (ha->md_tmplt_hdr)
113 				ql_dbg(ql_dbg_user, vha, 0x705b,
114 				    "MiniDump supported with this firmware.\n");
115 			else
116 				ql_dbg(ql_dbg_user, vha, 0x709d,
117 				    "MiniDump not supported with this firmware.\n");
118 		}
119 		break;
120 	case 5:
121 		if (IS_P3P_TYPE(ha))
122 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
123 		break;
124 	case 6:
125 		if (!ha->mctp_dump_reading)
126 			break;
127 		ql_log(ql_log_info, vha, 0x70c1,
128 		    "MCTP dump cleared on (%ld).\n", vha->host_no);
129 		ha->mctp_dump_reading = 0;
130 		ha->mctp_dumped = 0;
131 		break;
132 	case 7:
133 		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
134 			ha->mctp_dump_reading = 1;
135 			ql_log(ql_log_info, vha, 0x70c2,
136 			    "Raw mctp dump ready for read on (%ld).\n",
137 			    vha->host_no);
138 		}
139 		break;
140 	}
141 	return count;
142 }
143 
144 static struct bin_attribute sysfs_fw_dump_attr = {
145 	.attr = {
146 		.name = "fw_dump",
147 		.mode = S_IRUSR | S_IWUSR,
148 	},
149 	.size = 0,
150 	.read = qla2x00_sysfs_read_fw_dump,
151 	.write = qla2x00_sysfs_write_fw_dump,
152 };
153 
154 static ssize_t
155 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
156 			 struct bin_attribute *bin_attr,
157 			 char *buf, loff_t off, size_t count)
158 {
159 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
160 	    struct device, kobj)));
161 	struct qla_hw_data *ha = vha->hw;
162 	uint32_t faddr;
163 	struct active_regions active_regions = { };
164 
165 	if (!capable(CAP_SYS_ADMIN))
166 		return 0;
167 
168 	mutex_lock(&ha->optrom_mutex);
169 	if (qla2x00_chip_is_down(vha)) {
170 		mutex_unlock(&ha->optrom_mutex);
171 		return -EAGAIN;
172 	}
173 
174 	if (!IS_NOCACHE_VPD_TYPE(ha)) {
175 		mutex_unlock(&ha->optrom_mutex);
176 		goto skip;
177 	}
178 
179 	faddr = ha->flt_region_nvram;
180 	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
181 		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
182 			faddr = ha->flt_region_nvram_sec;
183 	}
184 	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
185 
186 	mutex_unlock(&ha->optrom_mutex);
187 
188 skip:
189 	return memory_read_from_buffer(buf, count, &off, ha->nvram,
190 					ha->nvram_size);
191 }
192 
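/*
 * A write to "nvram" must supply a complete image (off == 0 and
 * count == ha->nvram_size).  Roughly, the handler below recomputes the
 * trailing checksum so that the image sums to zero: on FWI2-capable
 * parts the last 32-bit word is set to the two's complement of the sum
 * of the preceding words, and on older parts the same is done byte by
 * byte.  The ISP is then reset so the new settings take effect.
 */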
193 static ssize_t
194 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
195 			  struct bin_attribute *bin_attr,
196 			  char *buf, loff_t off, size_t count)
197 {
198 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
199 	    struct device, kobj)));
200 	struct qla_hw_data *ha = vha->hw;
201 	uint16_t	cnt;
202 
203 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
204 	    !ha->isp_ops->write_nvram)
205 		return -EINVAL;
206 
207 	/* Checksum NVRAM. */
208 	if (IS_FWI2_CAPABLE(ha)) {
209 		uint32_t *iter;
210 		uint32_t chksum;
211 
212 		iter = (uint32_t *)buf;
213 		chksum = 0;
214 		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
215 			chksum += le32_to_cpu(*iter);
216 		chksum = ~chksum + 1;
217 		*iter = cpu_to_le32(chksum);
218 	} else {
219 		uint8_t *iter;
220 		uint8_t chksum;
221 
222 		iter = (uint8_t *)buf;
223 		chksum = 0;
224 		for (cnt = 0; cnt < count - 1; cnt++)
225 			chksum += *iter++;
226 		chksum = ~chksum + 1;
227 		*iter = chksum;
228 	}
229 
230 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
231 		ql_log(ql_log_warn, vha, 0x705f,
232 		    "HBA not online, failing NVRAM update.\n");
233 		return -EAGAIN;
234 	}
235 
236 	mutex_lock(&ha->optrom_mutex);
237 	if (qla2x00_chip_is_down(vha)) {
238 		mutex_unlock(&ha->optrom_mutex);
239 		return -EAGAIN;
240 	}
241 
242 	/* Write NVRAM. */
243 	ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
244 	ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
245 	    count);
246 	mutex_unlock(&ha->optrom_mutex);
247 
248 	ql_dbg(ql_dbg_user, vha, 0x7060,
249 	    "Setting ISP_ABORT_NEEDED\n");
250 	/* NVRAM settings take effect immediately. */
251 	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
252 	qla2xxx_wake_dpc(vha);
253 	qla2x00_wait_for_chip_reset(vha);
254 
255 	return count;
256 }
257 
258 static struct bin_attribute sysfs_nvram_attr = {
259 	.attr = {
260 		.name = "nvram",
261 		.mode = S_IRUSR | S_IWUSR,
262 	},
263 	.size = 512,
264 	.read = qla2x00_sysfs_read_nvram,
265 	.write = qla2x00_sysfs_write_nvram,
266 };
267 
268 static ssize_t
269 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
270 			  struct bin_attribute *bin_attr,
271 			  char *buf, loff_t off, size_t count)
272 {
273 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
274 	    struct device, kobj)));
275 	struct qla_hw_data *ha = vha->hw;
276 	ssize_t rval = 0;
277 
278 	mutex_lock(&ha->optrom_mutex);
279 
280 	if (ha->optrom_state != QLA_SREADING)
281 		goto out;
282 
283 	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
284 	    ha->optrom_region_size);
285 
286 out:
287 	mutex_unlock(&ha->optrom_mutex);
288 
289 	return rval;
290 }
291 
292 static ssize_t
293 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
294 			   struct bin_attribute *bin_attr,
295 			   char *buf, loff_t off, size_t count)
296 {
297 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
298 	    struct device, kobj)));
299 	struct qla_hw_data *ha = vha->hw;
300 
301 	mutex_lock(&ha->optrom_mutex);
302 
303 	if (ha->optrom_state != QLA_SWRITING) {
304 		mutex_unlock(&ha->optrom_mutex);
305 		return -EINVAL;
306 	}
307 	if (off > ha->optrom_region_size) {
308 		mutex_unlock(&ha->optrom_mutex);
309 		return -ERANGE;
310 	}
311 	if (off + count > ha->optrom_region_size)
312 		count = ha->optrom_region_size - off;
313 
314 	memcpy(&ha->optrom_buffer[off], buf, count);
315 	mutex_unlock(&ha->optrom_mutex);
316 
317 	return count;
318 }
319 
320 static struct bin_attribute sysfs_optrom_attr = {
321 	.attr = {
322 		.name = "optrom",
323 		.mode = S_IRUSR | S_IWUSR,
324 	},
325 	.size = 0,
326 	.read = qla2x00_sysfs_read_optrom,
327 	.write = qla2x00_sysfs_write_optrom,
328 };
329 
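/*
 * "optrom_ctl" accepts a string of the form "<cmd>:<start>:<size>"
 * (parsed with "%d:%x:%x", so start and size are hex) and drives a
 * staged flash access through the "optrom" attribute: 1 stages a read
 * of the given region into a buffer that can then be read from
 * "optrom", 2 stages a write (fill "optrom" next), 3 commits the staged
 * data to flash, and 0 frees the staging buffer.  A rough read example,
 * with a made-up region:
 *
 *   echo "1:0:20000" > optrom_ctl    # stage a 0x20000-byte read at 0x0
 *   dd if=optrom of=/tmp/boot.bin bs=64k
 *   echo "0" > optrom_ctl            # release the staging buffer
 */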
330 static ssize_t
331 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
332 			       struct bin_attribute *bin_attr,
333 			       char *buf, loff_t off, size_t count)
334 {
335 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
336 	    struct device, kobj)));
337 	struct qla_hw_data *ha = vha->hw;
338 	uint32_t start = 0;
339 	uint32_t size = ha->optrom_size;
340 	int val, valid;
341 	ssize_t rval = count;
342 
343 	if (off)
344 		return -EINVAL;
345 
346 	if (unlikely(pci_channel_offline(ha->pdev)))
347 		return -EAGAIN;
348 
349 	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
350 		return -EINVAL;
351 	if (start > ha->optrom_size)
352 		return -EINVAL;
353 	if (size > ha->optrom_size - start)
354 		size = ha->optrom_size - start;
355 
356 	mutex_lock(&ha->optrom_mutex);
357 	if (qla2x00_chip_is_down(vha)) {
358 		mutex_unlock(&ha->optrom_mutex);
359 		return -EAGAIN;
360 	}
361 	switch (val) {
362 	case 0:
363 		if (ha->optrom_state != QLA_SREADING &&
364 		    ha->optrom_state != QLA_SWRITING) {
365 			rval =  -EINVAL;
366 			goto out;
367 		}
368 		ha->optrom_state = QLA_SWAITING;
369 
370 		ql_dbg(ql_dbg_user, vha, 0x7061,
371 		    "Freeing flash region allocation -- 0x%x bytes.\n",
372 		    ha->optrom_region_size);
373 
374 		vfree(ha->optrom_buffer);
375 		ha->optrom_buffer = NULL;
376 		break;
377 	case 1:
378 		if (ha->optrom_state != QLA_SWAITING) {
379 			rval = -EINVAL;
380 			goto out;
381 		}
382 
383 		ha->optrom_region_start = start;
384 		ha->optrom_region_size = size;
385 
386 		ha->optrom_state = QLA_SREADING;
387 		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
388 		if (ha->optrom_buffer == NULL) {
389 			ql_log(ql_log_warn, vha, 0x7062,
390 			    "Unable to allocate memory for optrom retrieval "
391 			    "(%x).\n", ha->optrom_region_size);
392 
393 			ha->optrom_state = QLA_SWAITING;
394 			rval = -ENOMEM;
395 			goto out;
396 		}
397 
398 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
399 			ql_log(ql_log_warn, vha, 0x7063,
400 			    "HBA not online, failing flash read.\n");
401 			rval = -EAGAIN;
402 			goto out;
403 		}
404 
405 		ql_dbg(ql_dbg_user, vha, 0x7064,
406 		    "Reading flash region -- 0x%x/0x%x.\n",
407 		    ha->optrom_region_start, ha->optrom_region_size);
408 
409 		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
410 		    ha->optrom_region_start, ha->optrom_region_size);
411 		break;
412 	case 2:
413 		if (ha->optrom_state != QLA_SWAITING) {
414 			rval = -EINVAL;
415 			goto out;
416 		}
417 
418 		/*
419 		 * We need to be more restrictive on which FLASH regions are
420 		 * allowed to be updated via user-space.  Regions accessible
421 		 * via this method include:
422 		 *
423 		 * ISP21xx/ISP22xx/ISP23xx type boards:
424 		 *
425 		 * 	0x000000 -> 0x020000 -- Boot code.
426 		 *
427 		 * ISP2322/ISP24xx type boards:
428 		 *
429 		 * 	0x000000 -> 0x07ffff -- Boot code.
430 		 * 	0x080000 -> 0x0fffff -- Firmware.
431 		 *
432 		 * ISP25xx type boards:
433 		 *
434 		 * 	0x000000 -> 0x07ffff -- Boot code.
435 		 * 	0x080000 -> 0x0fffff -- Firmware.
436 		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
437 		 *
438 		 * > ISP25xx type boards:
439 		 *
440 		 *      None -- should go through BSG.
441 		 */
442 		valid = 0;
443 		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
444 			valid = 1;
445 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
446 			valid = 1;
447 		if (!valid) {
448 			ql_log(ql_log_warn, vha, 0x7065,
449 			    "Invalid start region 0x%x/0x%x.\n", start, size);
450 			rval = -EINVAL;
451 			goto out;
452 		}
453 
454 		ha->optrom_region_start = start;
455 		ha->optrom_region_size = size;
456 
457 		ha->optrom_state = QLA_SWRITING;
458 		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
459 		if (ha->optrom_buffer == NULL) {
460 			ql_log(ql_log_warn, vha, 0x7066,
461 			    "Unable to allocate memory for optrom update "
462 			    "(%x)\n", ha->optrom_region_size);
463 
464 			ha->optrom_state = QLA_SWAITING;
465 			rval = -ENOMEM;
466 			goto out;
467 		}
468 
469 		ql_dbg(ql_dbg_user, vha, 0x7067,
470 		    "Staging flash region write -- 0x%x/0x%x.\n",
471 		    ha->optrom_region_start, ha->optrom_region_size);
472 
473 		break;
474 	case 3:
475 		if (ha->optrom_state != QLA_SWRITING) {
476 			rval = -EINVAL;
477 			goto out;
478 		}
479 
480 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
481 			ql_log(ql_log_warn, vha, 0x7068,
482 			    "HBA not online, failing flash update.\n");
483 			rval = -EAGAIN;
484 			goto out;
485 		}
486 
487 		ql_dbg(ql_dbg_user, vha, 0x7069,
488 		    "Writing flash region -- 0x%x/0x%x.\n",
489 		    ha->optrom_region_start, ha->optrom_region_size);
490 
491 		rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
492 		    ha->optrom_region_start, ha->optrom_region_size);
493 		if (rval)
494 			rval = -EIO;
495 		break;
496 	default:
497 		rval = -EINVAL;
498 	}
499 
500 out:
501 	mutex_unlock(&ha->optrom_mutex);
502 	return rval;
503 }
504 
505 static struct bin_attribute sysfs_optrom_ctl_attr = {
506 	.attr = {
507 		.name = "optrom_ctl",
508 		.mode = S_IWUSR,
509 	},
510 	.size = 0,
511 	.write = qla2x00_sysfs_write_optrom_ctl,
512 };
513 
514 static ssize_t
515 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
516 		       struct bin_attribute *bin_attr,
517 		       char *buf, loff_t off, size_t count)
518 {
519 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
520 	    struct device, kobj)));
521 	struct qla_hw_data *ha = vha->hw;
522 	uint32_t faddr;
523 	struct active_regions active_regions = { };
524 
525 	if (unlikely(pci_channel_offline(ha->pdev)))
526 		return -EAGAIN;
527 
528 	if (!capable(CAP_SYS_ADMIN))
529 		return -EINVAL;
530 
531 	if (IS_NOCACHE_VPD_TYPE(ha))
532 		goto skip;
533 
534 	faddr = ha->flt_region_vpd << 2;
535 
536 	if (IS_QLA28XX(ha)) {
537 		qla28xx_get_aux_images(vha, &active_regions);
538 		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
539 			faddr = ha->flt_region_vpd_sec << 2;
540 
541 		ql_dbg(ql_dbg_init, vha, 0x7070,
542 		    "Loading %s nvram image.\n",
543 		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
544 		    "primary" : "secondary");
545 	}
546 
547 	mutex_lock(&ha->optrom_mutex);
548 	if (qla2x00_chip_is_down(vha)) {
549 		mutex_unlock(&ha->optrom_mutex);
550 		return -EAGAIN;
551 	}
552 
553 	ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
554 	mutex_unlock(&ha->optrom_mutex);
555 
556
557 skip:
558 	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
559 }
560 
561 static ssize_t
562 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
563 			struct bin_attribute *bin_attr,
564 			char *buf, loff_t off, size_t count)
565 {
566 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
567 	    struct device, kobj)));
568 	struct qla_hw_data *ha = vha->hw;
569 	uint8_t *tmp_data;
570 
571 	if (unlikely(pci_channel_offline(ha->pdev)))
572 		return 0;
573 
574 	if (qla2x00_chip_is_down(vha))
575 		return 0;
576 
577 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
578 	    !ha->isp_ops->write_nvram)
579 		return 0;
580 
581 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
582 		ql_log(ql_log_warn, vha, 0x706a,
583 		    "HBA not online, failing VPD update.\n");
584 		return -EAGAIN;
585 	}
586 
587 	mutex_lock(&ha->optrom_mutex);
588 	if (qla2x00_chip_is_down(vha)) {
589 		mutex_unlock(&ha->optrom_mutex);
590 		return -EAGAIN;
591 	}
592 
593 	/* Write NVRAM. */
594 	ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
595 	ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
596 
597 	/* Update flash version information for 4Gb & above. */
598 	if (!IS_FWI2_CAPABLE(ha)) {
599 		mutex_unlock(&ha->optrom_mutex);
600 		return -EINVAL;
601 	}
602 
603 	tmp_data = vmalloc(256);
604 	if (!tmp_data) {
605 		mutex_unlock(&ha->optrom_mutex);
606 		ql_log(ql_log_warn, vha, 0x706b,
607 		    "Unable to allocate memory for VPD information update.\n");
608 		return -ENOMEM;
609 	}
610 	ha->isp_ops->get_flash_version(vha, tmp_data);
611 	vfree(tmp_data);
612 
613 	mutex_unlock(&ha->optrom_mutex);
614 
615 	return count;
616 }
617 
618 static struct bin_attribute sysfs_vpd_attr = {
619 	.attr = {
620 		.name = "vpd",
621 		.mode = S_IRUSR | S_IWUSR,
622 	},
623 	.size = 0,
624 	.read = qla2x00_sysfs_read_vpd,
625 	.write = qla2x00_sysfs_write_vpd,
626 };
627 
628 static ssize_t
629 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
630 		       struct bin_attribute *bin_attr,
631 		       char *buf, loff_t off, size_t count)
632 {
633 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
634 	    struct device, kobj)));
635 	int rval;
636 
637 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
638 		return 0;
639 
640 	mutex_lock(&vha->hw->optrom_mutex);
641 	if (qla2x00_chip_is_down(vha)) {
642 		mutex_unlock(&vha->hw->optrom_mutex);
643 		return 0;
644 	}
645 
646 	rval = qla2x00_read_sfp_dev(vha, buf, count);
647 	mutex_unlock(&vha->hw->optrom_mutex);
648 
649 	if (rval)
650 		return -EIO;
651 
652 	return count;
653 }
654 
655 static struct bin_attribute sysfs_sfp_attr = {
656 	.attr = {
657 		.name = "sfp",
658 		.mode = S_IRUSR | S_IWUSR,
659 	},
660 	.size = SFP_DEV_SIZE,
661 	.read = qla2x00_sysfs_read_sfp,
662 };
663 
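/*
 * Command codes accepted by writes to "reset", matching the switch
 * below.  Note the value is parsed in base 10, so for example 0x2025c
 * must be written as its decimal form, 131676.
 *
 *   0x2025c - ISP reset
 *   0x2025d - MPI reset (ISP81xx/ISP83xx families)
 *   0x2025e - FCoE context reset (P3P base port only)
 *   0x2025f - disable reset via IDC control (ISP8031)
 *   0x20260 - enable reset via IDC control (ISP8031)
 *   0x20261 - refresh cached flash version info without a reset
 */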
664 static ssize_t
665 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
666 			struct bin_attribute *bin_attr,
667 			char *buf, loff_t off, size_t count)
668 {
669 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
670 	    struct device, kobj)));
671 	struct qla_hw_data *ha = vha->hw;
672 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
673 	int type;
674 	uint32_t idc_control;
675 	uint8_t *tmp_data = NULL;
676 
677 	if (off != 0)
678 		return -EINVAL;
679 
680 	type = simple_strtol(buf, NULL, 10);
681 	switch (type) {
682 	case 0x2025c:
683 		ql_log(ql_log_info, vha, 0x706e,
684 		    "Issuing ISP reset.\n");
685 
686 		scsi_block_requests(vha->host);
687 		if (IS_QLA82XX(ha)) {
688 			ha->flags.isp82xx_no_md_cap = 1;
689 			qla82xx_idc_lock(ha);
690 			qla82xx_set_reset_owner(vha);
691 			qla82xx_idc_unlock(ha);
692 		} else if (IS_QLA8044(ha)) {
693 			qla8044_idc_lock(ha);
694 			idc_control = qla8044_rd_reg(ha,
695 			    QLA8044_IDC_DRV_CTRL);
696 			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
697 			    (idc_control | GRACEFUL_RESET_BIT1));
698 			qla82xx_set_reset_owner(vha);
699 			qla8044_idc_unlock(ha);
700 		} else {
701 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
702 			qla2xxx_wake_dpc(vha);
703 		}
704 		qla2x00_wait_for_chip_reset(vha);
705 		scsi_unblock_requests(vha->host);
706 		break;
707 	case 0x2025d:
708 		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
709 			return -EPERM;
710 
711 		ql_log(ql_log_info, vha, 0x706f,
712 		    "Issuing MPI reset.\n");
713 
714 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
715 			uint32_t idc_control;
716 
717 			qla83xx_idc_lock(vha, 0);
718 			__qla83xx_get_idc_control(vha, &idc_control);
719 			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
720 			__qla83xx_set_idc_control(vha, idc_control);
721 			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
722 			    QLA8XXX_DEV_NEED_RESET);
723 			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
724 			qla83xx_idc_unlock(vha, 0);
725 			break;
726 		} else {
727 			/* Make sure FC side is not in reset */
728 			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
729 				     QLA_SUCCESS);
730 
731 			/* Issue MPI reset */
732 			scsi_block_requests(vha->host);
733 			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
734 				ql_log(ql_log_warn, vha, 0x7070,
735 				    "MPI reset failed.\n");
736 			scsi_unblock_requests(vha->host);
737 			break;
738 		}
739 	case 0x2025e:
740 		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
741 			ql_log(ql_log_info, vha, 0x7071,
742 			    "FCoE ctx reset not supported.\n");
743 			return -EPERM;
744 		}
745 
746 		ql_log(ql_log_info, vha, 0x7072,
747 		    "Issuing FCoE ctx reset.\n");
748 		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
749 		qla2xxx_wake_dpc(vha);
750 		qla2x00_wait_for_fcoe_ctx_reset(vha);
751 		break;
752 	case 0x2025f:
753 		if (!IS_QLA8031(ha))
754 			return -EPERM;
755 		ql_log(ql_log_info, vha, 0x70bc,
756 		    "Disabling Reset by IDC control\n");
757 		qla83xx_idc_lock(vha, 0);
758 		__qla83xx_get_idc_control(vha, &idc_control);
759 		idc_control |= QLA83XX_IDC_RESET_DISABLED;
760 		__qla83xx_set_idc_control(vha, idc_control);
761 		qla83xx_idc_unlock(vha, 0);
762 		break;
763 	case 0x20260:
764 		if (!IS_QLA8031(ha))
765 			return -EPERM;
766 		ql_log(ql_log_info, vha, 0x70bd,
767 		    "Enabling Reset by IDC control\n");
768 		qla83xx_idc_lock(vha, 0);
769 		__qla83xx_get_idc_control(vha, &idc_control);
770 		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
771 		__qla83xx_set_idc_control(vha, idc_control);
772 		qla83xx_idc_unlock(vha, 0);
773 		break;
774 	case 0x20261:
775 		ql_dbg(ql_dbg_user, vha, 0x70e0,
776 		    "Updating cache versions without reset ");
777 
778 		tmp_data = vmalloc(256);
779 		if (!tmp_data) {
780 			ql_log(ql_log_warn, vha, 0x70e1,
781 			    "Unable to allocate memory for VPD information update.\n");
782 			return -ENOMEM;
783 		}
784 		ha->isp_ops->get_flash_version(vha, tmp_data);
785 		vfree(tmp_data);
786 		break;
787 	}
788 	return count;
789 }
790 
791 static struct bin_attribute sysfs_reset_attr = {
792 	.attr = {
793 		.name = "reset",
794 		.mode = S_IWUSR,
795 	},
796 	.size = 0,
797 	.write = qla2x00_sysfs_write_reset,
798 };
799 
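/*
 * The value written to "issue_logo" is parsed as a decimal integer
 * whose low 24 bits encode the destination port ID: bits 23:16 are the
 * domain, bits 15:8 the area and bits 7:0 the AL_PA.  As a hypothetical
 * example, port ID 01:02:03 would be written as 66051 (0x010203).
 */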
800 static ssize_t
801 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
802 			struct bin_attribute *bin_attr,
803 			char *buf, loff_t off, size_t count)
804 {
805 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
806 	    struct device, kobj)));
807 	int type;
808 	port_id_t did;
809 
810 	if (!capable(CAP_SYS_ADMIN))
811 		return 0;
812 
813 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
814 		return 0;
815 
816 	if (qla2x00_chip_is_down(vha))
817 		return 0;
818 
819 	type = simple_strtol(buf, NULL, 10);
820 
821 	did.b.domain = (type & 0x00ff0000) >> 16;
822 	did.b.area = (type & 0x0000ff00) >> 8;
823 	did.b.al_pa = (type & 0x000000ff);
824 
825 	ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
826 	    did.b.domain, did.b.area, did.b.al_pa);
827 
828 	ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
829 
830 	qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
831 	return count;
832 }
833 
834 static struct bin_attribute sysfs_issue_logo_attr = {
835 	.attr = {
836 		.name = "issue_logo",
837 		.mode = S_IWUSR,
838 	},
839 	.size = 0,
840 	.write = qla2x00_issue_logo,
841 };
842 
843 static ssize_t
844 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
845 		       struct bin_attribute *bin_attr,
846 		       char *buf, loff_t off, size_t count)
847 {
848 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
849 	    struct device, kobj)));
850 	struct qla_hw_data *ha = vha->hw;
851 	int rval;
852 	uint16_t actual_size;
853 
854 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
855 		return 0;
856 
857 	if (unlikely(pci_channel_offline(ha->pdev)))
858 		return 0;
859 	mutex_lock(&vha->hw->optrom_mutex);
860 	if (qla2x00_chip_is_down(vha)) {
861 		mutex_unlock(&vha->hw->optrom_mutex);
862 		return 0;
863 	}
864 
865 	if (ha->xgmac_data)
866 		goto do_read;
867 
868 	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
869 	    &ha->xgmac_data_dma, GFP_KERNEL);
870 	if (!ha->xgmac_data) {
871 		mutex_unlock(&vha->hw->optrom_mutex);
872 		ql_log(ql_log_warn, vha, 0x7076,
873 		    "Unable to allocate memory for XGMAC read-data.\n");
874 		return 0;
875 	}
876 
877 do_read:
878 	actual_size = 0;
879 	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
880 
881 	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
882 	    XGMAC_DATA_SIZE, &actual_size);
883 
884 	mutex_unlock(&vha->hw->optrom_mutex);
885 	if (rval != QLA_SUCCESS) {
886 		ql_log(ql_log_warn, vha, 0x7077,
887 		    "Unable to read XGMAC data (%x).\n", rval);
888 		count = 0;
889 	}
890 
891 	count = actual_size > count ? count : actual_size;
892 	memcpy(buf, ha->xgmac_data, count);
893 
894 	return count;
895 }
896 
897 static struct bin_attribute sysfs_xgmac_stats_attr = {
898 	.attr = {
899 		.name = "xgmac_stats",
900 		.mode = S_IRUSR,
901 	},
902 	.size = 0,
903 	.read = qla2x00_sysfs_read_xgmac_stats,
904 };
905 
906 static ssize_t
907 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
908 		       struct bin_attribute *bin_attr,
909 		       char *buf, loff_t off, size_t count)
910 {
911 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
912 	    struct device, kobj)));
913 	struct qla_hw_data *ha = vha->hw;
914 	int rval;
915 
916 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
917 		return 0;
918 
919 	if (ha->dcbx_tlv)
920 		goto do_read;
921 	mutex_lock(&vha->hw->optrom_mutex);
922 	if (qla2x00_chip_is_down(vha)) {
923 		mutex_unlock(&vha->hw->optrom_mutex);
924 		return 0;
925 	}
926 
927 	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
928 	    &ha->dcbx_tlv_dma, GFP_KERNEL);
929 	if (!ha->dcbx_tlv) {
930 		mutex_unlock(&vha->hw->optrom_mutex);
931 		ql_log(ql_log_warn, vha, 0x7078,
932 		    "Unable to allocate memory for DCBX TLV read-data.\n");
933 		return -ENOMEM;
934 	}
935 
936 do_read:
937 	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
938 
939 	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
940 	    DCBX_TLV_DATA_SIZE);
941 
942 	mutex_unlock(&vha->hw->optrom_mutex);
943 
944 	if (rval != QLA_SUCCESS) {
945 		ql_log(ql_log_warn, vha, 0x7079,
946 		    "Unable to read DCBX TLV (%x).\n", rval);
947 		return -EIO;
948 	}
949 
950 	memcpy(buf, ha->dcbx_tlv, count);
951 
952 	return count;
953 }
954 
955 static struct bin_attribute sysfs_dcbx_tlv_attr = {
956 	.attr = {
957 		.name = "dcbx_tlv",
958 		.mode = S_IRUSR,
959 	},
960 	.size = 0,
961 	.read = qla2x00_sysfs_read_dcbx_tlv,
962 };
963 
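/*
 * In short, the "type" field below gates attribute creation in
 * qla2x00_alloc_sysfs_attr(): 0 means always create, any non-zero value
 * requires an FWI2-capable (ISP24xx or later) adapter, 2 additionally
 * requires ISP25xx, and 3 requires a CNA-capable adapter.  No entry in
 * the current table uses type 2.
 */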
964 static struct sysfs_entry {
965 	char *name;
966 	struct bin_attribute *attr;
967 	int type;
968 } bin_file_entries[] = {
969 	{ "fw_dump", &sysfs_fw_dump_attr, },
970 	{ "nvram", &sysfs_nvram_attr, },
971 	{ "optrom", &sysfs_optrom_attr, },
972 	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
973 	{ "vpd", &sysfs_vpd_attr, 1 },
974 	{ "sfp", &sysfs_sfp_attr, 1 },
975 	{ "reset", &sysfs_reset_attr, },
976 	{ "issue_logo", &sysfs_issue_logo_attr, },
977 	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
978 	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
979 	{ NULL },
980 };
981 
982 void
983 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
984 {
985 	struct Scsi_Host *host = vha->host;
986 	struct sysfs_entry *iter;
987 	int ret;
988 
989 	for (iter = bin_file_entries; iter->name; iter++) {
990 		if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
991 			continue;
992 		if (iter->type == 2 && !IS_QLA25XX(vha->hw))
993 			continue;
994 		if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
995 			continue;
996 
997 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
998 		    iter->attr);
999 		if (ret)
1000 			ql_log(ql_log_warn, vha, 0x00f3,
1001 			    "Unable to create sysfs %s binary attribute (%d).\n",
1002 			    iter->name, ret);
1003 		else
1004 			ql_dbg(ql_dbg_init, vha, 0x00f4,
1005 			    "Successfully created sysfs %s binary attribute.\n",
1006 			    iter->name);
1007 	}
1008 }
1009 
1010 void
1011 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1012 {
1013 	struct Scsi_Host *host = vha->host;
1014 	struct sysfs_entry *iter;
1015 	struct qla_hw_data *ha = vha->hw;
1016 
1017 	for (iter = bin_file_entries; iter->name; iter++) {
1018 		if (iter->type && !IS_FWI2_CAPABLE(ha))
1019 			continue;
1020 		if (iter->type == 2 && !IS_QLA25XX(ha))
1021 			continue;
1022 		if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1023 			continue;
1024 		if (iter->type == 0x27 &&
1025 		    (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
1026 			continue;
1027 
1028 		sysfs_remove_bin_file(&host->shost_gendev.kobj,
1029 		    iter->attr);
1030 	}
1031 
1032 	if (stop_beacon && ha->beacon_blink_led == 1)
1033 		ha->isp_ops->beacon_off(vha);
1034 }
1035 
1036 /* Scsi_Host attributes. */
1037 
1038 static ssize_t
1039 qla2x00_driver_version_show(struct device *dev,
1040 			  struct device_attribute *attr, char *buf)
1041 {
1042 	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1043 }
1044 
1045 static ssize_t
1046 qla2x00_fw_version_show(struct device *dev,
1047 			struct device_attribute *attr, char *buf)
1048 {
1049 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1050 	struct qla_hw_data *ha = vha->hw;
1051 	char fw_str[128];
1052 
1053 	return scnprintf(buf, PAGE_SIZE, "%s\n",
1054 	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1055 }
1056 
1057 static ssize_t
1058 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1059 			char *buf)
1060 {
1061 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1062 	struct qla_hw_data *ha = vha->hw;
1063 	uint32_t sn;
1064 
1065 	if (IS_QLAFX00(vha->hw)) {
1066 		return scnprintf(buf, PAGE_SIZE, "%s\n",
1067 		    vha->hw->mr.serial_num);
1068 	} else if (IS_FWI2_CAPABLE(ha)) {
1069 		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1070 		return strlen(strcat(buf, "\n"));
1071 	}
1072 
1073 	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1074 	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1075 	    sn % 100000);
1076 }
1077 
1078 static ssize_t
1079 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1080 		      char *buf)
1081 {
1082 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1083 
1084 	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1085 }
1086 
1087 static ssize_t
1088 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1089 		    char *buf)
1090 {
1091 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1092 	struct qla_hw_data *ha = vha->hw;
1093 
1094 	if (IS_QLAFX00(vha->hw))
1095 		return scnprintf(buf, PAGE_SIZE, "%s\n",
1096 		    vha->hw->mr.hw_version);
1097 
1098 	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1099 	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
1100 	    ha->product_id[3]);
1101 }
1102 
1103 static ssize_t
1104 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1105 			char *buf)
1106 {
1107 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1108 
1109 	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1110 }
1111 
1112 static ssize_t
1113 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1114 			char *buf)
1115 {
1116 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1117 
1118 	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1119 }
1120 
1121 static ssize_t
1122 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1123 		      char *buf)
1124 {
1125 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1126 	char pci_info[30];
1127 
1128 	return scnprintf(buf, PAGE_SIZE, "%s\n",
1129 			 vha->hw->isp_ops->pci_info_str(vha, pci_info,
1130 							sizeof(pci_info)));
1131 }
1132 
1133 static ssize_t
1134 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1135 			char *buf)
1136 {
1137 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1138 	struct qla_hw_data *ha = vha->hw;
1139 	int len = 0;
1140 
1141 	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1142 	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
1143 	    vha->device_flags & DFLG_NO_CABLE)
1144 		len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1145 	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1146 	    qla2x00_chip_is_down(vha))
1147 		len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1148 	else {
1149 		len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1150 
1151 		switch (ha->current_topology) {
1152 		case ISP_CFG_NL:
1153 			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1154 			break;
1155 		case ISP_CFG_FL:
1156 			len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1157 			break;
1158 		case ISP_CFG_N:
1159 			len += scnprintf(buf + len, PAGE_SIZE-len,
1160 			    "N_Port to N_Port\n");
1161 			break;
1162 		case ISP_CFG_F:
1163 			len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1164 			break;
1165 		default:
1166 			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1167 			break;
1168 		}
1169 	}
1170 	return len;
1171 }
1172 
1173 static ssize_t
1174 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1175 		 char *buf)
1176 {
1177 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1178 	int len = 0;
1179 
1180 	switch (vha->hw->zio_mode) {
1181 	case QLA_ZIO_MODE_6:
1182 		len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1183 		break;
1184 	case QLA_ZIO_DISABLED:
1185 		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1186 		break;
1187 	}
1188 	return len;
1189 }
1190 
1191 static ssize_t
1192 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1193 		  const char *buf, size_t count)
1194 {
1195 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1196 	struct qla_hw_data *ha = vha->hw;
1197 	int val = 0;
1198 	uint16_t zio_mode;
1199 
1200 	if (!IS_ZIO_SUPPORTED(ha))
1201 		return -ENOTSUPP;
1202 
1203 	if (sscanf(buf, "%d", &val) != 1)
1204 		return -EINVAL;
1205 
1206 	if (val)
1207 		zio_mode = QLA_ZIO_MODE_6;
1208 	else
1209 		zio_mode = QLA_ZIO_DISABLED;
1210 
1211 	/* Update per-hba values and queue a reset. */
1212 	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1213 		ha->zio_mode = zio_mode;
1214 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1215 	}
1216 	return strlen(buf);
1217 }
1218 
1219 static ssize_t
1220 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1221 		       char *buf)
1222 {
1223 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1224 
1225 	return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1226 }
1227 
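/*
 * The ZIO timer is kept internally in 100-microsecond units: the store
 * handler below accepts 100..25500 (microseconds) and divides by 100,
 * while the show handler above multiplies back.  For example, writing
 * 500 stores a value of 5, which reads back as "500 us".
 */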
1228 static ssize_t
1229 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1230 			const char *buf, size_t count)
1231 {
1232 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1233 	int val = 0;
1234 	uint16_t zio_timer;
1235 
1236 	if (sscanf(buf, "%d", &val) != 1)
1237 		return -EINVAL;
1238 	if (val > 25500 || val < 100)
1239 		return -ERANGE;
1240 
1241 	zio_timer = (uint16_t)(val / 100);
1242 	vha->hw->zio_timer = zio_timer;
1243 
1244 	return strlen(buf);
1245 }
1246 
1247 static ssize_t
1248 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1249 		       char *buf)
1250 {
1251 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1252 
1253 	return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1254 	    vha->hw->last_zio_threshold);
1255 }
1256 
1257 static ssize_t
1258 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1259     const char *buf, size_t count)
1260 {
1261 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1262 	int val = 0;
1263 
1264 	if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1265 		return -EINVAL;
1266 	if (sscanf(buf, "%d", &val) != 1)
1267 		return -EINVAL;
1268 	if (val < 0 || val > 256)
1269 		return -ERANGE;
1270 
1271 	atomic_set(&vha->hw->zio_threshold, val);
1272 	return strlen(buf);
1273 }
1274 
1275 static ssize_t
1276 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1277 		    char *buf)
1278 {
1279 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1280 	int len = 0;
1281 
1282 	if (vha->hw->beacon_blink_led)
1283 		len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1284 	else
1285 		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1286 	return len;
1287 }
1288 
1289 static ssize_t
1290 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1291 		     const char *buf, size_t count)
1292 {
1293 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1294 	struct qla_hw_data *ha = vha->hw;
1295 	int val = 0;
1296 	int rval;
1297 
1298 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1299 		return -EPERM;
1300 
1301 	if (sscanf(buf, "%d", &val) != 1)
1302 		return -EINVAL;
1303 
1304 	mutex_lock(&vha->hw->optrom_mutex);
1305 	if (qla2x00_chip_is_down(vha)) {
1306 		mutex_unlock(&vha->hw->optrom_mutex);
1307 		ql_log(ql_log_warn, vha, 0x707a,
1308 		    "Abort ISP active -- ignoring beacon request.\n");
1309 		return -EBUSY;
1310 	}
1311 
1312 	if (val)
1313 		rval = ha->isp_ops->beacon_on(vha);
1314 	else
1315 		rval = ha->isp_ops->beacon_off(vha);
1316 
1317 	if (rval != QLA_SUCCESS)
1318 		count = 0;
1319 
1320 	mutex_unlock(&vha->hw->optrom_mutex);
1321 
1322 	return count;
1323 }
1324 
1325 static ssize_t
1326 qla2x00_optrom_bios_version_show(struct device *dev,
1327 				 struct device_attribute *attr, char *buf)
1328 {
1329 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1330 	struct qla_hw_data *ha = vha->hw;
1331 
1332 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1333 	    ha->bios_revision[0]);
1334 }
1335 
1336 static ssize_t
1337 qla2x00_optrom_efi_version_show(struct device *dev,
1338 				struct device_attribute *attr, char *buf)
1339 {
1340 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1341 	struct qla_hw_data *ha = vha->hw;
1342 
1343 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1344 	    ha->efi_revision[0]);
1345 }
1346 
1347 static ssize_t
1348 qla2x00_optrom_fcode_version_show(struct device *dev,
1349 				  struct device_attribute *attr, char *buf)
1350 {
1351 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1352 	struct qla_hw_data *ha = vha->hw;
1353 
1354 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1355 	    ha->fcode_revision[0]);
1356 }
1357 
1358 static ssize_t
1359 qla2x00_optrom_fw_version_show(struct device *dev,
1360 			       struct device_attribute *attr, char *buf)
1361 {
1362 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1363 	struct qla_hw_data *ha = vha->hw;
1364 
1365 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1366 	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1367 	    ha->fw_revision[3]);
1368 }
1369 
1370 static ssize_t
1371 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1372     struct device_attribute *attr, char *buf)
1373 {
1374 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1375 	struct qla_hw_data *ha = vha->hw;
1376 
1377 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1378 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1379 		return scnprintf(buf, PAGE_SIZE, "\n");
1380 
1381 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1382 	    ha->gold_fw_version[0], ha->gold_fw_version[1],
1383 	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
1384 }
1385 
1386 static ssize_t
1387 qla2x00_total_isp_aborts_show(struct device *dev,
1388 			      struct device_attribute *attr, char *buf)
1389 {
1390 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1391 
1392 	return scnprintf(buf, PAGE_SIZE, "%d\n",
1393 	    vha->qla_stats.total_isp_aborts);
1394 }
1395 
1396 static ssize_t
1397 qla24xx_84xx_fw_version_show(struct device *dev,
1398 	struct device_attribute *attr, char *buf)
1399 {
1400 	int rval = QLA_SUCCESS;
1401 	uint16_t status[2] = { 0 };
1402 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1403 	struct qla_hw_data *ha = vha->hw;
1404 
1405 	if (!IS_QLA84XX(ha))
1406 		return scnprintf(buf, PAGE_SIZE, "\n");
1407 
1408 	if (!ha->cs84xx->op_fw_version) {
1409 		rval = qla84xx_verify_chip(vha, status);
1410 
1411 		if (!rval && !status[0])
1412 			return scnprintf(buf, PAGE_SIZE, "%u\n",
1413 			    (uint32_t)ha->cs84xx->op_fw_version);
1414 	}
1415 
1416 	return scnprintf(buf, PAGE_SIZE, "\n");
1417 }
1418 
1419 static ssize_t
1420 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1421     char *buf)
1422 {
1423 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1424 	struct qla_hw_data *ha = vha->hw;
1425 
1426 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1427 		return scnprintf(buf, PAGE_SIZE, "\n");
1428 
1429 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1430 	    ha->serdes_version[0], ha->serdes_version[1],
1431 	    ha->serdes_version[2]);
1432 }
1433 
1434 static ssize_t
1435 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1436     char *buf)
1437 {
1438 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1439 	struct qla_hw_data *ha = vha->hw;
1440 
1441 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1442 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1443 		return scnprintf(buf, PAGE_SIZE, "\n");
1444 
1445 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1446 	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1447 	    ha->mpi_capabilities);
1448 }
1449 
1450 static ssize_t
1451 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1452     char *buf)
1453 {
1454 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1455 	struct qla_hw_data *ha = vha->hw;
1456 
1457 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1458 		return scnprintf(buf, PAGE_SIZE, "\n");
1459 
1460 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1461 	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1462 }
1463 
1464 static ssize_t
1465 qla2x00_flash_block_size_show(struct device *dev,
1466 			      struct device_attribute *attr, char *buf)
1467 {
1468 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1469 	struct qla_hw_data *ha = vha->hw;
1470 
1471 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1472 }
1473 
1474 static ssize_t
1475 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1476     char *buf)
1477 {
1478 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1479 
1480 	if (!IS_CNA_CAPABLE(vha->hw))
1481 		return scnprintf(buf, PAGE_SIZE, "\n");
1482 
1483 	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1484 }
1485 
1486 static ssize_t
1487 qla2x00_vn_port_mac_address_show(struct device *dev,
1488     struct device_attribute *attr, char *buf)
1489 {
1490 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1491 
1492 	if (!IS_CNA_CAPABLE(vha->hw))
1493 		return scnprintf(buf, PAGE_SIZE, "\n");
1494 
1495 	return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1496 }
1497 
1498 static ssize_t
1499 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1500     char *buf)
1501 {
1502 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1503 
1504 	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1505 }
1506 
1507 static ssize_t
1508 qla2x00_thermal_temp_show(struct device *dev,
1509 	struct device_attribute *attr, char *buf)
1510 {
1511 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1512 	uint16_t temp = 0;
1513 	int rc;
1514 
1515 	mutex_lock(&vha->hw->optrom_mutex);
1516 	if (qla2x00_chip_is_down(vha)) {
1517 		mutex_unlock(&vha->hw->optrom_mutex);
1518 		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1519 		goto done;
1520 	}
1521 
1522 	if (vha->hw->flags.eeh_busy) {
1523 		mutex_unlock(&vha->hw->optrom_mutex);
1524 		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1525 		goto done;
1526 	}
1527 
1528 	rc = qla2x00_get_thermal_temp(vha, &temp);
1529 	mutex_unlock(&vha->hw->optrom_mutex);
1530 	if (rc == QLA_SUCCESS)
1531 		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1532 
1533 done:
1534 	return scnprintf(buf, PAGE_SIZE, "\n");
1535 }
1536 
1537 static ssize_t
1538 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1539     char *buf)
1540 {
1541 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1542 	int rval = QLA_FUNCTION_FAILED;
1543 	uint16_t state[6];
1544 	uint32_t pstate;
1545 
1546 	if (IS_QLAFX00(vha->hw)) {
1547 		pstate = qlafx00_fw_state_show(dev, attr, buf);
1548 		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1549 	}
1550 
1551 	mutex_lock(&vha->hw->optrom_mutex);
1552 	if (qla2x00_chip_is_down(vha)) {
1553 		mutex_unlock(&vha->hw->optrom_mutex);
1554 		ql_log(ql_log_warn, vha, 0x707c,
1555 		    "ISP reset active.\n");
1556 		goto out;
1557 	} else if (vha->hw->flags.eeh_busy) {
1558 		mutex_unlock(&vha->hw->optrom_mutex);
1559 		goto out;
1560 	}
1561 
1562 	rval = qla2x00_get_firmware_state(vha, state);
1563 	mutex_unlock(&vha->hw->optrom_mutex);
1564 out:
1565 	if (rval != QLA_SUCCESS) {
1566 		memset(state, -1, sizeof(state));
1567 		rval = qla2x00_get_firmware_state(vha, state);
1568 	}
1569 
1570 	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1571 	    state[0], state[1], state[2], state[3], state[4], state[5]);
1572 }
1573 
1574 static ssize_t
1575 qla2x00_diag_requests_show(struct device *dev,
1576 	struct device_attribute *attr, char *buf)
1577 {
1578 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1579 
1580 	if (!IS_BIDI_CAPABLE(vha->hw))
1581 		return scnprintf(buf, PAGE_SIZE, "\n");
1582 
1583 	return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1584 }
1585 
1586 static ssize_t
1587 qla2x00_diag_megabytes_show(struct device *dev,
1588 	struct device_attribute *attr, char *buf)
1589 {
1590 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1591 
1592 	if (!IS_BIDI_CAPABLE(vha->hw))
1593 		return scnprintf(buf, PAGE_SIZE, "\n");
1594 
1595 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
1596 	    vha->bidi_stats.transfer_bytes >> 20);
1597 }
1598 
1599 static ssize_t
1600 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1601 	char *buf)
1602 {
1603 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1604 	struct qla_hw_data *ha = vha->hw;
1605 	uint32_t size;
1606 
1607 	if (!ha->fw_dumped)
1608 		size = 0;
1609 	else if (IS_P3P_TYPE(ha))
1610 		size = ha->md_template_size + ha->md_dump_size;
1611 	else
1612 		size = ha->fw_dump_len;
1613 
1614 	return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1615 }
1616 
1617 static ssize_t
1618 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1619 	struct device_attribute *attr, char *buf)
1620 {
1621 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1622 
1623 	if (!IS_P3P_TYPE(vha->hw))
1624 		return scnprintf(buf, PAGE_SIZE, "\n");
1625 	else
1626 		return scnprintf(buf, PAGE_SIZE, "%s\n",
1627 		    vha->hw->allow_cna_fw_dump ? "true" : "false");
1628 }
1629 
1630 static ssize_t
1631 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1632 	struct device_attribute *attr, const char *buf, size_t count)
1633 {
1634 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1635 	int val = 0;
1636 
1637 	if (!IS_P3P_TYPE(vha->hw))
1638 		return -EINVAL;
1639 
1640 	if (sscanf(buf, "%d", &val) != 1)
1641 		return -EINVAL;
1642 
1643 	vha->hw->allow_cna_fw_dump = val != 0;
1644 
1645 	return strlen(buf);
1646 }
1647 
1648 static ssize_t
1649 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1650 	char *buf)
1651 {
1652 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1653 	struct qla_hw_data *ha = vha->hw;
1654 
1655 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1656 		return scnprintf(buf, PAGE_SIZE, "\n");
1657 
1658 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1659 	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1660 }
1661 
1662 static ssize_t
1663 qla2x00_min_supported_speed_show(struct device *dev,
1664     struct device_attribute *attr, char *buf)
1665 {
1666 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1667 	struct qla_hw_data *ha = vha->hw;
1668 
1669 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1670 		return scnprintf(buf, PAGE_SIZE, "\n");
1671 
1672 	return scnprintf(buf, PAGE_SIZE, "%s\n",
1673 	    ha->min_supported_speed == 6 ? "64Gps" :
1674 	    ha->min_supported_speed == 5 ? "32Gps" :
1675 	    ha->min_supported_speed == 4 ? "16Gps" :
1676 	    ha->min_supported_speed == 3 ? "8Gps" :
1677 	    ha->min_supported_speed == 2 ? "4Gps" :
1678 	    ha->min_supported_speed != 0 ? "unknown" : "");
1679 }
1680 
1681 static ssize_t
1682 qla2x00_max_supported_speed_show(struct device *dev,
1683     struct device_attribute *attr, char *buf)
1684 {
1685 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1686 	struct qla_hw_data *ha = vha->hw;
1687 
1688 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1689 		return scnprintf(buf, PAGE_SIZE, "\n");
1690 
1691 	return scnprintf(buf, PAGE_SIZE, "%s\n",
1692 	    ha->max_supported_speed  == 2 ? "64Gps" :
1693 	    ha->max_supported_speed  == 1 ? "32Gps" :
1694 	    ha->max_supported_speed  == 0 ? "16Gps" : "unknown");
1695 }
1696 
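/*
 * Sketch of the values accepted by "port_speed" on ISP27xx/ISP28xx, per
 * the handler below: 0 selects autonegotiation and 4/8/16/32 select a
 * fixed rate in Gb/s, while 40/80/160/320 select the same rates through
 * the no-LR variant, which (per the message logged below) only takes
 * effect after the next loss of sync.
 */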
1697 static ssize_t
1698 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1699     const char *buf, size_t count)
1700 {
1701 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1702 	ulong type, speed;
1703 	int oldspeed, rval;
1704 	int mode = QLA_SET_DATA_RATE_LR;
1705 	struct qla_hw_data *ha = vha->hw;
1706 
1707 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1708 		ql_log(ql_log_warn, vha, 0x70d8,
1709 		    "Speed setting not supported \n");
1710 		return -EINVAL;
1711 	}
1712 
1713 	rval = kstrtol(buf, 10, &type);
1714 	if (rval)
1715 		return rval;
1716 	speed = type;
1717 	if (type == 40 || type == 80 || type == 160 ||
1718 	    type == 320) {
1719 		ql_dbg(ql_dbg_user, vha, 0x70d9,
1720 		    "Setting will be affected after a loss of sync\n");
1721 		type = type/10;
1722 		mode = QLA_SET_DATA_RATE_NOLR;
1723 	}
1724 
1725 	oldspeed = ha->set_data_rate;
1726 
1727 	switch (type) {
1728 	case 0:
1729 		ha->set_data_rate = PORT_SPEED_AUTO;
1730 		break;
1731 	case 4:
1732 		ha->set_data_rate = PORT_SPEED_4GB;
1733 		break;
1734 	case 8:
1735 		ha->set_data_rate = PORT_SPEED_8GB;
1736 		break;
1737 	case 16:
1738 		ha->set_data_rate = PORT_SPEED_16GB;
1739 		break;
1740 	case 32:
1741 		ha->set_data_rate = PORT_SPEED_32GB;
1742 		break;
1743 	default:
1744 		ql_log(ql_log_warn, vha, 0x1199,
1745 		    "Unrecognized speed setting:%lx. Setting Autoneg\n",
1746 		    speed);
1747 		ha->set_data_rate = PORT_SPEED_AUTO;
1748 	}
1749 
1750 	if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1751 		return -EINVAL;
1752 
1753 	ql_log(ql_log_info, vha, 0x70da,
1754 	    "Setting speed to %lx Gbps \n", type);
1755 
1756 	rval = qla2x00_set_data_rate(vha, mode);
1757 	if (rval != QLA_SUCCESS)
1758 		return -EIO;
1759 
1760 	return strlen(buf);
1761 }
1762 
1763 static ssize_t
1764 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1765     char *buf)
1766 {
1767 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1768 	struct qla_hw_data *ha = vha->hw;
1769 	ssize_t rval;
1770 	char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
1771 
1772 	rval = qla2x00_get_data_rate(vha);
1773 	if (rval != QLA_SUCCESS) {
1774 		ql_log(ql_log_warn, vha, 0x70db,
1775 		    "Unable to get port speed rval:%zd\n", rval);
1776 		return -EINVAL;
1777 	}
1778 
1779 	ql_log(ql_log_info, vha, 0x70d6,
1780 	    "port speed:%d\n", ha->link_data_rate);
1781 
1782 	return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
1783 }
1784 
1785 /* ----- */
1786 
1787 static ssize_t
1788 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1789 {
1790 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1791 	int len = 0;
1792 
1793 	len += scnprintf(buf + len, PAGE_SIZE-len,
1794 	    "Supported options: enabled | disabled | dual | exclusive\n");
1795 
1796 	/* --- */
1797 	len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1798 
1799 	switch (vha->qlini_mode) {
1800 	case QLA2XXX_INI_MODE_EXCLUSIVE:
1801 		len += scnprintf(buf + len, PAGE_SIZE-len,
1802 		    QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1803 		break;
1804 	case QLA2XXX_INI_MODE_DISABLED:
1805 		len += scnprintf(buf + len, PAGE_SIZE-len,
1806 		    QLA2XXX_INI_MODE_STR_DISABLED);
1807 		break;
1808 	case QLA2XXX_INI_MODE_ENABLED:
1809 		len += scnprintf(buf + len, PAGE_SIZE-len,
1810 		    QLA2XXX_INI_MODE_STR_ENABLED);
1811 		break;
1812 	case QLA2XXX_INI_MODE_DUAL:
1813 		len += scnprintf(buf + len, PAGE_SIZE-len,
1814 		    QLA2XXX_INI_MODE_STR_DUAL);
1815 		break;
1816 	}
1817 	len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1818 
1819 	return len;
1820 }
1821 
1822 static char *mode_to_str[] = {
1823 	"exclusive",
1824 	"disabled",
1825 	"enabled",
1826 	"dual",
1827 };
1828 
1829 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
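/*
 * qla_set_ini_mode() compares a requested initiator mode "op" (an index
 * into mode_to_str[] above) against the current vha->qlini_mode and
 * picks one of the actions in the enum below.  eo_toggle records
 * whether the exchange-offload setting would flip as a side effect,
 * which by itself is enough for a mode change to be accepted.
 */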
1830 static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1831 {
1832 	int rc = 0;
1833 	enum {
1834 		NO_ACTION,
1835 		MODE_CHANGE_ACCEPT,
1836 		MODE_CHANGE_NO_ACTION,
1837 		TARGET_STILL_ACTIVE,
1838 	};
1839 	int action = NO_ACTION;
1840 	int set_mode = 0;
1841 	u8  eo_toggle = 0;	/* exchange offload flipped */
1842 
1843 	switch (vha->qlini_mode) {
1844 	case QLA2XXX_INI_MODE_DISABLED:
1845 		switch (op) {
1846 		case QLA2XXX_INI_MODE_DISABLED:
1847 			if (qla_tgt_mode_enabled(vha)) {
1848 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1849 				    vha->hw->flags.exchoffld_enabled)
1850 					eo_toggle = 1;
1851 				if (((vha->ql2xexchoffld !=
1852 				    vha->u_ql2xexchoffld) &&
1853 				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1854 				    eo_toggle) {
1855 					/*
1856 					 * The number of exchanges to be
1857 					 * offloaded was tweaked or the offload
1858 					 * option was flipped
1859 					 */
1860 					action = MODE_CHANGE_ACCEPT;
1861 				} else {
1862 					action = MODE_CHANGE_NO_ACTION;
1863 				}
1864 			} else {
1865 				action = MODE_CHANGE_NO_ACTION;
1866 			}
1867 			break;
1868 		case QLA2XXX_INI_MODE_EXCLUSIVE:
1869 			if (qla_tgt_mode_enabled(vha)) {
1870 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1871 				    vha->hw->flags.exchoffld_enabled)
1872 					eo_toggle = 1;
1873 				if (((vha->ql2xexchoffld !=
1874 				    vha->u_ql2xexchoffld) &&
1875 				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1876 				    eo_toggle) {
1877 					/*
1878 					 * The number of exchanges to be
1879 					 * offloaded was tweaked or the offload
1880 					 * option was flipped
1881 					 */
1882 					action = MODE_CHANGE_ACCEPT;
1883 				} else {
1884 					action = MODE_CHANGE_NO_ACTION;
1885 				}
1886 			} else {
1887 				action = MODE_CHANGE_ACCEPT;
1888 			}
1889 			break;
1890 		case QLA2XXX_INI_MODE_DUAL:
1891 			action = MODE_CHANGE_ACCEPT;
1892 			/* active_mode is target only, reset it to dual */
1893 			if (qla_tgt_mode_enabled(vha)) {
1894 				set_mode = 1;
1895 				action = MODE_CHANGE_ACCEPT;
1896 			} else {
1897 				action = MODE_CHANGE_NO_ACTION;
1898 			}
1899 			break;
1900 
1901 		case QLA2XXX_INI_MODE_ENABLED:
1902 			if (qla_tgt_mode_enabled(vha))
1903 				action = TARGET_STILL_ACTIVE;
1904 			else {
1905 				action = MODE_CHANGE_ACCEPT;
1906 				set_mode = 1;
1907 			}
1908 			break;
1909 		}
1910 		break;
1911 
1912 	case QLA2XXX_INI_MODE_EXCLUSIVE:
1913 		switch (op) {
1914 		case QLA2XXX_INI_MODE_EXCLUSIVE:
1915 			if (qla_tgt_mode_enabled(vha)) {
1916 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1917 				    vha->hw->flags.exchoffld_enabled)
1918 					eo_toggle = 1;
1919 				if (((vha->ql2xexchoffld !=
1920 				    vha->u_ql2xexchoffld) &&
1921 				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1922 				    eo_toggle)
1923 					/*
1924 					 * The number of exchanges to be
1925 					 * offloaded was tweaked or the
1926 					 * offload option was flipped.
1927 					 */
1928 					action = MODE_CHANGE_ACCEPT;
1929 				else
1930 					action = NO_ACTION;
1931 			} else
1932 				action = NO_ACTION;
1933 
1934 			break;
1935 
1936 		case QLA2XXX_INI_MODE_DISABLED:
1937 			if (qla_tgt_mode_enabled(vha)) {
1938 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1939 				    vha->hw->flags.exchoffld_enabled)
1940 					eo_toggle = 1;
1941 				if (((vha->ql2xexchoffld !=
1942 				      vha->u_ql2xexchoffld) &&
1943 				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1944 				    eo_toggle)
1945 					action = MODE_CHANGE_ACCEPT;
1946 				else
1947 					action = MODE_CHANGE_NO_ACTION;
1948 			} else
1949 				action = MODE_CHANGE_NO_ACTION;
1950 			break;
1951 
1952 		case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
1953 			if (qla_tgt_mode_enabled(vha)) {
1954 				action = MODE_CHANGE_ACCEPT;
1955 				set_mode = 1;
1956 			} else
1957 				action = MODE_CHANGE_ACCEPT;
1958 			break;
1959 
1960 		case QLA2XXX_INI_MODE_ENABLED:
1961 			if (qla_tgt_mode_enabled(vha))
1962 				action = TARGET_STILL_ACTIVE;
1963 			else {
1964 				if (vha->hw->flags.fw_started)
1965 					action = MODE_CHANGE_NO_ACTION;
1966 				else
1967 					action = MODE_CHANGE_ACCEPT;
1968 			}
1969 			break;
1970 		}
1971 		break;
1972 
1973 	case QLA2XXX_INI_MODE_ENABLED:
1974 		switch (op) {
1975 		case QLA2XXX_INI_MODE_ENABLED:
1976 			if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
1977 			    vha->hw->flags.exchoffld_enabled)
1978 				eo_toggle = 1;
1979 			if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
1980 				NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
1981 			    eo_toggle)
1982 				action = MODE_CHANGE_ACCEPT;
1983 			else
1984 				action = NO_ACTION;
1985 			break;
1986 		case QLA2XXX_INI_MODE_DUAL:
1987 		case QLA2XXX_INI_MODE_DISABLED:
1988 			action = MODE_CHANGE_ACCEPT;
1989 			break;
1990 		default:
1991 			action = MODE_CHANGE_NO_ACTION;
1992 			break;
1993 		}
1994 		break;
1995 
1996 	case QLA2XXX_INI_MODE_DUAL:
1997 		switch (op) {
1998 		case QLA2XXX_INI_MODE_DUAL:
1999 			if (qla_tgt_mode_enabled(vha) ||
2000 			    qla_dual_mode_enabled(vha)) {
2001 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2002 					vha->u_ql2xiniexchg) !=
2003 				    vha->hw->flags.exchoffld_enabled)
2004 					eo_toggle = 1;
2005 
2006 				if ((((vha->ql2xexchoffld +
2007 				       vha->ql2xiniexchg) !=
2008 				    (vha->u_ql2xiniexchg +
2009 				     vha->u_ql2xexchoffld)) &&
2010 				    NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2011 					vha->u_ql2xexchoffld)) || eo_toggle)
2012 					action = MODE_CHANGE_ACCEPT;
2013 				else
2014 					action = NO_ACTION;
2015 			} else {
2016 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2017 					vha->u_ql2xiniexchg) !=
2018 				    vha->hw->flags.exchoffld_enabled)
2019 					eo_toggle = 1;
2020 
2021 				if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2022 				    != (vha->u_ql2xiniexchg +
2023 					vha->u_ql2xexchoffld)) &&
2024 				    NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2025 					vha->u_ql2xexchoffld)) || eo_toggle)
2026 					action = MODE_CHANGE_NO_ACTION;
2027 				else
2028 					action = NO_ACTION;
2029 			}
2030 			break;
2031 
2032 		case QLA2XXX_INI_MODE_DISABLED:
2033 			if (qla_tgt_mode_enabled(vha) ||
2034 			    qla_dual_mode_enabled(vha)) {
2035 				/* turning off initiator mode */
2036 				set_mode = 1;
2037 				action = MODE_CHANGE_ACCEPT;
2038 			} else {
2039 				action = MODE_CHANGE_NO_ACTION;
2040 			}
2041 			break;
2042 
2043 		case QLA2XXX_INI_MODE_EXCLUSIVE:
2044 			if (qla_tgt_mode_enabled(vha) ||
2045 			    qla_dual_mode_enabled(vha)) {
2046 				set_mode = 1;
2047 				action = MODE_CHANGE_ACCEPT;
2048 			} else {
2049 				action = MODE_CHANGE_ACCEPT;
2050 			}
2051 			break;
2052 
2053 		case QLA2XXX_INI_MODE_ENABLED:
2054 			if (qla_tgt_mode_enabled(vha) ||
2055 			    qla_dual_mode_enabled(vha)) {
2056 				action = TARGET_STILL_ACTIVE;
2057 			} else {
2058 				action = MODE_CHANGE_ACCEPT;
2059 			}
2060 		}
2061 		break;
2062 	}
2063 
2064 	switch (action) {
2065 	case MODE_CHANGE_ACCEPT:
2066 		ql_log(ql_log_warn, vha, 0xffff,
2067 		    "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2068 		    mode_to_str[vha->qlini_mode], mode_to_str[op],
2069 		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2070 		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2071 
2072 		vha->qlini_mode = op;
2073 		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2074 		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2075 		if (set_mode)
2076 			qlt_set_mode(vha);
2077 		vha->flags.online = 1;
2078 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2079 		break;
2080 
2081 	case MODE_CHANGE_NO_ACTION:
2082 		ql_log(ql_log_warn, vha, 0xffff,
2083 		    "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2084 		    mode_to_str[vha->qlini_mode], mode_to_str[op],
2085 		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2086 		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2087 		vha->qlini_mode = op;
2088 		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2089 		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2090 		break;
2091 
2092 	case TARGET_STILL_ACTIVE:
2093 		ql_log(ql_log_warn, vha, 0xffff,
2094 		    "Target Mode is active. Unable to change Mode.\n");
2095 		break;
2096 
2097 	case NO_ACTION:
2098 	default:
2099 		ql_log(ql_log_warn, vha, 0xffff,
2100 		    "Mode unchanged. No action taken. %d|%d pct %d|%d.\n",
2101 		    vha->qlini_mode, op,
2102 		    vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2103 		break;
2104 	}
2105 
2106 	return rc;
2107 }
2108 
2109 static ssize_t
2110 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2111     const char *buf, size_t count)
2112 {
2113 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2114 	int ini;
2115 
2116 	if (!buf)
2117 		return -EINVAL;
2118 
2119 	if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2120 		strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2121 		ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2122 	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2123 		strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2124 		ini = QLA2XXX_INI_MODE_DISABLED;
2125 	else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2126 		  strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2127 		ini = QLA2XXX_INI_MODE_ENABLED;
2128 	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2129 		strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2130 		ini = QLA2XXX_INI_MODE_DUAL;
2131 	else
2132 		return -EINVAL;
2133 
2134 	qla_set_ini_mode(vha, ini);
2135 	return strlen(buf);
2136 }
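
/*
 * Illustrative usage only; "host0" is an assumed host number and will vary:
 *
 *   # cat /sys/class/scsi_host/host0/qlini_mode
 *   # echo disabled > /sys/class/scsi_host/host0/qlini_mode
 *
 * The store above does a case-insensitive prefix match against the mode
 * strings and hands the result to qla_set_ini_mode(), which either accepts
 * the change and schedules an ISP abort, records it with no further action,
 * or refuses it while target mode is still active.
 */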
2137 
2138 static ssize_t
2139 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2140     char *buf)
2141 {
2142 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2143 	int len = 0;
2144 
2145 	len += scnprintf(buf + len, PAGE_SIZE-len,
2146 		"target exchange: new %d : current: %d\n\n",
2147 		vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2148 
2149 	len += scnprintf(buf + len, PAGE_SIZE-len,
2150 	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2151 	    vha->host_no);
2152 
2153 	return len;
2154 }
2155 
2156 static ssize_t
2157 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2158     const char *buf, size_t count)
2159 {
2160 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2161 	int val = 0;
2162 
2163 	if (sscanf(buf, "%d", &val) != 1)
2164 		return -EINVAL;
2165 
2166 	if (val > FW_MAX_EXCHANGES_CNT)
2167 		val = FW_MAX_EXCHANGES_CNT;
2168 	else if (val < 0)
2169 		val = 0;
2170 
2171 	vha->u_ql2xexchoffld = val;
2172 	return strlen(buf);
2173 }
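
/*
 * Example flow (hypothetical host number and value): a newly written target
 * exchange count is only staged in u_ql2xexchoffld and takes effect the next
 * time the operating mode is (re)written, e.g.
 *
 *   # echo 2048 > /sys/class/scsi_host/host0/ql2xexchoffld
 *   # echo dual > /sys/class/scsi_host/host0/qlini_mode
 *
 * qla_set_ini_mode() compares the staged value with the current one (and
 * with FW_DEF_EXCHANGES_CNT via NEED_EXCH_OFFLOAD()) to decide whether an
 * ISP abort is needed to apply it.
 */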
2174 
2175 static ssize_t
2176 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2177     char *buf)
2178 {
2179 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2180 	int len = 0;
2181 
2182 	len += scnprintf(buf + len, PAGE_SIZE-len,
2183 		"initiator exchange: new %d : current: %d\n\n",
2184 		vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2185 
2186 	len += scnprintf(buf + len, PAGE_SIZE-len,
2187 	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2188 	    vha->host_no);
2189 
2190 	return len;
2191 }
2192 
2193 static ssize_t
2194 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2195     const char *buf, size_t count)
2196 {
2197 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2198 	int val = 0;
2199 
2200 	if (sscanf(buf, "%d", &val) != 1)
2201 		return -EINVAL;
2202 
2203 	if (val > FW_MAX_EXCHANGES_CNT)
2204 		val = FW_MAX_EXCHANGES_CNT;
2205 	else if (val < 0)
2206 		val = 0;
2207 
2208 	vha->u_ql2xiniexchg = val;
2209 	return strlen(buf);
2210 }
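
/*
 * Note on both store routines above: the written value is clamped to the
 * range [0, FW_MAX_EXCHANGES_CNT] and only updates the user-requested u_*
 * copy; the live ql2xiniexchg/ql2xexchoffld values are committed by
 * qla_set_ini_mode() when qlini_mode is rewritten.
 */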
2211 
2212 static ssize_t
2213 qla2x00_dif_bundle_statistics_show(struct device *dev,
2214     struct device_attribute *attr, char *buf)
2215 {
2216 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2217 	struct qla_hw_data *ha = vha->hw;
2218 
2219 	return scnprintf(buf, PAGE_SIZE,
2220 	    "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2221 	    ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2222 	    ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2223 	    ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2224 }
2225 
2226 static ssize_t
2227 qla2x00_fw_attr_show(struct device *dev,
2228     struct device_attribute *attr, char *buf)
2229 {
2230 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2231 	struct qla_hw_data *ha = vha->hw;
2232 
2233 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2234 		return scnprintf(buf, PAGE_SIZE, "\n");
2235 
2236 	return scnprintf(buf, PAGE_SIZE, "%llx\n",
2237 	    (uint64_t)ha->fw_attributes_ext[1] << 48 |
2238 	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
2239 	    (uint64_t)ha->fw_attributes_h << 16 |
2240 	    (uint64_t)ha->fw_attributes);
2241 }
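
/*
 * Layout of the 64-bit value composed by the shifts above:
 *
 *   bits 63:48  fw_attributes_ext[1]
 *   bits 47:32  fw_attributes_ext[0]
 *   bits 31:16  fw_attributes_h
 *   bits 15:0   fw_attributes
 *
 * Illustrative values: fw_attributes_h = 0x00f1 and fw_attributes = 0x0052
 * with both extension words zero would print "f10052".
 */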
2242 
2243 static ssize_t
2244 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2245     char *buf)
2246 {
2247 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2248 
2249 	return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
2250 }
2251 
2252 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2253 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2254 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2255 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2256 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2257 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2258 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2259 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2260 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2261 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2262 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2263 		   qla2x00_zio_timer_store);
2264 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2265 		   qla2x00_beacon_store);
2266 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2267 		   qla2x00_optrom_bios_version_show, NULL);
2268 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2269 		   qla2x00_optrom_efi_version_show, NULL);
2270 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2271 		   qla2x00_optrom_fcode_version_show, NULL);
2272 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2273 		   NULL);
2274 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2275     qla2x00_optrom_gold_fw_version_show, NULL);
2276 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2277 		   NULL);
2278 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2279 		   NULL);
2280 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2281 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2282 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2283 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2284 		   NULL);
2285 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2286 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2287 		   qla2x00_vn_port_mac_address_show, NULL);
2288 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2289 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2290 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2291 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2292 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2293 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2294 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2295 		   qla2x00_allow_cna_fw_dump_show,
2296 		   qla2x00_allow_cna_fw_dump_store);
2297 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2298 static DEVICE_ATTR(min_supported_speed, 0444,
2299 		   qla2x00_min_supported_speed_show, NULL);
2300 static DEVICE_ATTR(max_supported_speed, 0444,
2301 		   qla2x00_max_supported_speed_show, NULL);
2302 static DEVICE_ATTR(zio_threshold, 0644,
2303     qla_zio_threshold_show,
2304     qla_zio_threshold_store);
2305 static DEVICE_ATTR_RW(qlini_mode);
2306 static DEVICE_ATTR_RW(ql2xexchoffld);
2307 static DEVICE_ATTR_RW(ql2xiniexchg);
2308 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2309     qla2x00_dif_bundle_statistics_show, NULL);
2310 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2311     qla2x00_port_speed_store);
2312 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2313 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
2314 
2315 
2316 struct device_attribute *qla2x00_host_attrs[] = {
2317 	&dev_attr_driver_version,
2318 	&dev_attr_fw_version,
2319 	&dev_attr_serial_num,
2320 	&dev_attr_isp_name,
2321 	&dev_attr_isp_id,
2322 	&dev_attr_model_name,
2323 	&dev_attr_model_desc,
2324 	&dev_attr_pci_info,
2325 	&dev_attr_link_state,
2326 	&dev_attr_zio,
2327 	&dev_attr_zio_timer,
2328 	&dev_attr_beacon,
2329 	&dev_attr_optrom_bios_version,
2330 	&dev_attr_optrom_efi_version,
2331 	&dev_attr_optrom_fcode_version,
2332 	&dev_attr_optrom_fw_version,
2333 	&dev_attr_84xx_fw_version,
2334 	&dev_attr_total_isp_aborts,
2335 	&dev_attr_serdes_version,
2336 	&dev_attr_mpi_version,
2337 	&dev_attr_phy_version,
2338 	&dev_attr_flash_block_size,
2339 	&dev_attr_vlan_id,
2340 	&dev_attr_vn_port_mac_address,
2341 	&dev_attr_fabric_param,
2342 	&dev_attr_fw_state,
2343 	&dev_attr_optrom_gold_fw_version,
2344 	&dev_attr_thermal_temp,
2345 	&dev_attr_diag_requests,
2346 	&dev_attr_diag_megabytes,
2347 	&dev_attr_fw_dump_size,
2348 	&dev_attr_allow_cna_fw_dump,
2349 	&dev_attr_pep_version,
2350 	&dev_attr_min_supported_speed,
2351 	&dev_attr_max_supported_speed,
2352 	&dev_attr_zio_threshold,
2353 	&dev_attr_dif_bundle_statistics,
2354 	&dev_attr_port_speed,
2355 	&dev_attr_port_no,
2356 	&dev_attr_fw_attr,
2357 	NULL, /* reserve for qlini_mode */
2358 	NULL, /* reserve for ql2xiniexchg */
2359 	NULL, /* reserve for ql2xexchoffld */
2360 	NULL,
2361 };
2362 
2363 void qla_insert_tgt_attrs(void)
2364 {
2365 	struct device_attribute **attr;
2366 
2367 	/* advance to empty slot */
2368 	for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2369 		continue;
2370 
2371 	*attr = &dev_attr_qlini_mode;
2372 	attr++;
2373 	*attr = &dev_attr_ql2xiniexchg;
2374 	attr++;
2375 	*attr = &dev_attr_ql2xexchoffld;
2376 }
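
/*
 * Descriptive note: qla2x00_host_attrs[] above ends with three reserved NULL
 * slots plus the terminating NULL.  qla_insert_tgt_attrs() walks to the first
 * empty slot and fills the reserved entries with qlini_mode, ql2xiniexchg and
 * ql2xexchoffld, so those attributes are only exposed when the caller chooses
 * to insert them; the final NULL terminator is left in place.
 */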
2377 
2378 /* Host attributes. */
2379 
2380 static void
2381 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2382 {
2383 	scsi_qla_host_t *vha = shost_priv(shost);
2384 
2385 	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2386 	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2387 }
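
/*
 * The FC port ID reported above is the 24-bit address packed as
 * domain << 16 | area << 8 | al_pa; e.g. a hypothetical D_ID with
 * domain 0x01, area 0x02 and al_pa 0xef is exposed as 0x0102ef.
 */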
2388 
2389 static void
2390 qla2x00_get_host_speed(struct Scsi_Host *shost)
2391 {
2392 	scsi_qla_host_t *vha = shost_priv(shost);
2393 	u32 speed;
2394 
2395 	if (IS_QLAFX00(vha->hw)) {
2396 		qlafx00_get_host_speed(shost);
2397 		return;
2398 	}
2399 
2400 	switch (vha->hw->link_data_rate) {
2401 	case PORT_SPEED_1GB:
2402 		speed = FC_PORTSPEED_1GBIT;
2403 		break;
2404 	case PORT_SPEED_2GB:
2405 		speed = FC_PORTSPEED_2GBIT;
2406 		break;
2407 	case PORT_SPEED_4GB:
2408 		speed = FC_PORTSPEED_4GBIT;
2409 		break;
2410 	case PORT_SPEED_8GB:
2411 		speed = FC_PORTSPEED_8GBIT;
2412 		break;
2413 	case PORT_SPEED_10GB:
2414 		speed = FC_PORTSPEED_10GBIT;
2415 		break;
2416 	case PORT_SPEED_16GB:
2417 		speed = FC_PORTSPEED_16GBIT;
2418 		break;
2419 	case PORT_SPEED_32GB:
2420 		speed = FC_PORTSPEED_32GBIT;
2421 		break;
2422 	case PORT_SPEED_64GB:
2423 		speed = FC_PORTSPEED_64GBIT;
2424 		break;
2425 	default:
2426 		speed = FC_PORTSPEED_UNKNOWN;
2427 		break;
2428 	}
2429 
2430 	fc_host_speed(shost) = speed;
2431 }
2432 
2433 static void
2434 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2435 {
2436 	scsi_qla_host_t *vha = shost_priv(shost);
2437 	uint32_t port_type;
2438 
2439 	if (vha->vp_idx) {
2440 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2441 		return;
2442 	}
2443 	switch (vha->hw->current_topology) {
2444 	case ISP_CFG_NL:
2445 		port_type = FC_PORTTYPE_LPORT;
2446 		break;
2447 	case ISP_CFG_FL:
2448 		port_type = FC_PORTTYPE_NLPORT;
2449 		break;
2450 	case ISP_CFG_N:
2451 		port_type = FC_PORTTYPE_PTP;
2452 		break;
2453 	case ISP_CFG_F:
2454 		port_type = FC_PORTTYPE_NPORT;
2455 		break;
2456 	default:
2457 		port_type = FC_PORTTYPE_UNKNOWN;
2458 		break;
2459 	}
2460 
2461 	fc_host_port_type(shost) = port_type;
2462 }
2463 
2464 static void
2465 qla2x00_get_starget_node_name(struct scsi_target *starget)
2466 {
2467 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2468 	scsi_qla_host_t *vha = shost_priv(host);
2469 	fc_port_t *fcport;
2470 	u64 node_name = 0;
2471 
2472 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2473 		if (fcport->rport &&
2474 		    starget->id == fcport->rport->scsi_target_id) {
2475 			node_name = wwn_to_u64(fcport->node_name);
2476 			break;
2477 		}
2478 	}
2479 
2480 	fc_starget_node_name(starget) = node_name;
2481 }
2482 
2483 static void
2484 qla2x00_get_starget_port_name(struct scsi_target *starget)
2485 {
2486 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2487 	scsi_qla_host_t *vha = shost_priv(host);
2488 	fc_port_t *fcport;
2489 	u64 port_name = 0;
2490 
2491 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2492 		if (fcport->rport &&
2493 		    starget->id == fcport->rport->scsi_target_id) {
2494 			port_name = wwn_to_u64(fcport->port_name);
2495 			break;
2496 		}
2497 	}
2498 
2499 	fc_starget_port_name(starget) = port_name;
2500 }
2501 
2502 static void
2503 qla2x00_get_starget_port_id(struct scsi_target *starget)
2504 {
2505 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2506 	scsi_qla_host_t *vha = shost_priv(host);
2507 	fc_port_t *fcport;
2508 	uint32_t port_id = ~0U;
2509 
2510 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2511 		if (fcport->rport &&
2512 		    starget->id == fcport->rport->scsi_target_id) {
2513 			port_id = fcport->d_id.b.domain << 16 |
2514 			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2515 			break;
2516 		}
2517 	}
2518 
2519 	fc_starget_port_id(starget) = port_id;
2520 }
2521 
2522 static inline void
2523 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2524 {
2525 	rport->dev_loss_tmo = timeout ? timeout : 1;
2526 }
2527 
2528 static void
2529 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2530 {
2531 	struct Scsi_Host *host = rport_to_shost(rport);
2532 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2533 	unsigned long flags;
2534 
2535 	if (!fcport)
2536 		return;
2537 
2538 	/* Now that the rport has been deleted, set the fcport state to
2539 	   FCS_DEVICE_DEAD */
2540 	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2541 
2542 	/*
2543 	 * Transport has effectively 'deleted' the rport, clear
2544 	 * all local references.
2545 	 */
2546 	spin_lock_irqsave(host->host_lock, flags);
2547 	fcport->rport = fcport->drport = NULL;
2548 	*((fc_port_t **)rport->dd_data) = NULL;
2549 	spin_unlock_irqrestore(host->host_lock, flags);
2550 
2551 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2552 		return;
2553 
2554 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2555 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2556 		return;
2557 	}
2558 }
2559 
2560 static void
2561 qla2x00_terminate_rport_io(struct fc_rport *rport)
2562 {
2563 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2564 
2565 	if (!fcport)
2566 		return;
2567 
2568 	if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2569 		return;
2570 
2571 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2572 		return;
2573 
2574 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2575 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2576 		return;
2577 	}
2578 	/*
2579 	 * At this point all of the fcport's software states are cleared.
2580 	 * Perform any final cleanup of firmware resources (PCBs and XCBs).
2581 	 */
2582 	if (fcport->loop_id != FC_NO_LOOP_ID) {
2583 		if (IS_FWI2_CAPABLE(fcport->vha->hw))
2584 			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2585 			    fcport->loop_id, fcport->d_id.b.domain,
2586 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2587 		else
2588 			qla2x00_port_logout(fcport->vha, fcport);
2589 	}
2590 }
2591 
2592 static int
2593 qla2x00_issue_lip(struct Scsi_Host *shost)
2594 {
2595 	scsi_qla_host_t *vha = shost_priv(shost);
2596 
2597 	if (IS_QLAFX00(vha->hw))
2598 		return 0;
2599 
2600 	qla2x00_loop_reset(vha);
2601 	return 0;
2602 }
2603 
2604 static struct fc_host_statistics *
2605 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2606 {
2607 	scsi_qla_host_t *vha = shost_priv(shost);
2608 	struct qla_hw_data *ha = vha->hw;
2609 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2610 	int rval;
2611 	struct link_statistics *stats;
2612 	dma_addr_t stats_dma;
2613 	struct fc_host_statistics *p = &vha->fc_host_stat;
2614 
2615 	memset(p, -1, sizeof(*p));
2616 
2617 	if (IS_QLAFX00(vha->hw))
2618 		goto done;
2619 
2620 	if (test_bit(UNLOADING, &vha->dpc_flags))
2621 		goto done;
2622 
2623 	if (unlikely(pci_channel_offline(ha->pdev)))
2624 		goto done;
2625 
2626 	if (qla2x00_chip_is_down(vha))
2627 		goto done;
2628 
2629 	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2630 				   GFP_KERNEL);
2631 	if (!stats) {
2632 		ql_log(ql_log_warn, vha, 0x707d,
2633 		    "Failed to allocate memory for stats.\n");
2634 		goto done;
2635 	}
2636 
2637 	rval = QLA_FUNCTION_FAILED;
2638 	if (IS_FWI2_CAPABLE(ha)) {
2639 		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2640 	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2641 	    !ha->dpc_active) {
2642 		/* Must be in a 'READY' state for statistics retrieval. */
2643 		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2644 						stats, stats_dma);
2645 	}
2646 
2647 	if (rval != QLA_SUCCESS)
2648 		goto done_free;
2649 
2650 	p->link_failure_count = stats->link_fail_cnt;
2651 	p->loss_of_sync_count = stats->loss_sync_cnt;
2652 	p->loss_of_signal_count = stats->loss_sig_cnt;
2653 	p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
2654 	p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
2655 	p->invalid_crc_count = stats->inval_crc_cnt;
2656 	if (IS_FWI2_CAPABLE(ha)) {
2657 		p->lip_count = stats->lip_cnt;
2658 		p->tx_frames = stats->tx_frames;
2659 		p->rx_frames = stats->rx_frames;
2660 		p->dumped_frames = stats->discarded_frames;
2661 		p->nos_count = stats->nos_rcvd;
2662 		p->error_frames =
2663 			stats->dropped_frames + stats->discarded_frames;
2664 		p->rx_words = vha->qla_stats.input_bytes;
2665 		p->tx_words = vha->qla_stats.output_bytes;
2666 	}
2667 	p->fcp_control_requests = vha->qla_stats.control_requests;
2668 	p->fcp_input_requests = vha->qla_stats.input_requests;
2669 	p->fcp_output_requests = vha->qla_stats.output_requests;
2670 	p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
2671 	p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
2672 	p->seconds_since_last_reset =
2673 		get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2674 	do_div(p->seconds_since_last_reset, HZ);
2675 
2676 done_free:
2677 	dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2678 	    stats, stats_dma);
2679 done:
2680 	return p;
2681 }
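
/*
 * Descriptive note: the link_statistics buffer is allocated with
 * dma_alloc_coherent() so the firmware/mailbox path can fill the counters in
 * place, and seconds_since_last_reset is converted from jiffies by the
 * do_div() above; e.g. with HZ == 250, a delta of 2500 jiffies reads back as
 * 10 seconds.
 */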
2682 
2683 static void
2684 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2685 {
2686 	scsi_qla_host_t *vha = shost_priv(shost);
2687 	struct qla_hw_data *ha = vha->hw;
2688 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2689 	struct link_statistics *stats;
2690 	dma_addr_t stats_dma;
2691 
2692 	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2693 	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2694 
2695 	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2696 
2697 	if (IS_FWI2_CAPABLE(ha)) {
2698 		stats = dma_alloc_coherent(&ha->pdev->dev,
2699 		    sizeof(*stats), &stats_dma, GFP_KERNEL);
2700 		if (!stats) {
2701 			ql_log(ql_log_warn, vha, 0x70d7,
2702 			    "Failed to allocate memory for stats.\n");
2703 			return;
2704 		}
2705 
2706 		/* reset firmware statistics */
2707 		qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2708 
2709 		dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2710 		    stats, stats_dma);
2711 	}
2712 }
2713 
2714 static void
2715 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2716 {
2717 	scsi_qla_host_t *vha = shost_priv(shost);
2718 
2719 	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2720 	    sizeof(fc_host_symbolic_name(shost)));
2721 }
2722 
2723 static void
2724 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2725 {
2726 	scsi_qla_host_t *vha = shost_priv(shost);
2727 
2728 	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2729 }
2730 
2731 static void
2732 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2733 {
2734 	scsi_qla_host_t *vha = shost_priv(shost);
2735 	static const uint8_t node_name[WWN_SIZE] = {
2736 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
2737 	};
2738 	u64 fabric_name = wwn_to_u64(node_name);
2739 
2740 	if (vha->device_flags & SWITCH_FOUND)
2741 		fabric_name = wwn_to_u64(vha->fabric_node_name);
2742 
2743 	fc_host_fabric_name(shost) = fabric_name;
2744 }
2745 
2746 static void
2747 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2748 {
2749 	scsi_qla_host_t *vha = shost_priv(shost);
2750 	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2751 
2752 	if (!base_vha->flags.online) {
2753 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2754 		return;
2755 	}
2756 
2757 	switch (atomic_read(&base_vha->loop_state)) {
2758 	case LOOP_UPDATE:
2759 		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2760 		break;
2761 	case LOOP_DOWN:
2762 		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2763 			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2764 		else
2765 			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2766 		break;
2767 	case LOOP_DEAD:
2768 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2769 		break;
2770 	case LOOP_READY:
2771 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2772 		break;
2773 	default:
2774 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2775 		break;
2776 	}
2777 }
2778 
2779 static int
2780 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2781 {
2782 	int	ret = 0;
2783 	uint8_t	qos = 0;
2784 	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2785 	scsi_qla_host_t *vha = NULL;
2786 	struct qla_hw_data *ha = base_vha->hw;
2787 	int	cnt;
2788 	struct req_que *req = ha->req_q_map[0];
2789 	struct qla_qpair *qpair;
2790 
2791 	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2792 	if (ret) {
2793 		ql_log(ql_log_warn, vha, 0x707e,
2794 		    "Vport sanity check failed, status %x\n", ret);
2795 		return (ret);
2796 	}
2797 
2798 	vha = qla24xx_create_vhost(fc_vport);
2799 	if (vha == NULL) {
2800 		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2801 		return FC_VPORT_FAILED;
2802 	}
2803 	if (disable) {
2804 		atomic_set(&vha->vp_state, VP_OFFLINE);
2805 		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2806 	} else
2807 		atomic_set(&vha->vp_state, VP_FAILED);
2808 
2809 	/* ready to create vport */
2810 	ql_log(ql_log_info, vha, 0x7080,
2811 	    "VP entry id %d assigned.\n", vha->vp_idx);
2812 
2813 	/* initialize vport states */
2814 	atomic_set(&vha->loop_state, LOOP_DOWN);
2815 	vha->vp_err_state = VP_ERR_PORTDWN;
2816 	vha->vp_prev_err_state = VP_ERR_UNKWN;
2817 	/* Check if physical ha port is Up */
2818 	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2819 	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2820 		/* Don't retry or attempt login of this virtual port */
2821 		ql_dbg(ql_dbg_user, vha, 0x7081,
2822 		    "Vport loop state is not UP.\n");
2823 		atomic_set(&vha->loop_state, LOOP_DEAD);
2824 		if (!disable)
2825 			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
2826 	}
2827 
2828 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2829 		if (ha->fw_attributes & BIT_4) {
2830 			int prot = 0, guard;
2831 
2832 			vha->flags.difdix_supported = 1;
2833 			ql_dbg(ql_dbg_user, vha, 0x7082,
2834 			    "Registered for DIF/DIX type 1 and 3 protection.\n");
2835 			if (ql2xenabledif == 1)
2836 				prot = SHOST_DIX_TYPE0_PROTECTION;
2837 			scsi_host_set_prot(vha->host,
2838 			    prot | SHOST_DIF_TYPE1_PROTECTION
2839 			    | SHOST_DIF_TYPE2_PROTECTION
2840 			    | SHOST_DIF_TYPE3_PROTECTION
2841 			    | SHOST_DIX_TYPE1_PROTECTION
2842 			    | SHOST_DIX_TYPE2_PROTECTION
2843 			    | SHOST_DIX_TYPE3_PROTECTION);
2844 
2845 			guard = SHOST_DIX_GUARD_CRC;
2846 
2847 			if (IS_PI_IPGUARD_CAPABLE(ha) &&
2848 			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2849 				guard |= SHOST_DIX_GUARD_IP;
2850 
2851 			scsi_host_set_guard(vha->host, guard);
2852 		} else
2853 			vha->flags.difdix_supported = 0;
2854 	}
2855 
2856 	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2857 				   &ha->pdev->dev)) {
2858 		ql_dbg(ql_dbg_user, vha, 0x7083,
2859 		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2860 		goto vport_create_failed_2;
2861 	}
2862 
2863 	/* initialize attributes */
2864 	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2865 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2866 	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2867 	fc_host_supported_classes(vha->host) =
2868 		fc_host_supported_classes(base_vha->host);
2869 	fc_host_supported_speeds(vha->host) =
2870 		fc_host_supported_speeds(base_vha->host);
2871 
2872 	qlt_vport_create(vha, ha);
2873 	qla24xx_vport_disable(fc_vport, disable);
2874 
2875 	if (!ql2xmqsupport || !ha->npiv_info)
2876 		goto vport_queue;
2877 
2878 	/* Create a request queue in QoS mode for the vport */
2879 	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
2880 		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
2881 			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
2882 					8) == 0) {
2883 			qos = ha->npiv_info[cnt].q_qos;
2884 			break;
2885 		}
2886 	}
2887 
2888 	if (qos) {
2889 		qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
2890 		if (!qpair)
2891 			ql_log(ql_log_warn, vha, 0x7084,
2892 			    "Can't create qpair for VP[%d]\n",
2893 			    vha->vp_idx);
2894 		else {
2895 			ql_dbg(ql_dbg_multiq, vha, 0xc001,
2896 			    "Queue pair: %d (QoS: %d) created for VP[%d]\n",
2897 			    qpair->id, qos, vha->vp_idx);
2898 			ql_dbg(ql_dbg_user, vha, 0x7085,
2899 			    "Queue pair: %d (QoS: %d) created for VP[%d]\n",
2900 			    qpair->id, qos, vha->vp_idx);
2901 			req = qpair->req;
2902 			vha->qpair = qpair;
2903 		}
2904 	}
2905 
2906 vport_queue:
2907 	vha->req = req;
2908 	return 0;
2909 
2910 vport_create_failed_2:
2911 	qla24xx_disable_vp(vha);
2912 	qla24xx_deallocate_vp_id(vha);
2913 	scsi_host_put(vha->host);
2914 	return FC_VPORT_FAILED;
2915 }
2916 
2917 static int
2918 qla24xx_vport_delete(struct fc_vport *fc_vport)
2919 {
2920 	scsi_qla_host_t *vha = fc_vport->dd_data;
2921 	struct qla_hw_data *ha = vha->hw;
2922 	uint16_t id = vha->vp_idx;
2923 
2924 	set_bit(VPORT_DELETE, &vha->dpc_flags);
2925 
2926 	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
2927 	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
2928 		msleep(1000);
2929 
2930 	qla_nvme_delete(vha);
2931 
2932 	qla24xx_disable_vp(vha);
2933 	qla2x00_wait_for_sess_deletion(vha);
2934 
2935 	vha->flags.delete_progress = 1;
2936 
2937 	qlt_remove_target(ha, vha);
2938 
2939 	fc_remove_host(vha->host);
2940 
2941 	scsi_remove_host(vha->host);
2942 
2943 	/* Allow the timer to run to drain queued items when removing the vp */
2944 	qla24xx_deallocate_vp_id(vha);
2945 
2946 	if (vha->timer_active) {
2947 		qla2x00_vp_stop_timer(vha);
2948 		ql_dbg(ql_dbg_user, vha, 0x7086,
2949 		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
2950 	}
2951 
2952 	qla2x00_free_fcports(vha);
2953 
2954 	mutex_lock(&ha->vport_lock);
2955 	ha->cur_vport_count--;
2956 	clear_bit(vha->vp_idx, ha->vp_idx_map);
2957 	mutex_unlock(&ha->vport_lock);
2958 
2959 	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2960 	    vha->gnl.ldma);
2961 
2962 	vha->gnl.l = NULL;
2963 
2964 	vfree(vha->scan.l);
2965 
2966 	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
2967 		if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
2968 			ql_log(ql_log_warn, vha, 0x7087,
2969 			    "Queue Pair delete failed.\n");
2970 	}
2971 
2972 	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
2973 	scsi_host_put(vha->host);
2974 	return 0;
2975 }
2976 
2977 static int
2978 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
2979 {
2980 	scsi_qla_host_t *vha = fc_vport->dd_data;
2981 
2982 	if (disable)
2983 		qla24xx_disable_vp(vha);
2984 	else
2985 		qla24xx_enable_vp(vha);
2986 
2987 	return 0;
2988 }
2989 
2990 struct fc_function_template qla2xxx_transport_functions = {
2991 
2992 	.show_host_node_name = 1,
2993 	.show_host_port_name = 1,
2994 	.show_host_supported_classes = 1,
2995 	.show_host_supported_speeds = 1,
2996 
2997 	.get_host_port_id = qla2x00_get_host_port_id,
2998 	.show_host_port_id = 1,
2999 	.get_host_speed = qla2x00_get_host_speed,
3000 	.show_host_speed = 1,
3001 	.get_host_port_type = qla2x00_get_host_port_type,
3002 	.show_host_port_type = 1,
3003 	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3004 	.show_host_symbolic_name = 1,
3005 	.set_host_system_hostname = qla2x00_set_host_system_hostname,
3006 	.show_host_system_hostname = 1,
3007 	.get_host_fabric_name = qla2x00_get_host_fabric_name,
3008 	.show_host_fabric_name = 1,
3009 	.get_host_port_state = qla2x00_get_host_port_state,
3010 	.show_host_port_state = 1,
3011 
3012 	.dd_fcrport_size = sizeof(struct fc_port *),
3013 	.show_rport_supported_classes = 1,
3014 
3015 	.get_starget_node_name = qla2x00_get_starget_node_name,
3016 	.show_starget_node_name = 1,
3017 	.get_starget_port_name = qla2x00_get_starget_port_name,
3018 	.show_starget_port_name = 1,
3019 	.get_starget_port_id  = qla2x00_get_starget_port_id,
3020 	.show_starget_port_id = 1,
3021 
3022 	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3023 	.show_rport_dev_loss_tmo = 1,
3024 
3025 	.issue_fc_host_lip = qla2x00_issue_lip,
3026 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3027 	.terminate_rport_io = qla2x00_terminate_rport_io,
3028 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
3029 	.reset_fc_host_stats = qla2x00_reset_host_stats,
3030 
3031 	.vport_create = qla24xx_vport_create,
3032 	.vport_disable = qla24xx_vport_disable,
3033 	.vport_delete = qla24xx_vport_delete,
3034 	.bsg_request = qla24xx_bsg_request,
3035 	.bsg_timeout = qla24xx_bsg_timeout,
3036 };
3037 
3038 struct fc_function_template qla2xxx_transport_vport_functions = {
3039 
3040 	.show_host_node_name = 1,
3041 	.show_host_port_name = 1,
3042 	.show_host_supported_classes = 1,
3043 
3044 	.get_host_port_id = qla2x00_get_host_port_id,
3045 	.show_host_port_id = 1,
3046 	.get_host_speed = qla2x00_get_host_speed,
3047 	.show_host_speed = 1,
3048 	.get_host_port_type = qla2x00_get_host_port_type,
3049 	.show_host_port_type = 1,
3050 	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3051 	.show_host_symbolic_name = 1,
3052 	.set_host_system_hostname = qla2x00_set_host_system_hostname,
3053 	.show_host_system_hostname = 1,
3054 	.get_host_fabric_name = qla2x00_get_host_fabric_name,
3055 	.show_host_fabric_name = 1,
3056 	.get_host_port_state = qla2x00_get_host_port_state,
3057 	.show_host_port_state = 1,
3058 
3059 	.dd_fcrport_size = sizeof(struct fc_port *),
3060 	.show_rport_supported_classes = 1,
3061 
3062 	.get_starget_node_name = qla2x00_get_starget_node_name,
3063 	.show_starget_node_name = 1,
3064 	.get_starget_port_name = qla2x00_get_starget_port_name,
3065 	.show_starget_port_name = 1,
3066 	.get_starget_port_id  = qla2x00_get_starget_port_id,
3067 	.show_starget_port_id = 1,
3068 
3069 	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3070 	.show_rport_dev_loss_tmo = 1,
3071 
3072 	.issue_fc_host_lip = qla2x00_issue_lip,
3073 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3074 	.terminate_rport_io = qla2x00_terminate_rport_io,
3075 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
3076 	.reset_fc_host_stats = qla2x00_reset_host_stats,
3077 
3078 	.bsg_request = qla24xx_bsg_request,
3079 	.bsg_timeout = qla24xx_bsg_timeout,
3080 };
3081 
3082 void
3083 qla2x00_init_host_attr(scsi_qla_host_t *vha)
3084 {
3085 	struct qla_hw_data *ha = vha->hw;
3086 	u32 speeds = FC_PORTSPEED_UNKNOWN;
3087 
3088 	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3089 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3090 	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3091 	fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3092 			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3093 	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3094 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
3095 
3096 	if (IS_CNA_CAPABLE(ha))
3097 		speeds = FC_PORTSPEED_10GBIT;
3098 	else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
3099 		if (ha->max_supported_speed == 2) {
3100 			if (ha->min_supported_speed <= 6)
3101 				speeds |= FC_PORTSPEED_64GBIT;
3102 		}
3103 		if (ha->max_supported_speed == 2 ||
3104 		    ha->max_supported_speed == 1) {
3105 			if (ha->min_supported_speed <= 5)
3106 				speeds |= FC_PORTSPEED_32GBIT;
3107 		}
3108 		if (ha->max_supported_speed == 2 ||
3109 		    ha->max_supported_speed == 1 ||
3110 		    ha->max_supported_speed == 0) {
3111 			if (ha->min_supported_speed <= 4)
3112 				speeds |= FC_PORTSPEED_16GBIT;
3113 		}
3114 		if (ha->max_supported_speed == 1 ||
3115 		    ha->max_supported_speed == 0) {
3116 			if (ha->min_supported_speed <= 3)
3117 				speeds |= FC_PORTSPEED_8GBIT;
3118 		}
3119 		if (ha->max_supported_speed == 0) {
3120 			if (ha->min_supported_speed <= 2)
3121 				speeds |= FC_PORTSPEED_4GBIT;
3122 		}
3123 	} else if (IS_QLA2031(ha))
3124 		speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
3125 			FC_PORTSPEED_4GBIT;
3126 	else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
3127 		speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
3128 			FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3129 	else if (IS_QLA24XX_TYPE(ha))
3130 		speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
3131 			FC_PORTSPEED_1GBIT;
3132 	else if (IS_QLA23XX(ha))
3133 		speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3134 	else
3135 		speeds = FC_PORTSPEED_1GBIT;
3136 
3137 	fc_host_supported_speeds(vha->host) = speeds;
3138 }
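
/*
 * Reading of the 27xx/28xx branch above (derived from the code, not from
 * firmware documentation): max_supported_speed 0/1/2 caps the advertised
 * mask at 16/32/64 Gb, while min_supported_speed uses codes 2..6 for
 * 4/8/16/32/64 Gb; e.g. max == 1 with min == 3 would advertise 8, 16 and
 * 32 Gb.
 */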
3139