xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_dfs.c (revision b296a6d5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 
8 #include <linux/debugfs.h>
9 #include <linux/seq_file.h>
10 
11 static struct dentry *qla2x00_dfs_root;
12 static atomic_t qla2x00_dfs_root_count;
13 
14 #define QLA_DFS_RPORT_DEVLOSS_TMO	1
15 
16 static int
17 qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
18 {
19 	switch (attr_id) {
20 	case QLA_DFS_RPORT_DEVLOSS_TMO:
21 		/* Only supported for FC-NVMe devices that are registered. */
22 		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
23 			return -EIO;
24 		*val = fp->nvme_remote_port->dev_loss_tmo;
25 		break;
26 	default:
27 		return -EINVAL;
28 	}
29 	return 0;
30 }
31 
32 static int
33 qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
34 {
35 	switch (attr_id) {
36 	case QLA_DFS_RPORT_DEVLOSS_TMO:
37 		/* Only supported for FC-NVMe devices that are registered. */
38 		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
39 			return -EIO;
40 #if (IS_ENABLED(CONFIG_NVME_FC))
41 		return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
42 						      val);
43 #else /* CONFIG_NVME_FC */
44 		return -EINVAL;
45 #endif /* CONFIG_NVME_FC */
46 	default:
47 		return -EINVAL;
48 	}
49 	return 0;
50 }
51 
/*
 * Generate the get/set callbacks and a debugfs file_operations instance
 * (qla_dfs_rport_<_attr>_fops) for a read-write rport attribute.
 *
 * _attr_id : QLA_DFS_RPORT_* selector forwarded to the common
 *            qla_dfs_rport_get()/qla_dfs_rport_set() dispatchers.
 * _attr    : attribute name used to build the generated identifiers.
 */
#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr)		\
static int qla_dfs_rport_##_attr##_get(void *data, u64 *val)	\
{								\
	struct fc_port *fp = data;				\
	return qla_dfs_rport_get(fp, _attr_id, val);		\
}								\
static int qla_dfs_rport_##_attr##_set(void *data, u64 val)	\
{								\
	struct fc_port *fp = data;				\
	return qla_dfs_rport_set(fp, _attr_id, val);		\
}								\
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops,		\
		qla_dfs_rport_##_attr##_get,			\
		qla_dfs_rport_##_attr##_set, "%llu\n")
66 
/*
 * Wrapper for getting fc_port fields.
 *
 * _attr    : Attribute name.
 * _get_val : Accessor expression used to retrieve the value.
 *
 * Generates a read-only getter plus a debugfs file_operations instance
 * (qla_dfs_rport_field_<_attr>_fops); the set callback is NULL, so the
 * resulting debugfs file cannot be written.
 */
#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)			\
static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val)	\
{									\
	struct fc_port *fp = data;					\
	*val = _get_val;						\
	return 0;							\
}									\
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops,		\
		qla_dfs_rport_field_##_attr##_get,			\
		NULL, "%llu\n")

/* Convenience alias for fields needing an arbitrary accessor expression. */
#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)

/* Read-only attribute backed directly by the fc_port member of same name. */
#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
89 
/* Writable dev_loss_tmo attribute (FC-NVMe registered rports only). */
DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);

/* Read-only views of fc_port state, one debugfs file per field. */
DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
DEFINE_QLA_DFS_RPORT_FIELD(flags);
DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
/* d_id.b24 is the 24-bit FC port ID; sess_kref is the live refcount. */
DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
104 
105 void
106 qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
107 {
108 	char wwn[32];
109 
110 #define QLA_CREATE_RPORT_FIELD_ATTR(_attr)			\
111 	debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir,	\
112 		fp, &qla_dfs_rport_field_##_attr##_fops)
113 
114 	if (!vha->dfs_rport_root || fp->dfs_rport_dir)
115 		return;
116 
117 	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
118 	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
119 	if (!fp->dfs_rport_dir)
120 		return;
121 	if (NVME_TARGET(vha->hw, fp))
122 		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
123 				    fp, &qla_dfs_rport_dev_loss_tmo_fops);
124 
125 	QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
126 	QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
127 	QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
128 	QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
129 	QLA_CREATE_RPORT_FIELD_ATTR(flags);
130 	QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
131 	QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
132 	QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
133 	QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
134 	QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
135 	QLA_CREATE_RPORT_FIELD_ATTR(port_id);
136 	QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
137 }
138 
139 void
140 qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
141 {
142 	if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
143 		return;
144 	debugfs_remove_recursive(fp->dfs_rport_dir);
145 	fp->dfs_rport_dir = NULL;
146 }
147 
148 static int
149 qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
150 {
151 	scsi_qla_host_t *vha = s->private;
152 	struct qla_hw_data *ha = vha->hw;
153 	unsigned long flags;
154 	struct fc_port *sess = NULL;
155 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
156 
157 	seq_printf(s, "%s\n", vha->host_str);
158 	if (tgt) {
159 		seq_puts(s, "Port ID   Port Name                Handle\n");
160 
161 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
162 		list_for_each_entry(sess, &vha->vp_fcports, list)
163 			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
164 			    sess->d_id.b.domain, sess->d_id.b.area,
165 			    sess->d_id.b.al_pa, sess->port_name,
166 			    sess->loop_id);
167 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
168 	}
169 
170 	return 0;
171 }
172 
173 static int
174 qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
175 {
176 	scsi_qla_host_t *vha = inode->i_private;
177 
178 	return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
179 }
180 
/* File operations for the read-only "tgt_sess" debugfs node. */
static const struct file_operations dfs_tgt_sess_ops = {
	.open		= qla2x00_dfs_tgt_sess_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
187 
/*
 * seq_file show handler for "tgt_port_database": fetch the firmware's
 * loop-ID list via a mailbox command and print WWPN/port-ID/loop-ID for
 * each entry.  Always returns 0 so the read succeeds (possibly with only
 * the host banner) even when the firmware query fails.
 */
static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list;
	dma_addr_t gid_list_dma;
	fc_port_t fc_port;
	char *id_iter;
	int rc, i;
	uint16_t entries, loop_id;

	seq_printf(s, "%s\n", vha->host_str);
	/* Firmware DMAs the ID list directly into this coherent buffer. */
	gid_list = dma_alloc_coherent(&ha->pdev->dev,
				      qla2x00_gid_list_size(ha),
				      &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_user, vha, 0x7018,
		       "DMA allocation failed for %u\n",
		       qla2x00_gid_list_size(ha));
		return 0;
	}

	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
				  &entries);
	if (rc != QLA_SUCCESS)
		goto out_free_id_list;

	id_iter = (char *)gid_list;

	seq_puts(s, "Port Name	Port ID		Loop ID\n");

	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid =
			(struct gid_list_info *)id_iter;
		loop_id = le16_to_cpu(gid->loop_id);
		memset(&fc_port, 0, sizeof(fc_port_t));

		fc_port.loop_id = loop_id;

		/*
		 * NOTE(review): rc from the port-database query is ignored;
		 * on failure the printed WWPN/port ID are the zeroed stack
		 * values.  Presumably tolerable for a debug node — confirm
		 * whether failed entries should be skipped instead.
		 */
		rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
		seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
			   fc_port.port_name, fc_port.d_id.b.domain,
			   fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
			   fc_port.loop_id);
		/* Entry stride is adapter-specific, not sizeof(*gid). */
		id_iter += ha->gid_list_info_size;
	}
out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
			  gid_list, gid_list_dma);

	return 0;
}
241 
242 static int
243 qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
244 {
245 	scsi_qla_host_t *vha = inode->i_private;
246 
247 	return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
248 }
249 
/* File operations for the read-only "tgt_port_database" debugfs node. */
static const struct file_operations dfs_tgt_port_database_ops = {
	.open		= qla2x00_dfs_tgt_port_database_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
256 
257 static int
258 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
259 {
260 	struct scsi_qla_host *vha = s->private;
261 	uint16_t mb[MAX_IOCB_MB_REG];
262 	int rc;
263 	struct qla_hw_data *ha = vha->hw;
264 	u16 iocbs_used, i;
265 
266 	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
267 	if (rc != QLA_SUCCESS) {
268 		seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
269 	} else {
270 		seq_puts(s, "FW Resource count\n\n");
271 		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
272 		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
273 		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
274 		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
275 		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
276 		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
277 		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
278 		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
279 		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
280 		    mb[20]);
281 		seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
282 		    mb[21]);
283 		seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
284 		    mb[22]);
285 		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
286 		    mb[23]);
287 	}
288 
289 	if (ql2xenforce_iocb_limit) {
290 		/* lock is not require. It's an estimate. */
291 		iocbs_used = ha->base_qpair->fwres.iocbs_used;
292 		for (i = 0; i < ha->max_qpairs; i++) {
293 			if (ha->queue_pair_map[i])
294 				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
295 		}
296 
297 		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
298 			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);
299 	}
300 
301 	return 0;
302 }
303 
304 static int
305 qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
306 {
307 	struct scsi_qla_host *vha = inode->i_private;
308 
309 	return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
310 }
311 
/* File operations for the read-only "fw_resource_count" debugfs node. */
static const struct file_operations dfs_fw_resource_cnt_ops = {
	.open           = qla_dfs_fw_resource_cnt_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
318 
/*
 * seq_file show handler for "tgt_counters": sum the per-queue-pair
 * target-mode counters (base qpair plus every entry in queue_pair_map)
 * and print the totals, followed by the host-wide DIF statistics.
 * Counters are read without locking, so the totals are a best-effort
 * snapshot rather than an atomically consistent view.
 */
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;

	/* Seed the totals from the base queue pair. */
	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	/* Accumulate every additional queue pair; map slots may be NULL. */
	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
		qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
		qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
		qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
		core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
		core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
		core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
		num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
		num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
		num_q_full_sent);

	/* DIF stats */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_app_tag_err);
	return 0;
}
393 
394 static int
395 qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
396 {
397 	struct scsi_qla_host *vha = inode->i_private;
398 
399 	return single_open(file, qla_dfs_tgt_counters_show, vha);
400 }
401 
/* File operations for the read-only "tgt_counters" debugfs node. */
static const struct file_operations dfs_tgt_counters_ops = {
	.open           = qla_dfs_tgt_counters_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
408 
409 static int
410 qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
411 {
412 	scsi_qla_host_t *vha = s->private;
413 	uint32_t cnt;
414 	uint32_t *fce;
415 	uint64_t fce_start;
416 	struct qla_hw_data *ha = vha->hw;
417 
418 	mutex_lock(&ha->fce_mutex);
419 
420 	seq_puts(s, "FCE Trace Buffer\n");
421 	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
422 	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
423 	seq_puts(s, "FCE Enable Registers\n");
424 	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
425 	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
426 	    ha->fce_mb[5], ha->fce_mb[6]);
427 
428 	fce = (uint32_t *) ha->fce;
429 	fce_start = (unsigned long long) ha->fce_dma;
430 	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
431 		if (cnt % 8 == 0)
432 			seq_printf(s, "\n%llx: ",
433 			    (unsigned long long)((cnt * 4) + fce_start));
434 		else
435 			seq_putc(s, ' ');
436 		seq_printf(s, "%08x", *fce++);
437 	}
438 
439 	seq_puts(s, "\nEnd\n");
440 
441 	mutex_unlock(&ha->fce_mutex);
442 
443 	return 0;
444 }
445 
446 static int
447 qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
448 {
449 	scsi_qla_host_t *vha = inode->i_private;
450 	struct qla_hw_data *ha = vha->hw;
451 	int rval;
452 
453 	if (!ha->flags.fce_enabled)
454 		goto out;
455 
456 	mutex_lock(&ha->fce_mutex);
457 
458 	/* Pause tracing to flush FCE buffers. */
459 	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
460 	if (rval)
461 		ql_dbg(ql_dbg_user, vha, 0x705c,
462 		    "DebugFS: Unable to disable FCE (%d).\n", rval);
463 
464 	ha->flags.fce_enabled = 0;
465 
466 	mutex_unlock(&ha->fce_mutex);
467 out:
468 	return single_open(file, qla2x00_dfs_fce_show, vha);
469 }
470 
471 static int
472 qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
473 {
474 	scsi_qla_host_t *vha = inode->i_private;
475 	struct qla_hw_data *ha = vha->hw;
476 	int rval;
477 
478 	if (ha->flags.fce_enabled)
479 		goto out;
480 
481 	mutex_lock(&ha->fce_mutex);
482 
483 	/* Re-enable FCE tracing. */
484 	ha->flags.fce_enabled = 1;
485 	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
486 	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
487 	    ha->fce_mb, &ha->fce_bufs);
488 	if (rval) {
489 		ql_dbg(ql_dbg_user, vha, 0x700d,
490 		    "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
491 		ha->flags.fce_enabled = 0;
492 	}
493 
494 	mutex_unlock(&ha->fce_mutex);
495 out:
496 	return single_release(inode, file);
497 }
498 
/*
 * File operations for the "fce" debugfs node; the custom release hook
 * re-enables tracing that was paused on open.
 */
static const struct file_operations dfs_fce_ops = {
	.open		= qla2x00_dfs_fce_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= qla2x00_dfs_fce_release,
};
505 
506 static int
507 qla_dfs_naqp_show(struct seq_file *s, void *unused)
508 {
509 	struct scsi_qla_host *vha = s->private;
510 	struct qla_hw_data *ha = vha->hw;
511 
512 	seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
513 	return 0;
514 }
515 
516 static int
517 qla_dfs_naqp_open(struct inode *inode, struct file *file)
518 {
519 	struct scsi_qla_host *vha = inode->i_private;
520 
521 	return single_open(file, qla_dfs_naqp_show, vha);
522 }
523 
524 static ssize_t
525 qla_dfs_naqp_write(struct file *file, const char __user *buffer,
526     size_t count, loff_t *pos)
527 {
528 	struct seq_file *s = file->private_data;
529 	struct scsi_qla_host *vha = s->private;
530 	struct qla_hw_data *ha = vha->hw;
531 	char *buf;
532 	int rc = 0;
533 	unsigned long num_act_qp;
534 
535 	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
536 		pr_err("host%ld: this adapter does not support Multi Q.",
537 		    vha->host_no);
538 		return -EINVAL;
539 	}
540 
541 	if (!vha->flags.qpairs_available) {
542 		pr_err("host%ld: Driver is not setup with Multi Q.",
543 		    vha->host_no);
544 		return -EINVAL;
545 	}
546 	buf = memdup_user_nul(buffer, count);
547 	if (IS_ERR(buf)) {
548 		pr_err("host%ld: fail to copy user buffer.",
549 		    vha->host_no);
550 		return PTR_ERR(buf);
551 	}
552 
553 	num_act_qp = simple_strtoul(buf, NULL, 0);
554 
555 	if (num_act_qp >= vha->hw->max_qpairs) {
556 		pr_err("User set invalid number of qpairs %lu. Max = %d",
557 		    num_act_qp, vha->hw->max_qpairs);
558 		rc = -EINVAL;
559 		goto out_free;
560 	}
561 
562 	if (num_act_qp != ha->tgt.num_act_qpairs) {
563 		ha->tgt.num_act_qpairs = num_act_qp;
564 		qlt_clr_qp_table(vha);
565 	}
566 	rc = count;
567 out_free:
568 	kfree(buf);
569 	return rc;
570 }
571 
/* File operations for the read-write "naqp" debugfs node. */
static const struct file_operations dfs_naqp_ops = {
	.open		= qla_dfs_naqp_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= qla_dfs_naqp_write,
};
579 
580 
581 int
582 qla2x00_dfs_setup(scsi_qla_host_t *vha)
583 {
584 	struct qla_hw_data *ha = vha->hw;
585 
586 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
587 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
588 		goto out;
589 	if (!ha->fce)
590 		goto out;
591 
592 	if (qla2x00_dfs_root)
593 		goto create_dir;
594 
595 	atomic_set(&qla2x00_dfs_root_count, 0);
596 	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
597 
598 create_dir:
599 	if (ha->dfs_dir)
600 		goto create_nodes;
601 
602 	mutex_init(&ha->fce_mutex);
603 	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
604 
605 	atomic_inc(&qla2x00_dfs_root_count);
606 
607 create_nodes:
608 	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
609 	    S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
610 
611 	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
612 	    ha->dfs_dir, vha, &dfs_tgt_counters_ops);
613 
614 	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
615 	    S_IRUSR,  ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
616 
617 	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
618 	    &dfs_fce_ops);
619 
620 	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
621 		S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
622 
623 	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
624 		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
625 		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
626 		if (!ha->tgt.dfs_naqp) {
627 			ql_log(ql_log_warn, vha, 0xd011,
628 			       "Unable to create debugFS naqp node.\n");
629 			goto out;
630 		}
631 	}
632 	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
633 	if (!vha->dfs_rport_root) {
634 		ql_log(ql_log_warn, vha, 0xd012,
635 		       "Unable to create debugFS rports node.\n");
636 		goto out;
637 	}
638 out:
639 	return 0;
640 }
641 
642 int
643 qla2x00_dfs_remove(scsi_qla_host_t *vha)
644 {
645 	struct qla_hw_data *ha = vha->hw;
646 
647 	if (ha->tgt.dfs_naqp) {
648 		debugfs_remove(ha->tgt.dfs_naqp);
649 		ha->tgt.dfs_naqp = NULL;
650 	}
651 
652 	if (ha->tgt.dfs_tgt_sess) {
653 		debugfs_remove(ha->tgt.dfs_tgt_sess);
654 		ha->tgt.dfs_tgt_sess = NULL;
655 	}
656 
657 	if (ha->tgt.dfs_tgt_port_database) {
658 		debugfs_remove(ha->tgt.dfs_tgt_port_database);
659 		ha->tgt.dfs_tgt_port_database = NULL;
660 	}
661 
662 	if (ha->dfs_fw_resource_cnt) {
663 		debugfs_remove(ha->dfs_fw_resource_cnt);
664 		ha->dfs_fw_resource_cnt = NULL;
665 	}
666 
667 	if (ha->dfs_tgt_counters) {
668 		debugfs_remove(ha->dfs_tgt_counters);
669 		ha->dfs_tgt_counters = NULL;
670 	}
671 
672 	if (ha->dfs_fce) {
673 		debugfs_remove(ha->dfs_fce);
674 		ha->dfs_fce = NULL;
675 	}
676 
677 	if (vha->dfs_rport_root) {
678 		debugfs_remove_recursive(vha->dfs_rport_root);
679 		vha->dfs_rport_root = NULL;
680 	}
681 
682 	if (ha->dfs_dir) {
683 		debugfs_remove(ha->dfs_dir);
684 		ha->dfs_dir = NULL;
685 		atomic_dec(&qla2x00_dfs_root_count);
686 	}
687 
688 	if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
689 	    qla2x00_dfs_root) {
690 		debugfs_remove(qla2x00_dfs_root);
691 		qla2x00_dfs_root = NULL;
692 	}
693 
694 	return 0;
695 }
696