xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_dfs.c (revision 360823a09426347ea8f232b0b0b5156d0aed0302)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;

#define QLA_DFS_RPORT_DEVLOSS_TMO	1

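/*
 * Accessors behind the per-rport debugfs attributes. Only one attribute id
 * exists today (QLA_DFS_RPORT_DEVLOSS_TMO), and it is only valid for
 * FC-NVMe remote ports that have completed registration.
 */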
static int
qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
{
	switch (attr_id) {
	case QLA_DFS_RPORT_DEVLOSS_TMO:
		/* Only supported for FC-NVMe devices that are registered. */
		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
			return -EIO;
		*val = fp->nvme_remote_port->dev_loss_tmo;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int
qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
{
	switch (attr_id) {
	case QLA_DFS_RPORT_DEVLOSS_TMO:
		/* Only supported for FC-NVMe devices that are registered. */
		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
			return -EIO;
#if (IS_ENABLED(CONFIG_NVME_FC))
		return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
						      val);
#else /* CONFIG_NVME_FC */
		return -EINVAL;
#endif /* CONFIG_NVME_FC */
	default:
		return -EINVAL;
	}
	return 0;
}

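/*
 * Generate read/write debugfs handlers for one rport attribute: a get and a
 * set wrapper around qla_dfs_rport_get()/qla_dfs_rport_set(), plus the
 * matching qla_dfs_rport_<_attr>_fops used by debugfs_create_file().
 */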
#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr)		\
static int qla_dfs_rport_##_attr##_get(void *data, u64 *val)	\
{								\
	struct fc_port *fp = data;				\
	return qla_dfs_rport_get(fp, _attr_id, val);		\
}								\
static int qla_dfs_rport_##_attr##_set(void *data, u64 val)	\
{								\
	struct fc_port *fp = data;				\
	return qla_dfs_rport_set(fp, _attr_id, val);		\
}								\
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops,		\
		qla_dfs_rport_##_attr##_get,			\
		qla_dfs_rport_##_attr##_set, "%llu\n")

/*
 * Wrapper for getting fc_port fields.
 *
 * _attr    : Attribute name.
 * _get_val : Accessor macro to retrieve the value.
 */
#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)			\
static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val)	\
{									\
	struct fc_port *fp = data;					\
	*val = _get_val;						\
	return 0;							\
}									\
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops,		\
		qla_dfs_rport_field_##_attr##_get,			\
		NULL, "%llu\n")

#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)

#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)

DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);

DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
DEFINE_QLA_DFS_RPORT_FIELD(flags);
DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));

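/*
 * Create the per-rport debugfs directory "pn-<port name>" under the host's
 * "rports" directory and populate it with the read-only field attributes
 * above; dev_loss_tmo is added read/write for FC-NVMe targets only.
 */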
void
qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
{
	char wwn[32];

#define QLA_CREATE_RPORT_FIELD_ATTR(_attr)			\
	debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir,	\
		fp, &qla_dfs_rport_field_##_attr##_fops)

	if (!vha->dfs_rport_root || fp->dfs_rport_dir)
		return;

	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
	if (IS_ERR(fp->dfs_rport_dir))
		return;
	if (NVME_TARGET(vha->hw, fp))
		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
				    fp, &qla_dfs_rport_dev_loss_tmo_fops);

	QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
	QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
	QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
	QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
	QLA_CREATE_RPORT_FIELD_ATTR(flags);
	QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
	QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
	QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
	QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
	QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
	QLA_CREATE_RPORT_FIELD_ATTR(port_id);
	QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
}

void
qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
{
	if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
		return;
	debugfs_remove_recursive(fp->dfs_rport_dir);
	fp->dfs_rport_dir = NULL;
}

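/*
 * "tgt_sess": dump the target-mode session list (port ID, WWPN and loop ID
 * of every fc_port on this vha) while holding the session lock.
 */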
static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct fc_port *sess = NULL;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	seq_printf(s, "%s\n", vha->host_str);
	if (tgt) {
		seq_puts(s, "Port ID   Port Name                Handle\n");

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		list_for_each_entry(sess, &vha->vp_fcports, list)
			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
			    sess->d_id.b.domain, sess->d_id.b.area,
			    sess->d_id.b.al_pa, sess->port_name,
			    sess->loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);

static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list;
	dma_addr_t gid_list_dma;
	fc_port_t fc_port;
	char *id_iter;
	int rc, i;
	uint16_t entries, loop_id;

	seq_printf(s, "%s\n", vha->host_str);
	gid_list = dma_alloc_coherent(&ha->pdev->dev,
				      qla2x00_gid_list_size(ha),
				      &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_user, vha, 0x7018,
		       "DMA allocation failed for %u\n",
		       qla2x00_gid_list_size(ha));
		return 0;
	}

	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
				  &entries);
	if (rc != QLA_SUCCESS)
		goto out_free_id_list;

	id_iter = (char *)gid_list;

	seq_puts(s, "Port Name	Port ID		Loop ID\n");

	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid =
			(struct gid_list_info *)id_iter;
		loop_id = le16_to_cpu(gid->loop_id);
		memset(&fc_port, 0, sizeof(fc_port_t));

		fc_port.loop_id = loop_id;

		rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
		seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
			   fc_port.port_name, fc_port.d_id.b.domain,
			   fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
			   fc_port.loop_id);
		id_iter += ha->gid_list_info_size;
	}
out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
			  gid_list, gid_list_dma);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);

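/*
 * "fw_resource_count": report firmware exchange and IOCB resource counts as
 * returned by qla24xx_res_count_wait(), plus the driver's own IOCB and
 * exchange usage estimates when ql2xenforce_iocb_limit is set.
 */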
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	uint16_t mb[MAX_IOCB_MB_REG];
	int rc;
	struct qla_hw_data *ha = vha->hw;
	u16 iocbs_used, i, exch_used;

	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
	if (rc != QLA_SUCCESS) {
		seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
	} else {
		seq_puts(s, "FW Resource count\n\n");
		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
		    mb[20]);
		seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
		    mb[21]);
		seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
		    mb[22]);
		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
		    mb[23]);
	}

	if (ql2xenforce_iocb_limit) {
		/* Lock is not required; this is only an estimate. */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		exch_used = ha->base_qpair->fwres.exch_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i]) {
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
			}
		}

		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);

		seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n",
			   exch_used, ha->base_qpair->fwres.exch_limit);

		if (ql2xenforce_iocb_limit == 2) {
			iocbs_used = atomic_read(&ha->fwres.iocb_used);
			exch_used  = atomic_read(&ha->fwres.exch_used);
			seq_printf(s, "        estimate iocb2 used [%d] high water limit [%d]\n",
					iocbs_used, ha->fwres.iocb_limit);

			seq_printf(s, "        estimate exchange2 used[%d] high water limit [%d]\n",
					exch_used, ha->fwres.exch_limit);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);

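/*
 * "tgt_counters": sum the per-queue-pair target-mode counters across the
 * base qpair and every entry in queue_pair_map, then print them together
 * with the DIF statistics and the initiator error counters.
 */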
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;
	fc_port_t *fcport = NULL;

	if (qla2x00_chip_is_down(vha))
		return 0;

	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
		qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
		qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
		qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
		core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
		core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
		core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
		num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
		num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
		num_q_full_sent);

	/* DIF stats */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_app_tag_err);

	seq_puts(s, "\n");
	seq_puts(s, "Initiator Error Counters\n");
	seq_printf(s, "HW Error Count =		%14lld\n",
		   vha->hw_err_cnt);
	seq_printf(s, "Link Down Count =	%14lld\n",
		   vha->short_link_down_cnt);
	seq_printf(s, "Interface Err Count =	%14lld\n",
		   vha->interface_err_cnt);
	seq_printf(s, "Cmd Timeout Count =	%14lld\n",
		   vha->cmd_timeout_cnt);
	seq_printf(s, "Reset Count =		%14lld\n",
		   vha->reset_cmd_err_cnt);
	seq_puts(s, "\n");

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (!fcport->rport)
			continue;

		seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n",
			   fcport->rport->number, fcport->tgt_short_link_down_cnt);
	}
	seq_puts(s, "\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);

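/*
 * "fce": dump the Fibre Channel Event (FCE) trace buffer. The open handler
 * pauses tracing so the buffer is stable while it is read; the release
 * handler clears the buffer and re-enables the trace.
 */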
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	uint32_t cnt;
	uint32_t *fce;
	uint64_t fce_start;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->fce_mutex);

	if (ha->flags.user_enabled_fce) {
		seq_puts(s, "FCE Trace Buffer\n");
		seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
		seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma);
		seq_puts(s, "FCE Enable Registers\n");
		seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
			   ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
			   ha->fce_mb[5], ha->fce_mb[6]);

		fce = (uint32_t *)ha->fce;
		fce_start = (unsigned long long)ha->fce_dma;
		for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
			if (cnt % 8 == 0)
				seq_printf(s, "\n%llx: ",
					   (unsigned long long)((cnt * 4) + fce_start));
			else
				seq_putc(s, ' ');
			seq_printf(s, "%08x", *fce++);
		}

		seq_puts(s, "\nEnd\n");
	} else {
		seq_puts(s, "FCE Trace is currently not enabled\n");
		seq_puts(s, "\techo [ 1 | 0 ] > fce\n");
	}

	mutex_unlock(&ha->fce_mutex);

	return 0;
}

static int
qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (!ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Pause tracing to flush FCE buffers. */
	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
	if (rval)
		ql_dbg(ql_dbg_user, vha, 0x705c,
		    "DebugFS: Unable to disable FCE (%d).\n", rval);

	ha->flags.fce_enabled = 0;

	mutex_unlock(&ha->fce_mutex);
out:
	return single_open(file, qla2x00_dfs_fce_show, vha);
}

static int
qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (ha->flags.fce_enabled || !ha->fce)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Re-enable FCE tracing. */
	ha->flags.fce_enabled = 1;
	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
	    ha->fce_mb, &ha->fce_bufs);
	if (rval) {
		ql_dbg(ql_dbg_user, vha, 0x700d,
		    "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
		ha->flags.fce_enabled = 0;
	}

	mutex_unlock(&ha->fce_mutex);
out:
	return single_release(inode, file);
}

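/*
 * "fce" write handler: "echo 1 > fce" allocates the trace buffer (resizing
 * the firmware dump buffer if needed) and enables tracing; "echo 0 > fce"
 * disables tracing and frees the buffer.
 */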
static ssize_t
qla2x00_dfs_fce_write(struct file *file, const char __user *buffer,
		      size_t count, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct scsi_qla_host *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	char *buf;
	int rc = 0;
	unsigned long enable;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0xd034,
		       "this adapter does not support FCE.");
		return -EINVAL;
	}

	buf = memdup_user_nul(buffer, count);
	if (IS_ERR(buf)) {
		ql_dbg(ql_dbg_user, vha, 0xd037,
		    "fail to copy user buffer.");
		return PTR_ERR(buf);
	}

	rc = kstrtoul(buf, 0, &enable);
	if (rc)
		goto out_free;
	rc = count;

	mutex_lock(&ha->fce_mutex);

	if (enable) {
		if (ha->flags.user_enabled_fce) {
			mutex_unlock(&ha->fce_mutex);
			goto out_free;
		}
		ha->flags.user_enabled_fce = 1;
		if (!ha->fce) {
			rc = qla2x00_alloc_fce_trace(vha);
			if (rc) {
				ha->flags.user_enabled_fce = 0;
				mutex_unlock(&ha->fce_mutex);
				goto out_free;
			}

			/* Adjust the fw dump buffer to take this feature into account. */
			if (!ha->flags.fce_dump_buf_alloced)
				qla2x00_alloc_fw_dump(vha);
		}

		if (!ha->flags.fce_enabled)
			qla_enable_fce_trace(vha);

		ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE.\n");
	} else {
		if (!ha->flags.user_enabled_fce) {
			mutex_unlock(&ha->fce_mutex);
			goto out_free;
		}
		ha->flags.user_enabled_fce = 0;
		if (ha->flags.fce_enabled) {
			qla2x00_disable_fce_trace(vha, NULL, NULL);
			ha->flags.fce_enabled = 0;
		}

		qla2x00_free_fce_trace(ha);
		/* No need to re-adjust the fw dump buffer. */

		ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE.\n");
	}

	mutex_unlock(&ha->fce_mutex);
out_free:
	kfree(buf);
	return rc;
}

static const struct file_operations dfs_fce_ops = {
	.open		= qla2x00_dfs_fce_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= qla2x00_dfs_fce_release,
	.write		= qla2x00_dfs_fce_write,
};

static int
qla_dfs_naqp_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_hw_data *ha = vha->hw;

	seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
	return 0;
}

/*
 * Helper macros for setting up debugfs entries.
 * _name: The name of the debugfs entry
 * _ctx_struct: The context that was passed when creating the debugfs file
 *
 * QLA_DFS_SETUP_RD can be used when there is only a show function.
 * - the show function takes the name qla_dfs_<sysfs-name>_show
 *
 * QLA_DFS_SETUP_RW can be used when there are both show and write functions.
 * - the show function takes the name  qla_dfs_<sysfs-name>_show
 * - the write function takes the name qla_dfs_<sysfs-name>_write
 *
 * To add a new debugfs entry:
 * 1. Create a "struct dentry *" in the appropriate structure in the format
 * dfs_<sysfs-name>
 * 2. Set up the debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW
 * 3. Create the debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE
 * or QLA_DFS_ROOT_CREATE_FILE
 * 4. Remove the debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE
 * or QLA_DFS_ROOT_REMOVE_FILE
 *
 * Example for creating the "TEST" debugfs file:
 * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
 * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);
 * 3. In qla2x00_dfs_setup():
 * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
 * 4. In qla2x00_dfs_remove():
 * QLA_DFS_REMOVE_FILE(ha, TEST);
 */
#define QLA_DFS_SETUP_RD(_name, _ctx_struct)				\
static int								\
qla_dfs_##_name##_open(struct inode *inode, struct file *file)		\
{									\
	_ctx_struct *__ctx = inode->i_private;				\
									\
	return single_open(file, qla_dfs_##_name##_show, __ctx);	\
}									\
									\
static const struct file_operations qla_dfs_##_name##_ops = {		\
	.open           = qla_dfs_##_name##_open,			\
	.read           = seq_read,					\
	.llseek         = seq_lseek,					\
	.release        = single_release,				\
};

#define QLA_DFS_SETUP_RW(_name, _ctx_struct)				\
static int								\
qla_dfs_##_name##_open(struct inode *inode, struct file *file)		\
{									\
	_ctx_struct *__ctx = inode->i_private;				\
									\
	return single_open(file, qla_dfs_##_name##_show, __ctx);	\
}									\
									\
static const struct file_operations qla_dfs_##_name##_ops = {		\
	.open           = qla_dfs_##_name##_open,			\
	.read           = seq_read,					\
	.llseek         = seq_lseek,					\
	.release        = single_release,				\
	.write		= qla_dfs_##_name##_write,			\
};

#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx)			\
	do {								\
		if (!qla_dfs_##_name)					\
			qla_dfs_##_name = debugfs_create_file(#_name,	\
					_perm, qla2x00_dfs_root, _ctx,	\
					&qla_dfs_##_name##_ops);	\
	} while (0)

#define QLA_DFS_ROOT_REMOVE_FILE(_name)					\
	do {								\
		if (qla_dfs_##_name) {					\
			debugfs_remove(qla_dfs_##_name);		\
			qla_dfs_##_name = NULL;				\
		}							\
	} while (0)

#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx)	\
	do {								\
		(_struct)->dfs_##_name = debugfs_create_file(#_name,	\
					_perm, _parent, _ctx,		\
					&qla_dfs_##_name##_ops);	\
	} while (0)

#define QLA_DFS_REMOVE_FILE(_struct, _name)				\
	do {								\
		if ((_struct)->dfs_##_name) {				\
			debugfs_remove((_struct)->dfs_##_name);		\
			(_struct)->dfs_##_name = NULL;			\
		}							\
	} while (0)

static int
qla_dfs_naqp_open(struct inode *inode, struct file *file)
{
	struct scsi_qla_host *vha = inode->i_private;

	return single_open(file, qla_dfs_naqp_show, vha);
}

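/*
 * "naqp" write handler: accepts the number of active queue pairs to use for
 * target mode. The value must be below hw->max_qpairs; a change updates
 * ha->tgt.num_act_qpairs and clears the target qpair table via
 * qlt_clr_qp_table().
 */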
static ssize_t
qla_dfs_naqp_write(struct file *file, const char __user *buffer,
    size_t count, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct scsi_qla_host *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	char *buf;
	int rc = 0;
	unsigned long num_act_qp;

	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
		pr_err("host%ld: this adapter does not support Multi Q.",
		    vha->host_no);
		return -EINVAL;
	}

	if (!vha->flags.qpairs_available) {
		pr_err("host%ld: Driver is not setup with Multi Q.",
		    vha->host_no);
		return -EINVAL;
	}
	buf = memdup_user_nul(buffer, count);
	if (IS_ERR(buf)) {
		pr_err("host%ld: fail to copy user buffer.",
		    vha->host_no);
		return PTR_ERR(buf);
	}

	num_act_qp = simple_strtoul(buf, NULL, 0);

	if (num_act_qp >= vha->hw->max_qpairs) {
		pr_err("User set invalid number of qpairs %lu. Max = %d",
		    num_act_qp, vha->hw->max_qpairs);
		rc = -EINVAL;
		goto out_free;
	}

	if (num_act_qp != ha->tgt.num_act_qpairs) {
		ha->tgt.num_act_qpairs = num_act_qp;
		qlt_clr_qp_table(vha);
	}
	rc = count;
out_free:
	kfree(buf);
	return rc;
}

static const struct file_operations dfs_naqp_ops = {
	.open		= qla_dfs_naqp_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= qla_dfs_naqp_write,
};

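/*
 * Create the debugfs tree for this host. Under the debugfs mount point
 * (typically /sys/kernel/debug) the layout is:
 *
 *   qla2xxx/<host>/fw_resource_count
 *   qla2xxx/<host>/tgt_counters
 *   qla2xxx/<host>/tgt_port_database
 *   qla2xxx/<host>/fce
 *   qla2xxx/<host>/tgt_sess
 *   qla2xxx/<host>/naqp          (83xx/27xx/28xx only)
 *   qla2xxx/<host>/rports/pn-<wwpn>/...
 *
 * where <host> is vha->host_str and the top-level directory name comes from
 * QLA2XXX_DRIVER_NAME.
 */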
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto out;

	if (qla2x00_dfs_root)
		goto create_dir;

	atomic_set(&qla2x00_dfs_root_count, 0);
	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);

create_dir:
	if (ha->dfs_dir)
		goto create_nodes;

	mutex_init(&ha->fce_mutex);
	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);

	atomic_inc(&qla2x00_dfs_root_count);

create_nodes:
	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
	    S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);

	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
	    ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);

	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
	    S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);

	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
	    &dfs_fce_ops);

	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
		S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);

	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
		if (IS_ERR(ha->tgt.dfs_naqp)) {
			ql_log(ql_log_warn, vha, 0xd011,
			       "Unable to create debugFS naqp node.\n");
			goto out;
		}
	}
	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
	if (IS_ERR(vha->dfs_rport_root)) {
		ql_log(ql_log_warn, vha, 0xd012,
		       "Unable to create debugFS rports node.\n");
		goto out;
	}
out:
	return 0;
}

int
qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.dfs_naqp) {
		debugfs_remove(ha->tgt.dfs_naqp);
		ha->tgt.dfs_naqp = NULL;
	}

	if (ha->tgt.dfs_tgt_sess) {
		debugfs_remove(ha->tgt.dfs_tgt_sess);
		ha->tgt.dfs_tgt_sess = NULL;
	}

	if (ha->tgt.dfs_tgt_port_database) {
		debugfs_remove(ha->tgt.dfs_tgt_port_database);
		ha->tgt.dfs_tgt_port_database = NULL;
	}

	if (ha->dfs_fw_resource_cnt) {
		debugfs_remove(ha->dfs_fw_resource_cnt);
		ha->dfs_fw_resource_cnt = NULL;
	}

	if (ha->dfs_tgt_counters) {
		debugfs_remove(ha->dfs_tgt_counters);
		ha->dfs_tgt_counters = NULL;
	}

	if (ha->dfs_fce) {
		debugfs_remove(ha->dfs_fce);
		ha->dfs_fce = NULL;
	}

	if (vha->dfs_rport_root) {
		debugfs_remove_recursive(vha->dfs_rport_root);
		vha->dfs_rport_root = NULL;
	}

	if (ha->dfs_dir) {
		debugfs_remove(ha->dfs_dir);
		ha->dfs_dir = NULL;
		atomic_dec(&qla2x00_dfs_root_count);
	}

	if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
	    qla2x00_dfs_root) {
		debugfs_remove(qla2x00_dfs_root);
		qla2x00_dfs_root = NULL;
	}

	return 0;
}