/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"
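
/*
 * debugfs support for mlx5_core.
 *
 * A module-level "mlx5" directory is created under the debugfs root, and
 * each device gets its own sub-directory (created elsewhere in the driver,
 * typically named after the PCI device). Within that per-device directory
 * this file populates, for example:
 *
 *   commands/<opcode>/{average,n,failed,...}     command execution statistics
 *   QPs/0x<qpn>/{pid,state,mtu,...}              per-QP context fields
 *   EQs/0x<eqn>/{num_eqes,intr,log_page_size}    per-EQ context fields
 *   CQs/0x<cqn>/{pid,num_cqes,log_page_size}     per-CQ context fields
 *   pages/fw_pages_*                             firmware page accounting
 *
 * (Example paths assume debugfs is mounted at /sys/kernel/debug.)
 */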

enum {
	QP_PID,
	QP_STATE,
	QP_XPORT,
	QP_MTU,
	QP_N_RECV,
	QP_RECV_SZ,
	QP_N_SEND,
	QP_LOG_PG_SZ,
	QP_RQPN,
};

static char *qp_fields[] = {
	[QP_PID]	= "pid",
	[QP_STATE]	= "state",
	[QP_XPORT]	= "transport",
	[QP_MTU]	= "mtu",
	[QP_N_RECV]	= "num_recv",
	[QP_RECV_SZ]	= "rcv_wqe_sz",
	[QP_N_SEND]	= "num_send",
	[QP_LOG_PG_SZ]	= "log2_page_sz",
	[QP_RQPN]	= "remote_qpn",
};

enum {
	EQ_NUM_EQES,
	EQ_INTR,
	EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
	[EQ_NUM_EQES]	= "num_eqes",
	[EQ_INTR]	= "intr",
	[EQ_LOG_PG_SZ]	= "log_page_size",
};

enum {
	CQ_PID,
	CQ_NUM_CQES,
	CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
	[CQ_PID]	= "pid",
	[CQ_NUM_CQES]	= "num_cqes",
	[CQ_LOG_PG_SZ]	= "log_page_size",
};

struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

void mlx5_register_debugfs(void)
{
	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}

void mlx5_unregister_debugfs(void)
{
	debugfs_remove(mlx5_debugfs_root);
}

struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
{
	return dev->priv.dbg.dbg_root;
}
EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);

void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}

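/*
 * Reading the per-opcode "average" file reports stats->sum / stats->n, the
 * running average of the command execution samples accumulated for this
 * opcode by the command interface.
 */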
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
			    loff_t *pos)
{
	struct mlx5_cmd_stats *stats;
	u64 field = 0;
	int ret;
	char tbuf[22];

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	if (stats->n)
		field = div64_u64(stats->sum, stats->n);
	spin_unlock_irq(&stats->lock);
	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

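/* Writing anything to "average" resets the running sum and sample count. */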
static ssize_t average_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct mlx5_cmd_stats *stats;

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	stats->sum = 0;
	stats->n = 0;
	spin_unlock_irq(&stats->lock);

	*pos += count;

	return count;
}

static const struct file_operations stats_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= average_read,
	.write	= average_write,
};

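/*
 * Create a "commands" directory with one sub-directory per known command
 * opcode (opcodes whose name resolves to "unknown command opcode" are
 * skipped), exposing the statistics kept in dev->cmd.stats[].
 */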
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	struct dentry **cmd;
	const char *namep;
	int i;

	cmd = &dev->priv.dbg.cmdif_debugfs;
	*cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);

	for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
		stats = &dev->cmd.stats[i];
		namep = mlx5_command_str(i);
		if (strcmp(namep, "unknown command opcode")) {
			stats->root = debugfs_create_dir(namep, *cmd);

			debugfs_create_file("average", 0400, stats->root, stats,
					    &stats_fops);
			debugfs_create_u64("n", 0400, stats->root, &stats->n);
			debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
			debugfs_create_u64("failed_mbox_status", 0400, stats->root,
					   &stats->failed_mbox_status);
			debugfs_create_u32("last_failed_errno", 0400, stats->root,
					   &stats->last_failed_errno);
			debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
					  &stats->last_failed_mbox_status);
		}
	}
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
}

void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
}

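/* Expose the firmware page accounting counters under "pages". */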
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
{
	struct dentry *pages;

	dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
	pages = dev->priv.dbg.pages_debugfs;

	debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
	debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
	debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
	debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
	debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
	debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
			   &dev->priv.reclaim_pages_discard);
}

void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}

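/*
 * Read one field of a QP for its debugfs file. The QP context is queried
 * from firmware with QUERY_QP; string-valued fields (state, transport) are
 * returned as a pointer cast to u64 and flagged through *is_str.
 */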
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			 int index, int *is_str)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
	u64 param = 0;
	u32 *out;
	int state;
	u32 *qpc;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return 0;

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
	if (err)
		goto out;

	*is_str = 0;

	qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
	switch (index) {
	case QP_PID:
		param = qp->pid;
		break;
	case QP_STATE:
		state = MLX5_GET(qpc, qpc, state);
		param = (unsigned long)mlx5_qp_state_str(state);
		*is_str = 1;
		break;
	case QP_XPORT:
		param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
		*is_str = 1;
		break;
	case QP_MTU:
		switch (MLX5_GET(qpc, qpc, mtu)) {
		case IB_MTU_256:
			param = 256;
			break;
		case IB_MTU_512:
			param = 512;
			break;
		case IB_MTU_1024:
			param = 1024;
			break;
		case IB_MTU_2048:
			param = 2048;
			break;
		case IB_MTU_4096:
			param = 4096;
			break;
		default:
			param = 0;
		}
		break;
	case QP_N_RECV:
		param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
		break;
	case QP_RECV_SZ:
		param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
		break;
	case QP_N_SEND:
		if (!MLX5_GET(qpc, qpc, no_sq))
			param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
		break;
	case QP_LOG_PG_SZ:
		param = MLX5_GET(qpc, qpc, log_page_size) + 12;
		break;
	case QP_RQPN:
		param = MLX5_GET(qpc, qpc, remote_qpn);
		break;
	}
out:
	kfree(out);
	return param;
}

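/* Read one EQ context field for its debugfs file, using QUERY_EQ. */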
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
	if (err) {
		mlx5_core_warn(dev, "failed to query eq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

	switch (index) {
	case EQ_NUM_EQES:
		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
		break;
	case EQ_INTR:
		param = MLX5_GET(eqc, ctx, intr);
		break;
	case EQ_LOG_PG_SZ:
		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
		break;
	}

out:
	kfree(out);
	return param;
}

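/* Read one CQ context field for its debugfs file, via mlx5_core_query_cq(). */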
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_query_cq(dev, cq, out);
	if (err) {
		mlx5_core_warn(dev, "failed to query cq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

	switch (index) {
	case CQ_PID:
		param = cq->pid;
		break;
	case CQ_NUM_CQES:
		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
		break;
	case CQ_LOG_PG_SZ:
		param = MLX5_GET(cqc, ctx, log_page_size);
		break;
	}

out:
	kvfree(out);
	return param;
}

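/*
 * Common read handler for all per-resource field files. Each file's private
 * data is one element of the flexible fields[] array at the end of struct
 * mlx5_rsc_debug, so the containing struct is recovered by stepping back
 * desc->i array elements and then sizeof(struct mlx5_rsc_debug) bytes.
 */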
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
			loff_t *pos)
{
	struct mlx5_field_desc *desc;
	struct mlx5_rsc_debug *d;
	char tbuf[18];
	int is_str = 0;
	u64 field;
	int ret;

	desc = filp->private_data;
	d = (void *)(desc - desc->i) - sizeof(*d);
	switch (d->type) {
	case MLX5_DBG_RSC_QP:
		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
		break;

	case MLX5_DBG_RSC_EQ:
		field = eq_read_field(d->dev, d->object, desc->i);
		break;

	case MLX5_DBG_RSC_CQ:
		field = cq_read_field(d->dev, d->object, desc->i);
		break;

	default:
		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
		return -EINVAL;
	}

	if (is_str)
		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
	else
		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= dbg_read,
};

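/*
 * Create a directory named after the resource number (e.g. "0x1a") under
 * @root, and one read-only file per entry in @field, all served by dbg_read().
 */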
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int i;

	d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn, root);

	for (i = 0; i < nfile; i++) {
		d->fields[i].i = i;
		debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
				    &fops);
	}
	*dbg = d;

	return 0;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
	debugfs_remove_recursive(d->root);
	kfree(d);
}

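/*
 * The mlx5_debug_{qp,eq,cq}_add/remove helpers below are no-ops when the
 * module-level debugfs root was not created.
 */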
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
			   &qp->dbg, qp->qpn, qp_fields,
			   ARRAY_SIZE(qp_fields), qp);
	if (err)
		qp->dbg = NULL;

	return err;
}
EXPORT_SYMBOL(mlx5_debug_qp_add);

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	if (!mlx5_debugfs_root)
		return;

	if (qp->dbg)
		rem_res_tree(qp->dbg);
}
EXPORT_SYMBOL(mlx5_debug_qp_remove);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
			   &eq->dbg, eq->eqn, eq_fields,
			   ARRAY_SIZE(eq_fields), eq);
	if (err)
		eq->dbg = NULL;

	return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	if (!mlx5_debugfs_root)
		return;

	if (eq->dbg)
		rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
			   &cq->dbg, cq->cqn, cq_fields,
			   ARRAY_SIZE(cq_fields), cq);
	if (err)
		cq->dbg = NULL;

	return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	if (!mlx5_debugfs_root)
		return;

	if (cq->dbg) {
		rem_res_tree(cq->dbg);
		cq->dbg = NULL;
	}
}