/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/ctype.h>

#include "qib.h"
#include "qib_mad.h"

/* start of per-port functions */
/*
 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
 */
static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
{
	struct qib_devdata *dd = ppd->dd;

	return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT));
}

static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
			       size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret;
	u16 val;

	ret = kstrtou16(buf, 0, &val);
	if (ret) {
		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
		return ret;
	}

	/*
	 * Set the "intentional" heartbeat enable per either of
	 * "Enable" and "Auto", as these are normally set together.
	 * This bit is consulted when leaving loopback mode,
	 * because entering loopback mode overrides it and automatically
	 * disables heartbeat.
	 */
	ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
	return ret < 0 ? ret : count;
}
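
/*
 * Example usage (illustrative only; the exact sysfs path depends on how
 * the device and port directories are named on a given system):
 *	cat .../linkcontrol/hrtbt_enable	reports the current OR of 1/2
 *	echo 3 > .../linkcontrol/hrtbt_enable	sets both "Enable" and "Auto"
 */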

static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
			      size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = count, r;

	r = dd->f_set_ib_loopback(ppd, buf);
	if (r < 0)
		ret = r;

	return ret;
}

static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
				  size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret;
	u16 val;

	ret = kstrtou16(buf, 0, &val);
	if (ret) {
		qib_dev_err(dd, "attempt to set invalid LED override\n");
		return ret;
	}

	qib_set_led_override(ppd, val);
	return count;
}

static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
{
	if (!ppd->statusp)
		return -EINVAL;

	return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp));
}

/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 */
static const char * const qib_status_str[] = {
	"Initted",
	"",
	"",
	"",
	"",
	"Present",
	"IB_link_up",
	"IB_configured",
	"",
	"Fatal_Hardware_Error",
	NULL,
};
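
/*
 * Illustrative example of the decoding done by show_status_str() below:
 * a status word of 0x61 (bits 0, 5 and 6 set) would be reported as
 * "Initted Present IB_link_up".
 */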

static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
{
	int i, any;
	u64 s;
	ssize_t ret;

	if (!ppd->statusp) {
		ret = -EINVAL;
		goto bail;
	}

	s = *(ppd->statusp);
	*buf = '\0';
	for (any = i = 0; s && qib_status_str[i]; i++) {
		if (s & 1) {
			/* stop if appending would overflow the page */
			if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
				break;
			if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
					PAGE_SIZE)
				break;
			any = 1;
		}
		s >>= 1;
	}
	if (any)
		strlcat(buf, "\n", PAGE_SIZE);

	ret = strlen(buf);

bail:
	return ret;
}

/* end of per-port functions */

/*
 * Start of per-port file structures and support code
 * Because we are fitting into other infrastructure, we have to supply the
 * full set of kobject/sysfs_ops structures and routines.
 */
#define QIB_PORT_ATTR(name, mode, show, store) \
	static struct qib_port_attr qib_port_attr_##name = \
		__ATTR(name, mode, show, store)

struct qib_port_attr {
	struct attribute attr;
	ssize_t (*show)(struct qib_pportdata *, char *);
	ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
};

QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
	      store_hrtbt_enb);
QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
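
/*
 * For reference, QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL) above
 * simply expands to
 *	static struct qib_port_attr qib_port_attr_status =
 *		__ATTR(status, S_IRUGO, show_status, NULL);
 */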

static struct attribute *port_default_attributes[] = {
	&qib_port_attr_loopback.attr,
	&qib_port_attr_led_override.attr,
	&qib_port_attr_hrtbt_enable.attr,
	&qib_port_attr_status.attr,
	&qib_port_attr_status_str.attr,
	NULL
};

/*
 * Start of per-port congestion control structures and support code
 */

/*
 * Congestion control table size followed by table entries
 */
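/*
 * Concretely (as implied by the size computation below), the blob begins
 * with a 16-bit big-endian count and is followed by ppd->total_cct_entry
 * entries of struct ib_cc_table_entry_shadow, copied straight from the
 * shadow table kept in ppd->ccti_entries_shadow.
 */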
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t pos, size_t count)
{
	int ret;
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_cc_kobj);

	if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
		return -EINVAL;

	ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
		 + sizeof(__be16);

	if (pos > ret)
		return -EINVAL;

	if (count > ret - pos)
		count = ret - pos;

	if (!count)
		return count;

	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->ccti_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);

	return count;
}

static void qib_port_release(struct kobject *kobj)
{
	/* nothing to do since memory is freed by qib_free_devdata() */
}

static struct kobj_type qib_port_cc_ktype = {
	.release = qib_port_release,
};

static const struct bin_attribute cc_table_bin_attr = {
	.attr = {.name = "cc_table_bin", .mode = 0444},
	.read = read_cc_table_bin,
	.size = PAGE_SIZE,
};

/*
 * Congestion settings: port control, control map and an array of 16
 * entries for the congestion entries - increase, timer, event log
 * trigger threshold and the minimum injection rate delay.
 */
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t pos, size_t count)
{
	int ret;
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_cc_kobj);

	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return -EINVAL;

	ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);

	if (pos > ret)
		return -EINVAL;
	if (count > ret - pos)
		count = ret - pos;

	if (!count)
		return count;

	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->congestion_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);

	return count;
}

static const struct bin_attribute cc_setting_bin_attr = {
	.attr = {.name = "cc_settings_bin", .mode = 0444},
	.read = read_cc_setting_bin,
	.size = PAGE_SIZE,
};
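
/*
 * For illustration only: a minimal userspace sketch (not part of the
 * driver, kept out of the build with #if 0) of how one of the binary
 * congestion-control attributes above could be read.  The sysfs path is
 * an assumption; the device name ("qib0") and port number ("1") are
 * placeholders that depend on the system.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char blob[4096];
	ssize_t n;
	int fd;

	/* hypothetical path; adjust device and port to match the system */
	fd = open("/sys/class/infiniband/qib0/ports/1/CCMgtA/cc_settings_bin",
		  O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, blob, sizeof(blob));
	if (n >= 0)
		printf("read %zd bytes of congestion settings\n", n);
	close(fd);
	return 0;
}
#endif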


static ssize_t qib_portattr_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct qib_port_attr *pattr =
		container_of(attr, struct qib_port_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_kobj);

	if (!pattr->show)
		return -EIO;

	return pattr->show(ppd, buf);
}

static ssize_t qib_portattr_store(struct kobject *kobj,
	struct attribute *attr, const char *buf, size_t len)
{
	struct qib_port_attr *pattr =
		container_of(attr, struct qib_port_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_kobj);

	if (!pattr->store)
		return -EIO;

	return pattr->store(ppd, buf, len);
}


static const struct sysfs_ops qib_port_ops = {
	.show = qib_portattr_show,
	.store = qib_portattr_store,
};

static struct kobj_type qib_port_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_port_ops,
	.default_attrs = port_default_attributes
};

/* Start sl2vl */

#define QIB_SL2VL_ATTR(N) \
	static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0444 }, \
		.sl = N \
	}
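
/*
 * For example, QIB_SL2VL_ATTR(7) below defines qib_sl2vl_attr_7 with the
 * sysfs name "7", mode 0444 and .sl = 7; sl2vl_attr_show() then uses .sl
 * to index the port's sl_to_vl[] table.
 */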

struct qib_sl2vl_attr {
	struct attribute attr;
	int sl;
};

QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);

static struct attribute *sl2vl_default_attributes[] = {
	&qib_sl2vl_attr_0.attr,
	&qib_sl2vl_attr_1.attr,
	&qib_sl2vl_attr_2.attr,
	&qib_sl2vl_attr_3.attr,
	&qib_sl2vl_attr_4.attr,
	&qib_sl2vl_attr_5.attr,
	&qib_sl2vl_attr_6.attr,
	&qib_sl2vl_attr_7.attr,
	&qib_sl2vl_attr_8.attr,
	&qib_sl2vl_attr_9.attr,
	&qib_sl2vl_attr_10.attr,
	&qib_sl2vl_attr_11.attr,
	&qib_sl2vl_attr_12.attr,
	&qib_sl2vl_attr_13.attr,
	&qib_sl2vl_attr_14.attr,
	&qib_sl2vl_attr_15.attr,
	NULL
};

static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct qib_sl2vl_attr *sattr =
		container_of(attr, struct qib_sl2vl_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, sl2vl_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;

	return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
}

static const struct sysfs_ops qib_sl2vl_ops = {
	.show = sl2vl_attr_show,
};

static struct kobj_type qib_sl2vl_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_sl2vl_ops,
	.default_attrs = sl2vl_default_attributes
};

/* End sl2vl */

/* Start diag_counters */

#define QIB_DIAGC_ATTR(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, rvp.n_##N) \
	}

#define QIB_DIAGC_ATTR_PER_CPU(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, rvp.z_##N) \
	}

struct qib_diagc_attr {
	struct attribute attr;
	size_t counter;
};
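
/*
 * .counter holds the byte offset of the underlying field within struct
 * qib_ibport; diagc_attr_show()/diagc_attr_store() below use it to access
 * the plain 32-bit counters directly.  The three per-CPU counters are
 * instead matched by name and handled through the helpers further down.
 */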

QIB_DIAGC_ATTR_PER_CPU(rc_acks);
QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);

QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);
QIB_DIAGC_ATTR(rc_crwaits);

static struct attribute *diagc_default_attributes[] = {
	&qib_diagc_attr_rc_resends.attr,
	&qib_diagc_attr_rc_acks.attr,
	&qib_diagc_attr_rc_qacks.attr,
	&qib_diagc_attr_rc_delayed_comp.attr,
	&qib_diagc_attr_seq_naks.attr,
	&qib_diagc_attr_rdma_seq.attr,
	&qib_diagc_attr_rnr_naks.attr,
	&qib_diagc_attr_other_naks.attr,
	&qib_diagc_attr_rc_timeouts.attr,
	&qib_diagc_attr_loop_pkts.attr,
	&qib_diagc_attr_pkt_drops.attr,
	&qib_diagc_attr_dmawait.attr,
	&qib_diagc_attr_unaligned.attr,
	&qib_diagc_attr_rc_dupreq.attr,
	&qib_diagc_attr_rc_seqnak.attr,
	&qib_diagc_attr_rc_crwaits.attr,
	NULL
};

static u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
#define def_write_per_cpu(cntr) \
static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data)	\
{									\
	struct qib_devdata *dd = ppd->dd;				\
	struct qib_ibport *qibp = &ppd->ibport_data;			\
	/*  A write can only zero the counter */			\
	if (data == 0)							\
		qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
	else								\
		qib_dev_err(dd, "Per CPU cntrs can only be zeroed\n");	\
}

def_write_per_cpu(rc_acks)
def_write_per_cpu(rc_qacks)
def_write_per_cpu(rc_delayed_comp)

#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
							qibp->rvp.z_##cntr)
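
/*
 * The per-CPU counters are never written directly; "zeroing" one simply
 * records the current all-CPU total in the corresponding z_ baseline
 * (see def_write_per_cpu() above), and reads report the total minus that
 * baseline.
 */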

static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, diagc_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;
	u64 val;

	if (!strncmp(dattr->attr.name, "rc_acks", 7))
		val = READ_PER_CPU_CNTR(rc_acks);
	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
		val = READ_PER_CPU_CNTR(rc_qacks);
	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
		val = READ_PER_CPU_CNTR(rc_delayed_comp);
	else
		val = *(u32 *)((char *)qibp + dattr->counter);

	return sysfs_emit(buf, "%llu\n", val);
}

static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t size)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, diagc_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;
	u32 val;
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	if (!strncmp(dattr->attr.name, "rc_acks", 7))
		write_per_cpu_rc_acks(ppd, val);
	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
		write_per_cpu_rc_qacks(ppd, val);
	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
		write_per_cpu_rc_delayed_comp(ppd, val);
	else
		*(u32 *)((char *)qibp + dattr->counter) = val;
	return size;
}
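
/*
 * Example usage (illustrative; the path depends on the device and port
 * naming on a given system):
 *	cat  .../diag_counters/rc_timeouts	reads a counter
 *	echo 0 > .../diag_counters/rc_acks	resets a per-CPU counter
 */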

static const struct sysfs_ops qib_diagc_ops = {
	.show = diagc_attr_show,
	.store = diagc_attr_store,
};

static struct kobj_type qib_diagc_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_diagc_ops,
	.default_attrs = diagc_default_attributes
};

/* End diag_counters */

/* end of per-port file structures and support code */

/*
 * Start of per-unit (or driver, in some cases, but replicated
 * per unit) functions (these get a device *)
 */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);

	return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	if (!dd->boardname)
		return -EINVAL;
	return sysfs_emit(buf, "%s\n", dd->boardname);
}
static DEVICE_ATTR_RO(hca_type);
static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);

static ssize_t version_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	/* The string printed here is already newline-terminated. */
	return sysfs_emit(buf, "%s", (char *)ib_qib_version);
}
static DEVICE_ATTR_RO(version);

static ssize_t boardversion_show(struct device *device,
				 struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* The string printed here is already newline-terminated. */
	return sysfs_emit(buf, "%s", dd->boardversion);
}
static DEVICE_ATTR_RO(boardversion);

static ssize_t localbus_info_show(struct device *device,
				  struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* The string printed here is already newline-terminated. */
	return sysfs_emit(buf, "%s", dd->lbus_info);
}
static DEVICE_ATTR_RO(localbus_info);

static ssize_t nctxts_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* Return the number of user ports (contexts) available. */
	/* The calculation below deals with a special case where
	 * cfgctxts is set to 1 on a single-port board. */
	return sysfs_emit(buf, "%u\n",
			  (dd->first_user_ctxt > dd->cfgctxts) ?
				  0 :
				  (dd->cfgctxts - dd->first_user_ctxt));
}
static DEVICE_ATTR_RO(nctxts);

static ssize_t nfreectxts_show(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* Return the number of free user ports (contexts) available. */
	return sysfs_emit(buf, "%u\n", dd->freectxts);
}
static DEVICE_ATTR_RO(nfreectxts);

static ssize_t serial_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	const u8 *end = memchr(dd->serial, 0, ARRAY_SIZE(dd->serial));
	int size = end ? end - dd->serial : ARRAY_SIZE(dd->serial);
	return sysfs_emit(buf, "%.*s\n", size, dd->serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t chip_reset_store(struct device *device,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;

	if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
		ret = -EINVAL;
		goto bail;
	}

	ret = qib_reset_device(dd->unit);
bail:
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_WO(chip_reset);

/*
 * Dump the tempsense registers to ease shell-scripting; most values are
 * reported in decimal, registers 2 and 3 in hex.
 */
static ssize_t tempsense_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int i;
	u8 regvals[8];

	for (i = 0; i < 8; i++) {
		int ret;

		if (i == 6)
			continue;
		ret = dd->f_tempsense_rd(dd, i);
		if (ret < 0)
			return ret;	/* return error on bad read */
		regvals[i] = ret;
	}
	return sysfs_emit(buf, "%d %d %02X %02X %d %d\n",
			  (signed char)regvals[0],
			  (signed char)regvals[1],
			  regvals[2],
			  regvals[3],
			  (signed char)regvals[5],
			  (signed char)regvals[7]);
}
static DEVICE_ATTR_RO(tempsense);
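
/*
 * Illustrative output (hypothetical register values): a read of the
 * tempsense attribute might return "35 80 0F 12 30 33", i.e. two signed
 * decimal values, two hex values, then two more signed decimal values.
 */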

/*
 * end of per-unit (or driver, in some cases, but replicated
 * per unit) functions
 */

/* start of per-unit file structures and support code */
static struct attribute *qib_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	&dev_attr_version.attr,
	&dev_attr_nctxts.attr,
	&dev_attr_nfreectxts.attr,
	&dev_attr_serial.attr,
	&dev_attr_boardversion.attr,
	&dev_attr_tempsense.attr,
	&dev_attr_localbus_info.attr,
	&dev_attr_chip_reset.attr,
	NULL,
};

const struct attribute_group qib_attr_group = {
	.attrs = qib_attributes,
};

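/*
 * Create the per-port sysfs hierarchy under the caller-supplied port
 * kobject: linkcontrol/, sl2vl/ and diag_counters/ are always created,
 * and CCMgtA/ (with cc_settings_bin and cc_table_bin) is added when
 * congestion control is available for the port.
 */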
int qib_create_port_files(struct ib_device *ibdev, u32 port_num,
			  struct kobject *kobj)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (!port_num || port_num > dd->num_pports) {
		qib_dev_err(dd,
			"Skipping infiniband class with invalid port %u\n",
			port_num);
		ret = -ENODEV;
		goto bail;
	}
	ppd = &dd->pport[port_num - 1];

	ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
				   "linkcontrol");
	if (ret) {
		qib_dev_err(dd,
			"Skipping linkcontrol sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_link;
	}
	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
				   "sl2vl");
	if (ret) {
		qib_dev_err(dd,
			"Skipping sl2vl sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_sl;
	}
	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
				   "diag_counters");
	if (ret) {
		qib_dev_err(dd,
			"Skipping diag_counters sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_diagc;
	}
	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);

	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return 0;

	ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
				kobj, "CCMgtA");
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_cc;
	}

	kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_setting_bin_attr);
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_cc;
	}

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_table_bin_attr);
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_cc_entry_bin;
	}

	qib_devinfo(dd->pcidev,
		"IB%u: Congestion Control Agent enabled for port %d\n",
		dd->unit, port_num);

	return 0;

bail_cc_entry_bin:
	sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
	kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
	kobject_put(&ppd->diagc_kobj);
bail_sl:
	kobject_put(&ppd->sl2vl_kobj);
bail_link:
	kobject_put(&ppd->pport_kobj);
bail:
	return ret;
}

/*
 * Unregister and remove our files in /sys/class/infiniband.
 */
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i;

	for (i = 0; i < dd->num_pports; i++) {
		ppd = &dd->pport[i];
		if (qib_cc_table_size &&
			ppd->congestion_entries_shadow) {
			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
				&cc_setting_bin_attr);
			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
				&cc_table_bin_attr);
			kobject_put(&ppd->pport_cc_kobj);
		}
		kobject_put(&ppd->diagc_kobj);
		kobject_put(&ppd->sl2vl_kobj);
		kobject_put(&ppd->pport_kobj);
	}
}