// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "hfi.h"

/* for the given bus number, return the CSR for reading an i2c line */
static inline u32 i2c_in_csr(u32 bus_num)
{
	return bus_num ? ASIC_QSFP2_IN : ASIC_QSFP1_IN;
}

/* for the given bus number, return the CSR for writing an i2c line */
static inline u32 i2c_oe_csr(u32 bus_num)
{
	return bus_num ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
}

static void hfi1_setsda(void *data, int state)
{
	struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
	struct hfi1_devdata *dd = bus->controlling_dd;
	u64 reg;
	u32 target_oe;

	target_oe = i2c_oe_csr(bus->num);
	reg = read_csr(dd, target_oe);
	/*
	 * The OE bit value is inverted and connected to the pin.  When
	 * OE is 0 the pin is left to be pulled up; when OE is 1 the pin
	 * is driven low.  This matches the "open drain" or "open
	 * collector" convention.
	 */
	if (state)
		reg &= ~QSFP_HFI0_I2CDAT;
	else
		reg |= QSFP_HFI0_I2CDAT;
	write_csr(dd, target_oe, reg);
	/* do a read to force the write into the chip */
	(void)read_csr(dd, target_oe);
}

static void hfi1_setscl(void *data, int state)
{
	struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
	struct hfi1_devdata *dd = bus->controlling_dd;
	u64 reg;
	u32 target_oe;

	target_oe = i2c_oe_csr(bus->num);
	reg = read_csr(dd, target_oe);
	/*
	 * The OE bit value is inverted and connected to the pin.  When
	 * OE is 0 the pin is left to be pulled up; when OE is 1 the pin
	 * is driven low.  This matches the "open drain" or "open
	 * collector" convention.
	 */
	if (state)
		reg &= ~QSFP_HFI0_I2CCLK;
	else
		reg |= QSFP_HFI0_I2CCLK;
	write_csr(dd, target_oe, reg);
	/* do a read to force the write into the chip */
	(void)read_csr(dd, target_oe);
}

static int hfi1_getsda(void *data)
{
	struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
	u64 reg;
	u32 target_in;

	hfi1_setsda(data, 1);	/* clear OE so we do not pull line down */
	udelay(2);		/* 1us pull up + 250ns hold */

	target_in = i2c_in_csr(bus->num);
	reg = read_csr(bus->controlling_dd, target_in);
	return !!(reg & QSFP_HFI0_I2CDAT);
}

static int hfi1_getscl(void *data)
{
	struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
	u64 reg;
	u32 target_in;

	hfi1_setscl(data, 1);	/* clear OE so we do not pull line down */
	udelay(2);		/* 1us pull up + 250ns hold */

	target_in = i2c_in_csr(bus->num);
	reg = read_csr(bus->controlling_dd, target_in);
	return !!(reg & QSFP_HFI0_I2CCLK);
}
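
/*
 * Summary of the bit-bang callbacks above (illustrative note, not driver
 * logic): each line is open drain, so the driver only ever drives low or
 * lets go.
 *
 *	set{sda,scl}(bus, 1): OE bit cleared, line released and pulled high
 *	set{sda,scl}(bus, 0): OE bit set, line driven low
 *	get{sda,scl}(bus):    OE bit cleared, level sampled from the _IN CSR
 */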

/*
 * Allocate and initialize the given i2c bus number.
 * Returns NULL on failure.
 */
static struct hfi1_i2c_bus *init_i2c_bus(struct hfi1_devdata *dd,
					 struct hfi1_asic_data *ad, int num)
{
	struct hfi1_i2c_bus *bus;
	int ret;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return NULL;

	bus->controlling_dd = dd;
	bus->num = num;	/* our bus number */

	bus->algo.setsda = hfi1_setsda;
	bus->algo.setscl = hfi1_setscl;
	bus->algo.getsda = hfi1_getsda;
	bus->algo.getscl = hfi1_getscl;
	bus->algo.udelay = 5;
	bus->algo.timeout = usecs_to_jiffies(100000);
	bus->algo.data = bus;

	bus->adapter.owner = THIS_MODULE;
	bus->adapter.algo_data = &bus->algo;
	bus->adapter.dev.parent = &dd->pcidev->dev;
	snprintf(bus->adapter.name, sizeof(bus->adapter.name),
		 "hfi1_i2c%d", num);

	ret = i2c_bit_add_bus(&bus->adapter);
	if (ret) {
		dd_dev_info(dd, "%s: unable to add i2c bus %d, err %d\n",
			    __func__, num, ret);
		kfree(bus);
		return NULL;
	}

	return bus;
}
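
/*
 * Note on the i2c-algo-bit parameters chosen above (illustrative):
 * algo.udelay is the SCL half-period in microseconds, so a value of 5
 * gives a bus speed of roughly 100 kHz, and algo.timeout bounds how long
 * a transfer waits out clock stretching.  i2c_bit_add_bus() registers the
 * adapter with the i2c core, which is what lets the i2c_transfer() calls
 * below drive these callbacks.
 */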

/*
 * Initialize i2c buses.
 * Return 0 on success, -errno on error.
 */
int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
{
	ad->i2c_bus0 = init_i2c_bus(dd, ad, 0);
	ad->i2c_bus1 = init_i2c_bus(dd, ad, 1);
	if (!ad->i2c_bus0 || !ad->i2c_bus1)
		return -ENOMEM;
	return 0;
}

static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
{
	if (bus) {
		i2c_del_adapter(&bus->adapter);
		kfree(bus);
	}
}

void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
{
	if (!ad)
		return;
	clean_i2c_bus(ad->i2c_bus0);
	ad->i2c_bus0 = NULL;
	clean_i2c_bus(ad->i2c_bus1);
	ad->i2c_bus1 = NULL;
}

static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c,
			 u8 slave_addr, int offset, int offset_size,
			 u8 *data, u16 len)
{
	int ret;
	int num_msgs;
	u8 offset_bytes[2];
	struct i2c_msg msgs[2];

	switch (offset_size) {
	case 0:
		num_msgs = 1;
		msgs[0].addr = slave_addr;
		msgs[0].flags = 0;
		msgs[0].len = len;
		msgs[0].buf = data;
		break;
	case 2:
		offset_bytes[1] = (offset >> 8) & 0xff;
		fallthrough;
	case 1:
		num_msgs = 2;
		offset_bytes[0] = offset & 0xff;

		msgs[0].addr = slave_addr;
		msgs[0].flags = 0;
		msgs[0].len = offset_size;
		msgs[0].buf = offset_bytes;

		msgs[1].addr = slave_addr;
		msgs[1].flags = I2C_M_NOSTART;
		msgs[1].len = len;
		msgs[1].buf = data;
		break;
	default:
		return -EINVAL;
	}

	i2c->controlling_dd = dd;
	ret = i2c_transfer(&i2c->adapter, msgs, num_msgs);
	if (ret != num_msgs) {
		dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; write failed, ret %d\n",
			   __func__, i2c->num, slave_addr, offset, len, ret);
		return ret < 0 ? ret : -EIO;
	}
	return 0;
}
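
/*
 * Message layout sketch for the common 1-byte offset write above
 * (illustrative): msgs[0] addresses the device and carries the offset
 * byte; msgs[1] uses I2C_M_NOSTART, so its payload is appended to the
 * same write transaction, giving "S addr+W offset data... P" on the wire.
 */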

static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus,
			u8 slave_addr, int offset, int offset_size,
			u8 *data, u16 len)
{
	int ret;
	int num_msgs;
	u8 offset_bytes[2];
	struct i2c_msg msgs[2];

	switch (offset_size) {
	case 0:
		num_msgs = 1;
		msgs[0].addr = slave_addr;
		msgs[0].flags = I2C_M_RD;
		msgs[0].len = len;
		msgs[0].buf = data;
		break;
	case 2:
		offset_bytes[1] = (offset >> 8) & 0xff;
		fallthrough;
	case 1:
		num_msgs = 2;
		offset_bytes[0] = offset & 0xff;

		msgs[0].addr = slave_addr;
		msgs[0].flags = 0;
		msgs[0].len = offset_size;
		msgs[0].buf = offset_bytes;

		msgs[1].addr = slave_addr;
		msgs[1].flags = I2C_M_RD;
		msgs[1].len = len;
		msgs[1].buf = data;
		break;
	default:
		return -EINVAL;
	}

	bus->controlling_dd = dd;
	ret = i2c_transfer(&bus->adapter, msgs, num_msgs);
	if (ret != num_msgs) {
		dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; read failed, ret %d\n",
			   __func__, bus->num, slave_addr, offset, len, ret);
		return ret < 0 ? ret : -EIO;
	}
	return 0;
}
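
/*
 * The read path differs from the write path above (illustrative): after
 * msgs[0] writes the offset, msgs[1] carries I2C_M_RD, so the core issues
 * a repeated START with the read bit before clocking data in, i.e.
 * "S addr+W offset Sr addr+R data... P".
 */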

/*
 * Raw i2c write.  No set-up or lock checking.
 *
 * Return 0 on success, -errno on error.
 */
static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
		       int offset, void *bp, int len)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_i2c_bus *bus;
	u8 slave_addr;
	int offset_size;

	bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
	slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
	offset_size = (i2c_addr >> 8) & 0x3;
	return i2c_bus_write(dd, bus, slave_addr, offset, offset_size, bp, len);
}
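
/*
 * Decoding example for the i2c_addr encoding above (illustrative):
 * i2c_addr = 0x1a0 means 8-bit device address 0xa0 (7-bit slave address
 * 0x50) with a 1-byte register offset; bits 9:8 select an offset size of
 * 0, 1 or 2 bytes.
 */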

/*
 * Caller must hold the i2c chain resource.
 *
 * Return number of bytes written, or -errno.
 */
int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
	      void *bp, int len)
{
	int ret;

	if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
		return -EACCES;

	ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
	if (ret)
		return ret;

	return len;
}

/*
 * Raw i2c read.  No set-up or lock checking.
 *
 * Return 0 on success, -errno on error.
 */
static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
		      int offset, void *bp, int len)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_i2c_bus *bus;
	u8 slave_addr;
	int offset_size;

	bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
	slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
	offset_size = (i2c_addr >> 8) & 0x3;
	return i2c_bus_read(dd, bus, slave_addr, offset, offset_size, bp, len);
}

/*
 * Caller must hold the i2c chain resource.
 *
 * Return number of bytes read, or -errno.
 */
int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
	     void *bp, int len)
{
	int ret;

	if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
		return -EACCES;

	ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
	if (ret)
		return ret;

	return len;
}

/*
 * Write page n, offset m of QSFP memory as defined by SFF 8636
 * by writing @addr = ((256 * n) + m)
 *
 * Caller must hold the i2c chain resource.
 *
 * Return number of bytes written or -errno.
 */
int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
	       int len)
{
	int count = 0;
	int offset;
	int nwrite;
	int ret = 0;
	u8 page;

	if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
		return -EACCES;

	while (count < len) {
		/*
		 * Set the qsfp page based on a zero-based address
		 * and a page size of QSFP_PAGESIZE bytes.
		 */
		page = (u8)(addr / QSFP_PAGESIZE);

		ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
				  QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
		/* QSFPs require a 5-10msec delay after write operations */
		mdelay(5);
		if (ret) {
			hfi1_dev_porterr(ppd->dd, ppd->port,
					 "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
					 target, ret);
			break;
		}

		offset = addr % QSFP_PAGESIZE;
		nwrite = len - count;
		/* truncate write to boundary if crossing boundary */
		if (((addr % QSFP_RW_BOUNDARY) + nwrite) > QSFP_RW_BOUNDARY)
			nwrite = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);

		ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
				  offset, bp + count, nwrite);
		/* QSFPs require a 5-10msec delay after write operations */
		mdelay(5);
		if (ret)	/* stop on error */
			break;

		count += nwrite;
		addr += nwrite;
	}

	if (ret < 0)
		return ret;
	return count;
}
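
/*
 * Worked example of the mapping above (illustrative): with the 256 * n
 * addressing, QSFP_PAGESIZE is 256, so addr = 384 selects page 1 at
 * offset 128, i.e. the first byte of upper page 01h.  A longer write is
 * split by the loop so that no single transfer crosses a
 * QSFP_RW_BOUNDARY-byte boundary, and the page select is rewritten as the
 * address advances.
 */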

/*
 * Perform a stand-alone single QSFP write.  Acquire the resource, do the
 * write, then release the resource.
 */
int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
		   int len)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 resource = qsfp_resource(dd);
	int ret;

	ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
	if (ret)
		return ret;
	ret = qsfp_write(ppd, target, addr, bp, len);
	release_chip_resource(dd, resource);

	return ret;
}
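
/*
 * Hypothetical usage sketch (illustrative only, not called from this
 * file): disabling all four Tx lanes of a module by writing SFF-8636
 * lower page byte 86, assuming QSFP_TX_CTRL_BYTE_OFFS in qsfp.h names
 * that offset:
 *
 *	u8 tx_disable = 0xf;
 *
 *	one_qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
 *		       &tx_disable, 1);
 */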

/*
 * Access page n, offset m of QSFP memory as defined by SFF 8636
 * by reading @addr = ((256 * n) + m)
 *
 * Caller must hold the i2c chain resource.
 *
 * Return the number of bytes read or -errno.
 */
int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
	      int len)
{
	int count = 0;
	int offset;
	int nread;
	int ret = 0;
	u8 page;

	if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
		return -EACCES;

	while (count < len) {
		/*
		 * Set the qsfp page based on a zero-based address
		 * and a page size of QSFP_PAGESIZE bytes.
		 */
		page = (u8)(addr / QSFP_PAGESIZE);
		ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
				  QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
		/* QSFPs require a 5-10msec delay after write operations */
		mdelay(5);
		if (ret) {
			hfi1_dev_porterr(ppd->dd, ppd->port,
					 "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
					 target, ret);
			break;
		}

		offset = addr % QSFP_PAGESIZE;
		nread = len - count;
		/* truncate read to boundary if crossing boundary */
		if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY)
			nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);

		ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
				 offset, bp + count, nread);
		if (ret)	/* stop on error */
			break;

		count += nread;
		addr += nread;
	}

	if (ret < 0)
		return ret;
	return count;
}

/*
 * Perform a stand-alone single QSFP read.  Acquire the resource, do the
 * read, then release the resource.
 */
int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
		  int len)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 resource = qsfp_resource(dd);
	int ret;

	ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
	if (ret)
		return ret;
	ret = qsfp_read(ppd, target, addr, bp, len);
	release_chip_resource(dd, resource);

	return ret;
}
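
/*
 * Illustrative usage sketch (not code from this driver): reading the
 * SFF-8636 power class byte (page 00h byte 129, QSFP_MOD_PWR_OFFS) and
 * decoding it with the helper defined further down:
 *
 *	u8 power_byte;
 *	int pclass;
 *
 *	if (one_qsfp_read(ppd, ppd->dd->hfi1_id, QSFP_MOD_PWR_OFFS,
 *			  &power_byte, 1) == 1)
 *		pclass = get_qsfp_power_class(power_byte);
 */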

/*
 * This function caches the QSFP memory range in 128 byte chunks.
 * As an example, the next byte after address 255 is byte 128 from
 * upper page 01H (if existing) rather than byte 0 from lower page 00H.
 * Access page n, offset m of QSFP memory as defined by SFF 8636
 * in the cache by reading byte ((128 * n) + m)
 * The calls to qsfp_{read,write} in this function correctly handle the
 * address map difference between this mapping and the mapping implemented
 * by those functions
 *
 * The caller must be holding the QSFP i2c chain resource.
 */
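/*
 * Worked example of the address-map difference (illustrative): the 128
 * bytes of upper page 01h occupy cache offsets 256-383, but qsfp_read()
 * addresses them as 256 * 1 + 128 = 384, which is why the first paged
 * read below pulls address 384 into cache + 256.
 */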
int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
{
	u32 target = ppd->dd->hfi1_id;
	int ret;
	unsigned long flags;
	u8 *cache = &cp->cache[0];

	/* ensure sane contents on invalid reads, for cable swaps */
	memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
	spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
	ppd->qsfp_info.cache_valid = 0;
	spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);

	if (!qsfp_mod_present(ppd)) {
		ret = -ENODEV;
		goto bail;
	}

	ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE);
	if (ret != QSFP_PAGESIZE) {
		dd_dev_info(ppd->dd,
			    "%s: Page 0 read failed, expected %d, got %d\n",
			    __func__, QSFP_PAGESIZE, ret);
		goto bail;
	}

	/* Is paging enabled?  Byte 2 bit 2 set means flat (unpaged) memory. */
	if (!(cache[2] & 4)) {
		/* Paging enabled, page 03 required */
		if ((cache[195] & 0xC0) == 0xC0) {
			/* all */
			ret = qsfp_read(ppd, target, 384, cache + 256, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s failed\n", __func__);
				goto bail;
			}
			ret = qsfp_read(ppd, target, 640, cache + 384, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s failed\n", __func__);
				goto bail;
			}
			ret = qsfp_read(ppd, target, 896, cache + 512, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s failed\n", __func__);
				goto bail;
			}
		} else if ((cache[195] & 0x80) == 0x80) {
			/* only page 2 and 3 */
			ret = qsfp_read(ppd, target, 640, cache + 384, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s failed\n", __func__);
				goto bail;
			}
			ret = qsfp_read(ppd, target, 896, cache + 512, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s failed\n", __func__);
				goto bail;
			}
		} else if ((cache[195] & 0x40) == 0x40) {
			/* only page 1 and 3 */
			ret = qsfp_read(ppd, target, 384, cache + 256, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s failed\n", __func__);
				goto bail;
			}
			ret = qsfp_read(ppd, target, 896, cache + 512, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s failed\n", __func__);
				goto bail;
			}
		} else {
			/* only page 3 */
			ret = qsfp_read(ppd, target, 896, cache + 512, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s failed\n", __func__);
				goto bail;
			}
		}
	}

	spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
	ppd->qsfp_info.cache_valid = 1;
	ppd->qsfp_info.cache_refresh_required = 0;
	spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);

	return 0;

bail:
	memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
	return ret;
}

const char * const hfi1_qsfp_devtech[16] = {
	"850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
	"1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
	"Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
	"Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
#define QSFP_DEFAULT_HDR_CNT 224

#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
#define QSFP_HIGH_PWR(pbyte) ((pbyte) & 3)
/* For use with QSFP_HIGH_PWR macro */
#define QSFP_HIGH_PWR_UNUSED	0 /* Bits [1:0] = 00 implies low power module */

/*
 * Takes power class byte [Page 00 Byte 129] in SFF 8636
 * Returns power class as integer (1 through 7, per SFF 8636 rev 2.4)
 */
int get_qsfp_power_class(u8 power_byte)
{
	if (QSFP_HIGH_PWR(power_byte) == QSFP_HIGH_PWR_UNUSED)
		/* power classes count from 1, their bit encodings from 0 */
		return (QSFP_PWR(power_byte) + 1);
	/*
	 * In the high power group, 00 stands for unused, balancing the
	 * off-by-1 offset above; we add 4 here to account for the
	 * difference between the low and high power groups.
	 */
	return (QSFP_HIGH_PWR(power_byte) + 4);
}
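
/*
 * Illustrative decodings (not driver logic): power_byte = 0x80 has high
 * power bits [1:0] = 00 (unused), so the class is bits [7:6] + 1 = 3;
 * power_byte = 0xc1 has bits [1:0] = 01, so the class is 1 + 4 = 5.
 */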

int qsfp_mod_present(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;

	reg = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
	return !(reg & QSFP_HFI0_MODPRST_N);
}

/*
 * This function maps QSFP memory addresses in 128 byte chunks in the following
 * fashion per the CableInfo SMA query definition in the IBA 1.3 spec/OPA Gen 1
 * spec
 * For addr 000-127, lower page 00h
 * For addr 128-255, upper page 00h
 * For addr 256-383, upper page 01h
 * For addr 384-511, upper page 02h
 * For addr 512-639, upper page 03h
 *
 * For addresses beyond this range, the excess portion of the data
 * buffer is returned set to 0.
 * For optional upper pages that are not valid, the corresponding range
 * of bytes in the data buffer is returned set to 0.
 */
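/*
 * Example of the mapping above (illustrative): addr = 300 falls in the
 * 256-383 window and therefore returns byte 172 of upper page 01h from
 * the cache; a request whose addr + len runs past QSFP_MAX_NUM_PAGES * 128
 * has only the excess zero-filled rather than failing outright.
 */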
int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
		   u8 *data)
{
	struct hfi1_pportdata *ppd;
	u32 excess_len = len;
	int ret = 0, offset = 0;

	if (port_num > dd->num_pports || port_num < 1) {
		dd_dev_info(dd, "%s: Invalid port number %d\n",
			    __func__, port_num);
		ret = -EINVAL;
		goto set_zeroes;
	}

	ppd = dd->pport + (port_num - 1);
	if (!qsfp_mod_present(ppd)) {
		ret = -ENODEV;
		goto set_zeroes;
	}

	if (!ppd->qsfp_info.cache_valid) {
		ret = -EINVAL;
		goto set_zeroes;
	}

	if (addr >= (QSFP_MAX_NUM_PAGES * 128)) {
		ret = -ERANGE;
		goto set_zeroes;
	}

	if ((addr + len) > (QSFP_MAX_NUM_PAGES * 128)) {
		excess_len = (addr + len) - (QSFP_MAX_NUM_PAGES * 128);
		memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len));
		data += (len - excess_len);
		goto set_zeroes;
	}

	memcpy(data, &ppd->qsfp_info.cache[addr], len);

	if (addr <= QSFP_MONITOR_VAL_END &&
	    (addr + len) >= QSFP_MONITOR_VAL_START) {
		/* Overlap with the dynamic channel monitor range */
		if (addr < QSFP_MONITOR_VAL_START) {
			if (addr + len <= QSFP_MONITOR_VAL_END)
				len = addr + len - QSFP_MONITOR_VAL_START;
			else
				len = QSFP_MONITOR_RANGE;
			offset = QSFP_MONITOR_VAL_START - addr;
			addr = QSFP_MONITOR_VAL_START;
		} else if (addr == QSFP_MONITOR_VAL_START) {
			offset = 0;
			if (addr + len > QSFP_MONITOR_VAL_END)
				len = QSFP_MONITOR_RANGE;
		} else {
			offset = 0;
			if (addr + len > QSFP_MONITOR_VAL_END)
				len = QSFP_MONITOR_VAL_END - addr + 1;
		}
		/* Refresh the values of the dynamic monitors from the cable */
		ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
		if (ret != len) {
			ret = -EAGAIN;
			goto set_zeroes;
		}
	}

	return 0;

set_zeroes:
	memset(data, 0, excess_len);
	return ret;
}

static const char *pwr_codes[8] = {"N/AW",
				  "1.5W",
				  "2.0W",
				  "2.5W",
				  "3.5W",
				  "4.0W",
				  "4.5W",
				  "5.0W"
				 };

int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
{
	u8 *cache = &ppd->qsfp_info.cache[0];
	u8 bin_buff[QSFP_DUMP_CHUNK];
	char lenstr[6];
	int sofar;
	int bidx = 0;
	u8 *atten = &cache[QSFP_ATTEN_OFFS];
	u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];
	u8 power_byte = 0;

	sofar = 0;
	lenstr[0] = ' ';
	lenstr[1] = '\0';

	if (ppd->qsfp_info.cache_valid) {
		if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
			snprintf(lenstr, sizeof(lenstr), "%dM ",
				 cache[QSFP_MOD_LEN_OFFS]);

		power_byte = cache[QSFP_MOD_PWR_OFFS];
		sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
				pwr_codes[get_qsfp_power_class(power_byte)]);

		sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n",
				lenstr,
			hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]);

		sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
				   QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
				   QSFP_OUI(vendor_oui));

		sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
				   QSFP_PN_LEN, &cache[QSFP_PN_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
				   QSFP_REV_LEN, &cache[QSFP_REV_OFFS]);

		if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
			sofar += scnprintf(buf + sofar, len - sofar,
				"Atten:%d, %d\n",
				QSFP_ATTEN_SDR(atten),
				QSFP_ATTEN_DDR(atten));

		sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
				   QSFP_SN_LEN, &cache[QSFP_SN_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
				   QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
				   QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]);

		while (bidx < QSFP_DEFAULT_HDR_CNT) {
			int iidx;

			memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK);
			for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) {
				sofar += scnprintf(buf + sofar, len - sofar,
					" %02X", bin_buff[iidx]);
			}
			sofar += scnprintf(buf + sofar, len - sofar, "\n");
			bidx += QSFP_DUMP_CHUNK;
		}
	}
	return sofar;
}
817