xref: /openbmc/linux/drivers/thunderbolt/eeprom.c (revision 078b39c9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - eeprom access
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2018, Intel Corporation
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/delay.h>
11 #include <linux/property.h>
12 #include <linux/slab.h>
13 #include "tb.h"
14 
15 /*
16  * tb_eeprom_ctl_write() - write control word
17  */
18 static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
19 {
20 	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1);
21 }
22 
23 /*
24  * tb_eeprom_ctl_read() - read control word
25  */
26 static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
27 {
28 	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1);
29 }
30 
31 enum tb_eeprom_transfer {
32 	TB_EEPROM_IN,
33 	TB_EEPROM_OUT,
34 };
35 
36 /*
37  * tb_eeprom_active - enable or disable rom access
38  *
39  * WARNING: Always disable access after usage. Otherwise the controller will
40  * fail to reprobe.
41  */
42 static int tb_eeprom_active(struct tb_switch *sw, bool enable)
43 {
44 	struct tb_eeprom_ctl ctl;
45 	int res = tb_eeprom_ctl_read(sw, &ctl);
46 	if (res)
47 		return res;
48 	if (enable) {
49 		ctl.bit_banging_enable = 1;
50 		res = tb_eeprom_ctl_write(sw, &ctl);
51 		if (res)
52 			return res;
53 		ctl.fl_cs = 0;
54 		return tb_eeprom_ctl_write(sw, &ctl);
55 	} else {
56 		ctl.fl_cs = 1;
57 		res = tb_eeprom_ctl_write(sw, &ctl);
58 		if (res)
59 			return res;
60 		ctl.bit_banging_enable = 0;
61 		return tb_eeprom_ctl_write(sw, &ctl);
62 	}
63 }
64 
65 /*
66  * tb_eeprom_transfer - transfer one bit
67  *
68  * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->fl_do.
69  * If TB_EEPROM_OUT is passed, then ctl->fl_di will be written.
70  */
71 static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
72 			      enum tb_eeprom_transfer direction)
73 {
74 	int res;
75 	if (direction == TB_EEPROM_OUT) {
76 		res = tb_eeprom_ctl_write(sw, ctl);
77 		if (res)
78 			return res;
79 	}
80 	ctl->fl_sk = 1;
81 	res = tb_eeprom_ctl_write(sw, ctl);
82 	if (res)
83 		return res;
84 	if (direction == TB_EEPROM_IN) {
85 		res = tb_eeprom_ctl_read(sw, ctl);
86 		if (res)
87 			return res;
88 	}
89 	ctl->fl_sk = 0;
90 	return tb_eeprom_ctl_write(sw, ctl);
91 }
92 
93 /*
94  * tb_eeprom_out - write one byte to the bus
95  */
96 static int tb_eeprom_out(struct tb_switch *sw, u8 val)
97 {
98 	struct tb_eeprom_ctl ctl;
99 	int i;
100 	int res = tb_eeprom_ctl_read(sw, &ctl);
101 	if (res)
102 		return res;
103 	for (i = 0; i < 8; i++) {
104 		ctl.fl_di = val & 0x80;
105 		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
106 		if (res)
107 			return res;
108 		val <<= 1;
109 	}
110 	return 0;
111 }
112 
113 /*
114  * tb_eeprom_in - read one byte from the bus
115  */
116 static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
117 {
118 	struct tb_eeprom_ctl ctl;
119 	int i;
120 	int res = tb_eeprom_ctl_read(sw, &ctl);
121 	if (res)
122 		return res;
123 	*val = 0;
124 	for (i = 0; i < 8; i++) {
125 		*val <<= 1;
126 		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
127 		if (res)
128 			return res;
129 		*val |= ctl.fl_do;
130 	}
131 	return 0;
132 }
133 
134 /*
135  * tb_eeprom_get_drom_offset - get drom offset within eeprom
136  */
137 static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
138 {
139 	struct tb_cap_plug_events cap;
140 	int res;
141 
142 	if (!sw->cap_plug_events) {
143 		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
144 		return -ENODEV;
145 	}
146 	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
147 			     sizeof(cap) / 4);
148 	if (res)
149 		return res;
150 
151 	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
152 		tb_sw_warn(sw, "no NVM\n");
153 		return -ENODEV;
154 	}
155 
156 	if (cap.drom_offset > 0xffff) {
157 		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
158 				cap.drom_offset);
159 		return -ENXIO;
160 	}
161 	*offset = cap.drom_offset;
162 	return 0;
163 }
164 
165 /*
166  * tb_eeprom_read_n - read count bytes from offset into val
167  */
168 static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
169 		size_t count)
170 {
171 	u16 drom_offset;
172 	int i, res;
173 
174 	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
175 	if (res)
176 		return res;
177 
178 	offset += drom_offset;
179 
180 	res = tb_eeprom_active(sw, true);
181 	if (res)
182 		return res;
183 	res = tb_eeprom_out(sw, 3);
184 	if (res)
185 		return res;
186 	res = tb_eeprom_out(sw, offset >> 8);
187 	if (res)
188 		return res;
189 	res = tb_eeprom_out(sw, offset);
190 	if (res)
191 		return res;
192 	for (i = 0; i < count; i++) {
193 		res = tb_eeprom_in(sw, val + i);
194 		if (res)
195 			return res;
196 	}
197 	return tb_eeprom_active(sw, false);
198 }
199 
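/*
 * tb_crc8 - CRC8 over len bytes of data
 *
 * Bitwise CRC-8 with polynomial 0x07 and initial value 0xff. Used to
 * verify the uid_crc8 checksum stored in byte 0 of the DROM header.
 */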
200 static u8 tb_crc8(u8 *data, int len)
201 {
202 	int i, j;
203 	u8 val = 0xff;
204 	for (i = 0; i < len; i++) {
205 		val ^= data[i];
206 		for (j = 0; j < 8; j++)
207 			val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
208 	}
209 	return val;
210 }
211 
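/*
 * tb_crc32 - CRC32C over len bytes of data
 *
 * Wraps the kernel's __crc32c_le() with the usual initial and final
 * inversion. Used to verify the data_crc32 field of the DROM header.
 */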
212 static u32 tb_crc32(void *data, size_t len)
213 {
214 	return ~__crc32c_le(~0, data, len);
215 }
216 
217 #define TB_DROM_DATA_START		13
218 #define TB_DROM_HEADER_SIZE		22
219 #define USB4_DROM_HEADER_SIZE		16
220 
221 struct tb_drom_header {
222 	/* BYTE 0 */
223 	u8 uid_crc8; /* checksum for uid */
224 	/* BYTES 1-8 */
225 	u64 uid;
226 	/* BYTES 9-12 */
227 	u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
228 	/* BYTE 13 */
229 	u8 device_rom_revision; /* 1 for TBT, 3 for USB4 */
230 	u16 data_len:12;
231 	u8 reserved:4;
232 	/* BYTES 16-21 - Only for TBT DROM, nonexistent in USB4 DROM */
233 	u16 vendor_id;
234 	u16 model_id;
235 	u8 model_rev;
236 	u8 eeprom_rev;
237 } __packed;
238 
239 enum tb_drom_entry_type {
240 	/* force unsigned to prevent "one-bit signed bitfield" warning */
241 	TB_DROM_ENTRY_GENERIC = 0U,
242 	TB_DROM_ENTRY_PORT,
243 };
244 
245 struct tb_drom_entry_header {
246 	u8 len;
247 	u8 index:6;
248 	bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
249 	enum tb_drom_entry_type type:1;
250 } __packed;
251 
252 struct tb_drom_entry_generic {
253 	struct tb_drom_entry_header header;
254 	u8 data[];
255 } __packed;
256 
257 struct tb_drom_entry_port {
258 	/* BYTES 0-1 */
259 	struct tb_drom_entry_header header;
260 	/* BYTE 2 */
261 	u8 dual_link_port_rid:4;
262 	u8 link_nr:1;
263 	u8 unknown1:2;
264 	bool has_dual_link_port:1;
265 
266 	/* BYTE 3 */
267 	u8 dual_link_port_nr:6;
268 	u8 unknown2:2;
269 
270 	/* BYTES 4 - 5 TODO decode */
271 	u8 micro2:4;
272 	u8 micro1:4;
273 	u8 micro3;
274 
275 	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
276 	u8 peer_port_rid:4;
277 	u8 unknown3:3;
278 	bool has_peer_port:1;
279 	u8 peer_port_nr:6;
280 	u8 unknown4:2;
281 } __packed;
282 
283 /* USB4 product descriptor */
284 struct tb_drom_entry_desc {
285 	struct tb_drom_entry_header header;
286 	u16 bcdUSBSpec;
287 	u16 idVendor;
288 	u16 idProduct;
289 	u16 bcdProductFWRevision;
290 	u32 TID;
291 	u8 productHWRevision;
292 };
293 
294 /**
295  * tb_drom_read_uid_only() - Read UID directly from DROM
296  * @sw: Router whose UID to read
297  * @uid: UID is placed here
298  *
299  * Does not use the cached copy in sw->drom. Used during resume to check switch
300  * identity.
301  */
302 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
303 {
304 	u8 data[9];
305 	u8 crc;
306 	int res;
307 
308 	/* read uid */
309 	res = tb_eeprom_read_n(sw, 0, data, 9);
310 	if (res)
311 		return res;
312 
313 	crc = tb_crc8(data + 1, 8);
314 	if (crc != data[0]) {
315 		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
316 				data[0], crc);
317 		return -EIO;
318 	}
319 
320 	*uid = *(u64 *)(data+1);
321 	return 0;
322 }
323 
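/*
 * tb_drom_parse_entry_generic - parse a generic DROM entry
 *
 * Index 1 carries the vendor name string, index 2 the device name string
 * and index 9 the USB4 product descriptor, which supplies the vendor and
 * device IDs if they have not been set already. Other indices are ignored.
 */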
324 static int tb_drom_parse_entry_generic(struct tb_switch *sw,
325 		struct tb_drom_entry_header *header)
326 {
327 	const struct tb_drom_entry_generic *entry =
328 		(const struct tb_drom_entry_generic *)header;
329 
330 	switch (header->index) {
331 	case 1:
332 		/* Length includes the 2-byte header, so subtract it before copying */
333 		sw->vendor_name = kstrndup(entry->data,
334 			header->len - sizeof(*header), GFP_KERNEL);
335 		if (!sw->vendor_name)
336 			return -ENOMEM;
337 		break;
338 
339 	case 2:
340 		sw->device_name = kstrndup(entry->data,
341 			header->len - sizeof(*header), GFP_KERNEL);
342 		if (!sw->device_name)
343 			return -ENOMEM;
344 		break;
345 	case 9: {
346 		const struct tb_drom_entry_desc *desc =
347 			(const struct tb_drom_entry_desc *)entry;
348 
349 		if (!sw->vendor && !sw->device) {
350 			sw->vendor = desc->idVendor;
351 			sw->device = desc->idProduct;
352 		}
353 		break;
354 	}
355 	}
356 
357 	return 0;
358 }
359 
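/*
 * tb_drom_parse_entry_port - parse a port DROM entry
 *
 * Fills in the link number and dual link peer for the port referenced by
 * the entry. Entries for ports the router does not have and for disabled
 * ports are skipped; ports whose type is not TB_TYPE_PORT are left
 * untouched.
 */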
360 static int tb_drom_parse_entry_port(struct tb_switch *sw,
361 				    struct tb_drom_entry_header *header)
362 {
363 	struct tb_port *port;
364 	int res;
365 	enum tb_port_type type;
366 
367 	/*
368 	 * Some DROMs list more ports than the controller actually has
369 	 * so we skip those but allow the parser to continue.
370 	 */
371 	if (header->index > sw->config.max_port_number) {
372 		dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
373 		return 0;
374 	}
375 
376 	port = &sw->ports[header->index];
377 	port->disabled = header->port_disabled;
378 	if (port->disabled)
379 		return 0;
380 
381 	res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
382 	if (res)
383 		return res;
384 	type &= 0xffffff;
385 
386 	if (type == TB_TYPE_PORT) {
387 		struct tb_drom_entry_port *entry = (void *) header;
388 		if (header->len != sizeof(*entry)) {
389 			tb_sw_warn(sw,
390 				"port entry has size %#x (expected %#zx)\n",
391 				header->len, sizeof(struct tb_drom_entry_port));
392 			return -EIO;
393 		}
394 		port->link_nr = entry->link_nr;
395 		if (entry->has_dual_link_port)
396 			port->dual_link_port =
397 				&port->sw->ports[entry->dual_link_port_nr];
398 	}
399 	return 0;
400 }
401 
402 /*
403  * tb_drom_parse_entries - parse the sequence of drom entries
404  *
405  * The DROM must have been copied to sw->drom beforehand.
406  */
407 static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size)
408 {
409 	struct tb_drom_header *header = (void *) sw->drom;
410 	u16 pos = header_size;
411 	u16 drom_size = header->data_len + TB_DROM_DATA_START;
412 	int res;
413 
414 	while (pos < drom_size) {
415 		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
416 		if (pos + 1 == drom_size || pos + entry->len > drom_size
417 				|| !entry->len) {
418 			tb_sw_warn(sw, "DROM buffer overrun\n");
419 			return -EIO;
420 		}
421 
422 		switch (entry->type) {
423 		case TB_DROM_ENTRY_GENERIC:
424 			res = tb_drom_parse_entry_generic(sw, entry);
425 			break;
426 		case TB_DROM_ENTRY_PORT:
427 			res = tb_drom_parse_entry_port(sw, entry);
428 			break;
429 		}
430 		if (res)
431 			return res;
432 
433 		pos += entry->len;
434 	}
435 	return 0;
436 }
437 
438 /*
439  * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
440  */
441 static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
442 {
443 	struct device *dev = &sw->tb->nhi->pdev->dev;
444 	int len, res;
445 
446 	len = device_property_count_u8(dev, "ThunderboltDROM");
447 	if (len < 0 || len < sizeof(struct tb_drom_header))
448 		return -EINVAL;
449 
450 	sw->drom = kmalloc(len, GFP_KERNEL);
451 	if (!sw->drom)
452 		return -ENOMEM;
453 
454 	res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
455 									len);
456 	if (res)
457 		goto err;
458 
459 	*size = ((struct tb_drom_header *)sw->drom)->data_len +
460 							  TB_DROM_DATA_START;
461 	if (*size > len)
462 		goto err;
463 
464 	return 0;
465 
466 err:
467 	kfree(sw->drom);
468 	sw->drom = NULL;
469 	return -EINVAL;
470 }
471 
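/*
 * tb_drom_copy_nvm - copy drom from NVM to sw->drom
 *
 * Reads the DROM through the DMA port. The 16-bit data length is stored
 * at offset 14 from the start of the DROM; the full image additionally
 * contains the CRC8, UID and CRC32 fields (1 + 8 + 4 bytes).
 */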
472 static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
473 {
474 	u16 drom_offset;
475 	int ret;
476 
477 	if (!sw->dma_port)
478 		return -ENODEV;
479 
480 	ret = tb_eeprom_get_drom_offset(sw, &drom_offset);
481 	if (ret)
482 		return ret;
483 
484 	if (!drom_offset)
485 		return -ENODEV;
486 
487 	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
488 				  sizeof(*size));
489 	if (ret)
490 		return ret;
491 
492 	/* Size includes CRC8 + UID + CRC32 */
493 	*size += 1 + 8 + 4;
494 	sw->drom = kzalloc(*size, GFP_KERNEL);
495 	if (!sw->drom)
496 		return -ENOMEM;
497 
498 	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
499 	if (ret)
500 		goto err_free;
501 
502 	/*
503 	 * Read UID from the minimal DROM because the one in NVM is just
504 	 * a placeholder.
505 	 */
506 	tb_drom_read_uid_only(sw, &sw->uid);
507 	return 0;
508 
509 err_free:
510 	kfree(sw->drom);
511 	sw->drom = NULL;
512 	return ret;
513 }
514 
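/*
 * usb4_copy_drom - copy drom to sw->drom using USB4 router operations
 *
 * Same layout as tb_drom_copy_nvm() but the DROM is read through
 * usb4_switch_drom_read() instead of the DMA port.
 */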
515 static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
516 {
517 	int ret;
518 
519 	ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
520 	if (ret)
521 		return ret;
522 
523 	/* Size includes CRC8 + UID + CRC32 */
524 	*size += 1 + 8 + 4;
525 	sw->drom = kzalloc(*size, GFP_KERNEL);
526 	if (!sw->drom)
527 		return -ENOMEM;
528 
529 	ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
530 	if (ret) {
531 		kfree(sw->drom);
532 		sw->drom = NULL;
533 	}
534 
535 	return ret;
536 }
537 
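/*
 * tb_drom_bit_bang - read drom by bit banging the eeprom
 *
 * Used for non-USB4 device routers. The length field at offset 14 is
 * masked to 10 bits and TB_DROM_DATA_START is added to get the total
 * DROM size before the whole image is read.
 */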
538 static int tb_drom_bit_bang(struct tb_switch *sw, u16 *size)
539 {
540 	int ret;
541 
542 	ret = tb_eeprom_read_n(sw, 14, (u8 *)size, 2);
543 	if (ret)
544 		return ret;
545 
546 	*size &= 0x3ff;
547 	*size += TB_DROM_DATA_START;
548 
549 	tb_sw_dbg(sw, "reading DROM (length: %#x)\n", *size);
550 	if (*size < sizeof(struct tb_drom_header)) {
551 		tb_sw_warn(sw, "DROM too small, aborting\n");
552 		return -EIO;
553 	}
554 
555 	sw->drom = kzalloc(*size, GFP_KERNEL);
556 	if (!sw->drom)
557 		return -ENOMEM;
558 
559 	ret = tb_eeprom_read_n(sw, 0, sw->drom, *size);
560 	if (ret)
561 		goto err;
562 
563 	return 0;
564 
565 err:
566 	kfree(sw->drom);
567 	sw->drom = NULL;
568 	return ret;
569 }
570 
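/*
 * tb_drom_parse_v1 - parse a pre-USB4 (TBT) DROM
 *
 * Verifies the UID CRC8 (hard failure) and the data CRC32 (warning only),
 * takes the vendor and model IDs from the header and then parses the
 * entries that follow the 22 byte header.
 */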
571 static int tb_drom_parse_v1(struct tb_switch *sw)
572 {
573 	const struct tb_drom_header *header =
574 		(const struct tb_drom_header *)sw->drom;
575 	u32 crc;
576 
577 	crc = tb_crc8((u8 *) &header->uid, 8);
578 	if (crc != header->uid_crc8) {
579 		tb_sw_warn(sw,
580 			"DROM UID CRC8 mismatch (expected: %#x, got: %#x)\n",
581 			header->uid_crc8, crc);
582 		return -EIO;
583 	}
584 	if (!sw->uid)
585 		sw->uid = header->uid;
586 	sw->vendor = header->vendor_id;
587 	sw->device = header->model_id;
588 
589 	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
590 	if (crc != header->data_crc32) {
591 		tb_sw_warn(sw,
592 			"DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
593 			header->data_crc32, crc);
594 	}
595 
596 	return tb_drom_parse_entries(sw, TB_DROM_HEADER_SIZE);
597 }
598 
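/*
 * usb4_drom_parse - parse a USB4 DROM
 *
 * The USB4 header carries no vendor or model fields, so only the data
 * CRC32 is checked (warning only) before the entries following the 16
 * byte header are parsed.
 */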
599 static int usb4_drom_parse(struct tb_switch *sw)
600 {
601 	const struct tb_drom_header *header =
602 		(const struct tb_drom_header *)sw->drom;
603 	u32 crc;
604 
605 	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
606 	if (crc != header->data_crc32) {
607 		tb_sw_warn(sw,
608 			   "DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
609 			   header->data_crc32, crc);
610 	}
611 
612 	return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE);
613 }
614 
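/*
 * tb_drom_parse - verify and parse sw->drom
 *
 * Checks that the size derived from the header matches what was read and
 * dispatches on device_rom_revision: 3 is parsed as a USB4 DROM, 1 and any
 * unknown revision (with a warning) as a TBT DROM. On failure sw->drom is
 * freed and set back to NULL.
 */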
615 static int tb_drom_parse(struct tb_switch *sw, u16 size)
616 {
617 	const struct tb_drom_header *header = (const void *)sw->drom;
618 	int ret;
619 
620 	if (header->data_len + TB_DROM_DATA_START != size) {
621 		tb_sw_warn(sw, "DROM size mismatch\n");
622 		ret = -EIO;
623 		goto err;
624 	}
625 
626 	tb_sw_dbg(sw, "DROM version: %d\n", header->device_rom_revision);
627 
628 	switch (header->device_rom_revision) {
629 	case 3:
630 		ret = usb4_drom_parse(sw);
631 		break;
632 	default:
633 		tb_sw_warn(sw, "DROM device_rom_revision %#x unknown\n",
634 			   header->device_rom_revision);
635 		fallthrough;
636 	case 1:
637 		ret = tb_drom_parse_v1(sw);
638 		break;
639 	}
640 
641 	if (ret) {
642 		tb_sw_warn(sw, "parsing DROM failed\n");
643 		goto err;
644 	}
645 
646 	return 0;
647 
648 err:
649 	kfree(sw->drom);
650 	sw->drom = NULL;
651 
652 	return ret;
653 }
654 
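/*
 * tb_drom_host_read - read and parse the DROM of a host router
 *
 * USB4 hosts read the UID and DROM through router operations. For older
 * hosts the EFI supplied DROM is tried first, then the copy in NVM, and
 * if neither is available only the UID is read. Returns 0 even if no
 * DROM could be parsed.
 */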
655 static int tb_drom_host_read(struct tb_switch *sw)
656 {
657 	u16 size;
658 
659 	if (tb_switch_is_usb4(sw)) {
660 		usb4_switch_read_uid(sw, &sw->uid);
661 		if (!usb4_copy_drom(sw, &size))
662 			return tb_drom_parse(sw, size);
663 	} else {
664 		if (!tb_drom_copy_efi(sw, &size))
665 			return tb_drom_parse(sw, size);
666 
667 		if (!tb_drom_copy_nvm(sw, &size))
668 			return tb_drom_parse(sw, size);
669 
670 		tb_drom_read_uid_only(sw, &sw->uid);
671 	}
672 
673 	return 0;
674 }
675 
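/*
 * tb_drom_device_read - read and parse the DROM of a device router
 *
 * USB4 devices read the UID and DROM through router operations, older
 * devices by bit banging the EEPROM. Unlike for host routers a failure
 * here is propagated to the caller.
 */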
676 static int tb_drom_device_read(struct tb_switch *sw)
677 {
678 	u16 size;
679 	int ret;
680 
681 	if (tb_switch_is_usb4(sw)) {
682 		usb4_switch_read_uid(sw, &sw->uid);
683 		ret = usb4_copy_drom(sw, &size);
684 	} else {
685 		ret = tb_drom_bit_bang(sw, &size);
686 	}
687 
688 	if (ret)
689 		return ret;
690 
691 	return tb_drom_parse(sw, size);
692 }
693 
694 /**
695  * tb_drom_read() - Copy DROM to sw->drom and parse it
696  * @sw: Router whose DROM to read and parse
697  *
698  * This function reads router DROM and if successful parses the entries and
699  * populates the fields in @sw accordingly. Can be called for any router
700  * generation.
701  *
702  * Returns %0 in case of success and negative errno otherwise.
703  */
704 int tb_drom_read(struct tb_switch *sw)
705 {
706 	if (sw->drom)
707 		return 0;
708 
709 	if (!tb_route(sw))
710 		return tb_drom_host_read(sw);
711 	return tb_drom_device_read(sw);
712 }
713