xref: /openbmc/linux/drivers/thunderbolt/eeprom.c (revision 27ab1c1c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - eeprom access
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"

/**
 * tb_eeprom_ctl_write() - write control word
 */
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

/**
 * tb_eeprom_ctl_read() - read control word
 */
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

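/*
 * Both helpers above operate on the EEPROM control word, which sits four
 * dwords into the TB_CAP_PLUG_EVENTS capability. Of struct tb_eeprom_ctl
 * (see tb_regs.h) this file only uses access_high/access_low, which gate
 * EEPROM access (see tb_eeprom_active() below), plus clock, data_out and
 * data_in for the bit-banged serial protocol.
 */
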
enum tb_eeprom_transfer {
	TB_EEPROM_IN,
	TB_EEPROM_OUT,
};

/**
 * tb_eeprom_active() - enable rom access
 *
 * WARNING: Always disable access after usage. Otherwise the controller will
 * fail to reprobe.
 */
static int tb_eeprom_active(struct tb_switch *sw, bool enable)
{
	struct tb_eeprom_ctl ctl;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	if (enable) {
		ctl.access_high = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_low = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	} else {
		ctl.access_low = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_high = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	}
}

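/*
 * The enable/disable sequences above are intentionally split into two
 * writes: when enabling, access_high is set before access_low is cleared;
 * when disabling, access_low is set before access_high is cleared. A
 * typical caller brackets all bus traffic between the two, roughly:
 *
 *	res = tb_eeprom_active(sw, true);
 *	if (res)
 *		return res;
 *	...bit-banged reads/writes...
 *	return tb_eeprom_active(sw, false);
 *
 * (see tb_eeprom_read_n() below for the real thing).
 */
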
/**
 * tb_eeprom_transfer() - transfer one bit
 *
 * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
 * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
 */
static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
			      enum tb_eeprom_transfer direction)
{
	int res;
	if (direction == TB_EEPROM_OUT) {
		res = tb_eeprom_ctl_write(sw, ctl);
		if (res)
			return res;
	}
	ctl->clock = 1;
	res = tb_eeprom_ctl_write(sw, ctl);
	if (res)
		return res;
	if (direction == TB_EEPROM_IN) {
		res = tb_eeprom_ctl_read(sw, ctl);
		if (res)
			return res;
	}
	ctl->clock = 0;
	return tb_eeprom_ctl_write(sw, ctl);
}

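/*
 * In other words, one transfer is a single clock pulse: an outgoing bit is
 * presented on data_out before the rising edge, and an incoming bit is
 * sampled from data_in while the clock is still high, before the final
 * write pulls the clock low again.
 */
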
/**
 * tb_eeprom_out() - write one byte to the bus
 */
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	for (i = 0; i < 8; i++) {
		ctl.data_out = val & 0x80;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
		if (res)
			return res;
		val <<= 1;
	}
	return 0;
}

/**
 * tb_eeprom_in() - read one byte from the bus
 */
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	*val = 0;
	for (i = 0; i < 8; i++) {
		*val <<= 1;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
		if (res)
			return res;
		*val |= ctl.data_in;
	}
	return 0;
}

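/*
 * Both byte helpers shift most-significant-bit first: tb_eeprom_out()
 * clocks out bit 7 of val first, and tb_eeprom_in() shifts each received
 * bit into the low end of *val, so after eight transfers the first bit
 * received ends up in bit 7.
 */
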
/**
 * tb_eeprom_get_drom_offset() - get drom offset within eeprom
 */
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
{
	struct tb_cap_plug_events cap;
	int res;

	if (!sw->cap_plug_events) {
		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
		return -ENODEV;
	}
	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
			 sizeof(cap) / 4);
	if (res)
		return res;

	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
		tb_sw_warn(sw, "no NVM\n");
		return -ENODEV;
	}

	if (cap.drom_offset > 0xffff) {
		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
			   cap.drom_offset);
		return -ENXIO;
	}
	*offset = cap.drom_offset;
	return 0;
}

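/*
 * The drom_offset register is wider than 16 bits, but the bit-banged read
 * command below addresses the EEPROM with only two address bytes, so any
 * offset that does not fit in a u16 is rejected with -ENXIO above.
 */
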
/**
 * tb_eeprom_read_n() - read count bytes from offset into val
 */
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
		size_t count)
{
	u16 drom_offset;
	int i, res;

	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

	offset += drom_offset;

	res = tb_eeprom_active(sw, true);
	if (res)
		return res;
	res = tb_eeprom_out(sw, 3);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset >> 8);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset);
	if (res)
		return res;
	for (i = 0; i < count; i++) {
		res = tb_eeprom_in(sw, val + i);
		if (res)
			return res;
	}
	return tb_eeprom_active(sw, false);
}

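/*
 * The sequence above (opcode 3, then a big-endian 16-bit address, then a
 * stream of input bytes) matches the classic SPI EEPROM READ command.
 * Illustrative use, e.g. fetching the 9-byte UID block at the start of
 * the DROM:
 *
 *	u8 buf[9];
 *	int res = tb_eeprom_read_n(sw, 0, buf, sizeof(buf));
 *
 * which is exactly what tb_drom_read_uid_only() does further down.
 */
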
static u8 tb_crc8(u8 *data, int len)
{
	int i, j;
	u8 val = 0xff;
	for (i = 0; i < len; i++) {
		val ^= data[i];
		for (j = 0; j < 8; j++)
			val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
	}
	return val;
}

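/*
 * This is an MSB-first CRC-8 with polynomial 0x07 and initial value 0xff
 * (no reflection, no final XOR); it is used here only for the 8-byte UID
 * checksum in the DROM header.
 */
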
static u32 tb_crc32(void *data, size_t len)
{
	return ~__crc32c_le(~0, data, len);
}

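/*
 * __crc32c_le() is the kernel's CRC32C (Castagnoli) routine; inverting the
 * seed and the result gives the conventional CRC32C presentation expected
 * by the data_crc32 field in the DROM header.
 */
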
#define TB_DROM_DATA_START 13
struct tb_drom_header {
	/* BYTE 0 */
	u8 uid_crc8; /* checksum for uid */
	/* BYTES 1-8 */
	u64 uid;
	/* BYTES 9-12 */
	u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
	/* BYTE 13 */
	u8 device_rom_revision; /* should be <= 1 */
	u16 data_len:10;
	u8 __unknown1:6;
	/* BYTES 16-21 */
	u16 vendor_id;
	u16 model_id;
	u8 model_rev;
	u8 eeprom_rev;
} __packed;

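/*
 * Note that data_len counts bytes starting at TB_DROM_DATA_START (byte 13,
 * i.e. everything after the CRC8 + UID + CRC32 preamble), so the total
 * DROM size is always data_len + TB_DROM_DATA_START. The parsing code
 * below relies on this in several places.
 */
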
enum tb_drom_entry_type {
	/* force unsigned to prevent "one-bit signed bitfield" warning */
	TB_DROM_ENTRY_GENERIC = 0U,
	TB_DROM_ENTRY_PORT,
};

struct tb_drom_entry_header {
	u8 len;
	u8 index:6;
	bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
	enum tb_drom_entry_type type:1;
} __packed;

struct tb_drom_entry_generic {
	struct tb_drom_entry_header header;
	u8 data[];
} __packed;

struct tb_drom_entry_port {
	/* BYTES 0-1 */
	struct tb_drom_entry_header header;
	/* BYTE 2 */
	u8 dual_link_port_rid:4;
	u8 link_nr:1;
	u8 unknown1:2;
	bool has_dual_link_port:1;

	/* BYTE 3 */
	u8 dual_link_port_nr:6;
	u8 unknown2:2;

	/* BYTES 4 - 5 TODO decode */
	u8 micro2:4;
	u8 micro1:4;
	u8 micro3;

	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
	u8 peer_port_rid:4;
	u8 unknown3:3;
	bool has_peer_port:1;
	u8 peer_port_nr:6;
	u8 unknown4:2;
} __packed;

/**
 * tb_drom_read_uid_only() - read uid directly from drom
 *
 * Does not use the cached copy in sw->drom. Used during resume to check switch
 * identity.
 */
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
	u8 data[9];
	u8 crc;
	int res;

	/* read uid */
	res = tb_eeprom_read_n(sw, 0, data, 9);
	if (res)
		return res;

	crc = tb_crc8(data + 1, 8);
	if (crc != data[0]) {
		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
				data[0], crc);
		return -EIO;
	}

	*uid = *(u64 *)(data + 1);
	return 0;
}

static int tb_drom_parse_entry_generic(struct tb_switch *sw,
		struct tb_drom_entry_header *header)
{
	const struct tb_drom_entry_generic *entry =
		(const struct tb_drom_entry_generic *)header;

	switch (header->index) {
	case 1:
		/* Length includes the 2-byte header, so subtract it before copying */
		sw->vendor_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->vendor_name)
			return -ENOMEM;
		break;

	case 2:
		sw->device_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->device_name)
			return -ENOMEM;
		break;
	}

	return 0;
}

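/*
 * For example (purely illustrative bytes): a generic entry 08 01 41 70 70
 * 6c 65 00 has len = 8 and index = 1, so the six data bytes "Apple\0"
 * would be copied into sw->vendor_name; an index of 2 would populate
 * sw->device_name the same way.
 */
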
static int tb_drom_parse_entry_port(struct tb_switch *sw,
				    struct tb_drom_entry_header *header)
{
	struct tb_port *port;
	int res;
	enum tb_port_type type;

	/*
	 * Some DROMs list more ports than the controller actually has
	 * so we skip those but allow the parser to continue.
	 */
	if (header->index > sw->config.max_port_number) {
		dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
		return 0;
	}

	port = &sw->ports[header->index];
	port->disabled = header->port_disabled;
	if (port->disabled)
		return 0;

	res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
	if (res)
		return res;
	type &= 0xffffff;

	if (type == TB_TYPE_PORT) {
		struct tb_drom_entry_port *entry = (void *) header;
		if (header->len != sizeof(*entry)) {
			tb_sw_warn(sw,
				"port entry has size %#x (expected %#zx)\n",
				header->len, sizeof(struct tb_drom_entry_port));
			return -EIO;
		}
		port->link_nr = entry->link_nr;
		if (entry->has_dual_link_port)
			port->dual_link_port =
				&port->sw->ports[entry->dual_link_port_nr];
	}
	return 0;
}

/**
 * tb_drom_parse_entries() - parse the linked list of drom entries
 *
 * Drom must have been copied to sw->drom.
 */
static int tb_drom_parse_entries(struct tb_switch *sw)
{
	struct tb_drom_header *header = (void *) sw->drom;
	u16 pos = sizeof(*header);
	u16 drom_size = header->data_len + TB_DROM_DATA_START;
	int res;

	while (pos < drom_size) {
		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
		if (pos + 1 == drom_size || pos + entry->len > drom_size
				|| !entry->len) {
			tb_sw_warn(sw, "DROM buffer overrun\n");
			return -EILSEQ;
		}

		switch (entry->type) {
		case TB_DROM_ENTRY_GENERIC:
			res = tb_drom_parse_entry_generic(sw, entry);
			break;
		case TB_DROM_ENTRY_PORT:
			res = tb_drom_parse_entry_port(sw, entry);
			break;
		}
		if (res)
			return res;

		pos += entry->len;
	}
	return 0;
}

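/*
 * Each entry is length-prefixed and the length covers the 2-byte entry
 * header, so advancing pos by entry->len steps over header and payload in
 * one go. The !entry->len check above is what prevents a corrupt,
 * zero-length entry from turning this into an endless loop.
 */
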
/**
 * tb_drom_copy_efi() - copy drom supplied by EFI to sw->drom if present
 */
static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
{
	struct device *dev = &sw->tb->nhi->pdev->dev;
	int len, res;

	len = device_property_count_u8(dev, "ThunderboltDROM");
	if (len < 0 || len < sizeof(struct tb_drom_header))
		return -EINVAL;

	sw->drom = kmalloc(len, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
					    len);
	if (res)
		goto err;

	*size = ((struct tb_drom_header *)sw->drom)->data_len +
		TB_DROM_DATA_START;
	if (*size > len)
		goto err;

	return 0;

err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EINVAL;
}

static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
{
	u32 drom_offset;
	int ret;

	if (!sw->dma_port)
		return -ENODEV;

	ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
			 sw->cap_plug_events + 12, 1);
	if (ret)
		return ret;

	if (!drom_offset)
		return -ENODEV;

	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
				  sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
	if (ret)
		goto err_free;

	/*
	 * Read UID from the minimal DROM because the one in NVM is just
	 * a placeholder.
	 */
	tb_drom_read_uid_only(sw, &sw->uid);
	return 0;

err_free:
	kfree(sw->drom);
	sw->drom = NULL;
	return ret;
}

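/*
 * The read at drom_offset + 14 fetches the length word of the DROM header
 * (data_len lives in bytes 14-15), and adding 1 + 8 + 4 for the CRC8, UID
 * and CRC32 preamble yields the full image size; this matches the
 * data_len + TB_DROM_DATA_START arithmetic used elsewhere in this file.
 */
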
static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
{
	int ret;

	ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
	if (ret) {
		kfree(sw->drom);
		sw->drom = NULL;
	}

	return ret;
}

static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
			  size_t count)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_drom_read(sw, offset, val, count);
	return tb_eeprom_read_n(sw, offset, val, count);
}

/**
 * tb_drom_read() - copy drom to sw->drom and parse it
 */
int tb_drom_read(struct tb_switch *sw)
{
	u16 size;
	u32 crc;
	struct tb_drom_header *header;
	int res, retries = 1;

	if (sw->drom)
		return 0;

	if (tb_route(sw) == 0) {
		/*
		 * Apple's NHI EFI driver supplies a DROM for the root switch
		 * in a device property. Use it if available.
		 */
		if (tb_drom_copy_efi(sw, &size) == 0)
			goto parse;

		/* Non-Apple hardware has the DROM as part of NVM */
		if (tb_drom_copy_nvm(sw, &size) == 0)
			goto parse;

		/*
		 * USB4 hosts may support reading DROM through router
		 * operations.
		 */
		if (tb_switch_is_usb4(sw)) {
			usb4_switch_read_uid(sw, &sw->uid);
			if (!usb4_copy_host_drom(sw, &size))
				goto parse;
		} else {
			/*
			 * The root switch contains only a dummy drom
			 * (header only, no entries). Hardcode the
			 * configuration here.
			 */
			tb_drom_read_uid_only(sw, &sw->uid);
		}

		return 0;
	}

	res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
	if (res)
		return res;
	size &= 0x3ff;
	size += TB_DROM_DATA_START;
	tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
	if (size < sizeof(*header)) {
		tb_sw_warn(sw, "drom too small, aborting\n");
		return -EIO;
	}

	sw->drom = kzalloc(size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;
	res = tb_drom_read_n(sw, 0, sw->drom, size);
	if (res)
		goto err;

parse:
	header = (void *) sw->drom;

	if (header->data_len + TB_DROM_DATA_START != size) {
		tb_sw_warn(sw, "drom size mismatch, aborting\n");
		goto err;
	}

	crc = tb_crc8((u8 *) &header->uid, 8);
	if (crc != header->uid_crc8) {
		tb_sw_warn(sw,
			"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
			header->uid_crc8, crc);
		goto err;
	}
	if (!sw->uid)
		sw->uid = header->uid;
	sw->vendor = header->vendor_id;
	sw->device = header->model_id;
	tb_check_quirks(sw);

	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
	if (crc != header->data_crc32) {
		tb_sw_warn(sw,
			"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
			header->data_crc32, crc);
	}

	if (header->device_rom_revision > 2)
		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
			header->device_rom_revision);

	res = tb_drom_parse_entries(sw);
	/* If the DROM parsing fails, wait a moment and retry once */
	if (res == -EILSEQ && retries--) {
		tb_sw_warn(sw, "parsing DROM failed, retrying\n");
		msleep(100);
		res = tb_drom_read_n(sw, 0, sw->drom, size);
		if (!res)
			goto parse;
	}

	return res;
err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EIO;
}
634