1 /*
2  * Copyright(c) 2015 - 2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <linux/firmware.h>
49 #include <linux/mutex.h>
50 #include <linux/module.h>
51 #include <linux/delay.h>
52 #include <linux/crc32.h>
53 
54 #include "hfi.h"
55 #include "trace.h"
56 
57 /*
58  * Make it easy to toggle firmware file name and if it gets loaded by
59  * editing the following. This may be something we do while in development
60  * but not necessarily something a user would ever need to use.
61  */
62 #define DEFAULT_FW_8051_NAME_FPGA "hfi_dc8051.bin"
63 #define DEFAULT_FW_8051_NAME_ASIC "hfi1_dc8051.fw"
64 #define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw"
65 #define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
66 #define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
67 #define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw"
68 #define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
69 #define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
70 #define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
71 #define HOST_INTERFACE_VERSION 1
72 
73 MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
74 MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
75 MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
76 MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);
77 
78 static uint fw_8051_load = 1;
79 static uint fw_fabric_serdes_load = 1;
80 static uint fw_pcie_serdes_load = 1;
81 static uint fw_sbus_load = 1;
82 
83 /* Firmware file names get set in hfi1_firmware_init() based on the above */
84 static char *fw_8051_name;
85 static char *fw_fabric_serdes_name;
86 static char *fw_sbus_name;
87 static char *fw_pcie_serdes_name;
88 
89 #define SBUS_MAX_POLL_COUNT 100
90 #define SBUS_COUNTER(reg, name) \
91 	(((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \
92 	 ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK)
93 
94 /*
95  * Firmware security header.
96  */
97 struct css_header {
98 	u32 module_type;
99 	u32 header_len;
100 	u32 header_version;
101 	u32 module_id;
102 	u32 module_vendor;
103 	u32 date;		/* BCD yyyymmdd */
104 	u32 size;		/* in DWORDs */
105 	u32 key_size;		/* in DWORDs */
106 	u32 modulus_size;	/* in DWORDs */
107 	u32 exponent_size;	/* in DWORDs */
108 	u32 reserved[22];
109 };
110 
111 /* expected field values */
112 #define CSS_MODULE_TYPE	   0x00000006
113 #define CSS_HEADER_LEN	   0x000000a1
114 #define CSS_HEADER_VERSION 0x00010000
115 #define CSS_MODULE_VENDOR  0x00008086
116 
117 #define KEY_SIZE      256
118 #define MU_SIZE		8
119 #define EXPONENT_SIZE	4
120 
121 /* size of platform configuration partition */
122 #define MAX_PLATFORM_CONFIG_FILE_SIZE 4096
123 
/* size of file of platform configuration encoded in format version 4 */
125 #define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
126 
127 /* the file itself */
128 struct firmware_file {
129 	struct css_header css_header;
130 	u8 modulus[KEY_SIZE];
131 	u8 exponent[EXPONENT_SIZE];
132 	u8 signature[KEY_SIZE];
133 	u8 firmware[];
134 };
135 
136 struct augmented_firmware_file {
137 	struct css_header css_header;
138 	u8 modulus[KEY_SIZE];
139 	u8 exponent[EXPONENT_SIZE];
140 	u8 signature[KEY_SIZE];
141 	u8 r2[KEY_SIZE];
142 	u8 mu[MU_SIZE];
143 	u8 firmware[];
144 };
145 
146 /* augmented file size difference */
147 #define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \
148 						sizeof(struct firmware_file))
149 
150 struct firmware_details {
151 	/* Linux core piece */
152 	const struct firmware *fw;
153 
154 	struct css_header *css_header;
155 	u8 *firmware_ptr;		/* pointer to binary data */
156 	u32 firmware_len;		/* length in bytes */
157 	u8 *modulus;			/* pointer to the modulus */
158 	u8 *exponent;			/* pointer to the exponent */
159 	u8 *signature;			/* pointer to the signature */
160 	u8 *r2;				/* pointer to r2 */
161 	u8 *mu;				/* pointer to mu */
162 	struct augmented_firmware_file dummy_header;
163 };
164 
165 /*
166  * The mutex protects fw_state, fw_err, and all of the firmware_details
167  * variables.
168  */
169 static DEFINE_MUTEX(fw_mutex);
170 enum fw_state {
171 	FW_EMPTY,
172 	FW_TRY,
173 	FW_FINAL,
174 	FW_ERR
175 };
176 
177 static enum fw_state fw_state = FW_EMPTY;
178 static int fw_err;
179 static struct firmware_details fw_8051;
180 static struct firmware_details fw_fabric;
181 static struct firmware_details fw_pcie;
182 static struct firmware_details fw_sbus;
183 
184 /* flags for turn_off_spicos() */
185 #define SPICO_SBUS   0x1
186 #define SPICO_FABRIC 0x2
187 #define ENABLE_SPICO_SMASK 0x1
188 
189 /* security block commands */
190 #define RSA_CMD_INIT  0x1
191 #define RSA_CMD_START 0x2
192 
193 /* security block status */
194 #define RSA_STATUS_IDLE   0x0
195 #define RSA_STATUS_ACTIVE 0x1
196 #define RSA_STATUS_DONE   0x2
197 #define RSA_STATUS_FAILED 0x3
198 
199 /* RSA engine timeout, in ms */
200 #define RSA_ENGINE_TIMEOUT 100 /* ms */
201 
202 /* hardware mutex timeout, in ms */
203 #define HM_TIMEOUT 10 /* ms */
204 
205 /* 8051 memory access timeout, in us */
206 #define DC8051_ACCESS_TIMEOUT 100 /* us */
207 
208 /* the number of fabric SerDes on the SBus */
209 #define NUM_FABRIC_SERDES 4
210 
211 /* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
212 #define SBUS_READ_COMPLETE 0x4
213 
214 /* SBus fabric SerDes addresses, one set per HFI */
215 static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
216 	{ 0x01, 0x02, 0x03, 0x04 },
217 	{ 0x28, 0x29, 0x2a, 0x2b }
218 };
219 
220 /* SBus PCIe SerDes addresses, one set per HFI */
221 static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = {
222 	{ 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16,
223 	  0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 },
224 	{ 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d,
225 	  0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d }
226 };
227 
228 /* SBus PCIe PCS addresses, one set per HFI */
229 const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = {
230 	{ 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17,
231 	  0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 },
232 	{ 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
233 	  0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e }
234 };
235 
236 /* SBus fabric SerDes broadcast addresses, one per HFI */
237 static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 };
238 static const u8 all_fabric_serdes_broadcast = 0xe1;
239 
240 /* SBus PCIe SerDes broadcast addresses, one per HFI */
241 const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
242 static const u8 all_pcie_serdes_broadcast = 0xe0;
243 
244 static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
245 	0,
246 	SYSTEM_TABLE_MAX,
247 	PORT_TABLE_MAX,
248 	RX_PRESET_TABLE_MAX,
249 	TX_PRESET_TABLE_MAX,
250 	QSFP_ATTEN_TABLE_MAX,
251 	VARIABLE_SETTINGS_TABLE_MAX
252 };
253 
254 /* forwards */
255 static void dispose_one_firmware(struct firmware_details *fdet);
256 static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
257 				       struct firmware_details *fdet);
258 static void dump_fw_version(struct hfi1_devdata *dd);
259 
260 /*
261  * Read a single 64-bit value from 8051 data memory.
262  *
263  * Expects:
264  * o caller to have already set up data read, no auto increment
265  * o caller to turn off read enable when finished
266  *
267  * The address argument is a byte offset.  Bits 0:2 in the address are
268  * ignored - i.e. the hardware will always do aligned 8-byte reads as if
269  * the lower bits are zero.
270  *
271  * Return 0 on success, -ENXIO on a read error (timeout).
272  */
/*
 * Read a single 64-bit value from 8051 data memory.
 *
 * Expects:
 * o caller to have already set up data read, no auto increment
 * o caller to turn off read enable when finished
 *
 * The address argument is a byte offset.  Bits 0:2 in the address are
 * ignored - i.e. the hardware will always do aligned 8-byte reads as if
 * the lower bits are zero.
 *
 * Return 0 on success, -ENXIO on a read error (timeout).
 */
static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
{
	u64 reg;
	int count;

	/* step 1: set the address, clear enable */
	reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
			<< DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
	/* step 2: enable - the same address with READ_ENA set starts the read */
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
		  reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);

	/*
	 * Wait until ACCESS_COMPLETED is set.  Bounded poll: give up after
	 * DC8051_ACCESS_TIMEOUT iterations, delaying 10ns between polls.
	 */
	count = 0;
	while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
		    & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
		    == 0) {
		count++;
		if (count > DC8051_ACCESS_TIMEOUT) {
			dd_dev_err(dd, "timeout reading 8051 data\n");
			return -ENXIO;
		}
		ndelay(10);
	}

	/* gather the data - read enable is deliberately left on (see above) */
	*result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);

	return 0;
}
304 
305 /*
306  * Read 8051 data starting at addr, for len bytes.  Will read in 8-byte chunks.
307  * Return 0 on success, -errno on error.
308  */
309 int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
310 {
311 	unsigned long flags;
312 	u32 done;
313 	int ret = 0;
314 
315 	spin_lock_irqsave(&dd->dc8051_memlock, flags);
316 
317 	/* data read set-up, no auto-increment */
318 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
319 
320 	for (done = 0; done < len; addr += 8, done += 8, result++) {
321 		ret = __read_8051_data(dd, addr, result);
322 		if (ret)
323 			break;
324 	}
325 
326 	/* turn off read enable */
327 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
328 
329 	spin_unlock_irqrestore(&dd->dc8051_memlock, flags);
330 
331 	return ret;
332 }
333 
334 /*
335  * Write data or code to the 8051 code or data RAM.
336  */
/*
 * Write data or code to the 8051 code or data RAM.
 *
 * code  - non-zero to select code RAM, zero for data RAM
 * start - byte offset within the selected RAM at which to begin writing
 * data  - source buffer; need not be 8-byte aligned
 * len   - number of bytes; a short final chunk (< 8 bytes) is zero-padded
 *
 * Return 0 on success, -ENXIO if the hardware does not signal completion
 * of a write within DC8051_ACCESS_TIMEOUT polls.
 */
static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
		      const u8 *data, u32 len)
{
	u64 reg;
	u32 offset;
	int aligned, count;

	/* check alignment - decides the fast path in the copy loop below */
	aligned = ((unsigned long)data & 0x7) == 0;

	/* write set-up: select the RAM and auto-increment the address */
	reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull)
		| DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK;
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);

	reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
			<< DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
		| DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK;
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);

	/* write, one 64-bit quantity at a time */
	for (offset = 0; offset < len; offset += 8) {
		int bytes = len - offset;

		if (bytes < 8) {
			/* partial final chunk: zero-pad to a full 8 bytes */
			reg = 0;
			memcpy(&reg, &data[offset], bytes);
		} else if (aligned) {
			/* aligned source: direct 64-bit load */
			reg = *(u64 *)&data[offset];
		} else {
			/* unaligned source: byte-copy into reg */
			memcpy(&reg, &data[offset], 8);
		}
		write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);

		/* wait until ACCESS_COMPLETED is set */
		count = 0;
		while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
		    & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
		    == 0) {
			count++;
			if (count > DC8051_ACCESS_TIMEOUT) {
				dd_dev_err(dd, "timeout writing 8051 data\n");
				return -ENXIO;
			}
			udelay(1);
		}
	}

	/* turn off write access, auto increment (also sets to data access) */
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);

	return 0;
}
391 
392 /* return 0 if values match, non-zero and complain otherwise */
393 static int invalid_header(struct hfi1_devdata *dd, const char *what,
394 			  u32 actual, u32 expected)
395 {
396 	if (actual == expected)
397 		return 0;
398 
399 	dd_dev_err(dd,
400 		   "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
401 		   what, expected, actual);
402 	return 1;
403 }
404 
405 /*
406  * Verify that the static fields in the CSS header match.
407  */
/*
 * Verify that the static fields in the CSS header match.
 *
 * Only the fields with a single expected value are checked here:
 * module_type, header_len, header_version, module_vendor, and the
 * key/modulus/exponent sizes.  module_id and date vary per file and are
 * not checked; the size field is validated by the caller.
 *
 * Return 0 if all fields match, -EINVAL (after logging the first set of
 * mismatches) otherwise.
 */
static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
{
	/* verify CSS header fields (most sizes are in DW, so add /4) */
	if (invalid_header(dd, "module_type", css->module_type,
			   CSS_MODULE_TYPE) ||
	    invalid_header(dd, "header_len", css->header_len,
			   (sizeof(struct firmware_file) / 4)) ||
	    invalid_header(dd, "header_version", css->header_version,
			   CSS_HEADER_VERSION) ||
	    invalid_header(dd, "module_vendor", css->module_vendor,
			   CSS_MODULE_VENDOR) ||
	    invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
	    invalid_header(dd, "modulus_size", css->modulus_size,
			   KEY_SIZE / 4) ||
	    invalid_header(dd, "exponent_size", css->exponent_size,
			   EXPONENT_SIZE / 4)) {
		return -EINVAL;
	}
	return 0;
}
428 
429 /*
430  * Make sure there are at least some bytes after the prefix.
431  */
432 static int payload_check(struct hfi1_devdata *dd, const char *name,
433 			 long file_size, long prefix_size)
434 {
435 	/* make sure we have some payload */
436 	if (prefix_size >= file_size) {
437 		dd_dev_err(dd,
438 			   "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
439 			   name, file_size, prefix_size);
440 		return -EINVAL;
441 	}
442 
443 	return 0;
444 }
445 
446 /*
447  * Request the firmware from the system.  Extract the pieces and fill in
448  * fdet.  If successful, the caller will need to call dispose_one_firmware().
449  * Returns 0 on success, -ERRNO on error.
450  */
451 static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
452 			       struct firmware_details *fdet)
453 {
454 	struct css_header *css;
455 	int ret;
456 
457 	memset(fdet, 0, sizeof(*fdet));
458 
459 	ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
460 	if (ret) {
461 		dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
462 			    name, ret);
463 		return ret;
464 	}
465 
466 	/* verify the firmware */
467 	if (fdet->fw->size < sizeof(struct css_header)) {
468 		dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
469 		ret = -EINVAL;
470 		goto done;
471 	}
472 	css = (struct css_header *)fdet->fw->data;
473 
474 	hfi1_cdbg(FIRMWARE, "Firmware %s details:", name);
475 	hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size);
476 	hfi1_cdbg(FIRMWARE, "CSS structure:");
477 	hfi1_cdbg(FIRMWARE, "  module_type    0x%x", css->module_type);
478 	hfi1_cdbg(FIRMWARE, "  header_len     0x%03x (0x%03x bytes)",
479 		  css->header_len, 4 * css->header_len);
480 	hfi1_cdbg(FIRMWARE, "  header_version 0x%x", css->header_version);
481 	hfi1_cdbg(FIRMWARE, "  module_id      0x%x", css->module_id);
482 	hfi1_cdbg(FIRMWARE, "  module_vendor  0x%x", css->module_vendor);
483 	hfi1_cdbg(FIRMWARE, "  date           0x%x", css->date);
484 	hfi1_cdbg(FIRMWARE, "  size           0x%03x (0x%03x bytes)",
485 		  css->size, 4 * css->size);
486 	hfi1_cdbg(FIRMWARE, "  key_size       0x%03x (0x%03x bytes)",
487 		  css->key_size, 4 * css->key_size);
488 	hfi1_cdbg(FIRMWARE, "  modulus_size   0x%03x (0x%03x bytes)",
489 		  css->modulus_size, 4 * css->modulus_size);
490 	hfi1_cdbg(FIRMWARE, "  exponent_size  0x%03x (0x%03x bytes)",
491 		  css->exponent_size, 4 * css->exponent_size);
492 	hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes",
493 		  fdet->fw->size - sizeof(struct firmware_file));
494 
495 	/*
496 	 * If the file does not have a valid CSS header, fail.
497 	 * Otherwise, check the CSS size field for an expected size.
498 	 * The augmented file has r2 and mu inserted after the header
499 	 * was generated, so there will be a known difference between
500 	 * the CSS header size and the actual file size.  Use this
501 	 * difference to identify an augmented file.
502 	 *
503 	 * Note: css->size is in DWORDs, multiply by 4 to get bytes.
504 	 */
505 	ret = verify_css_header(dd, css);
506 	if (ret) {
507 		dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
508 	} else if ((css->size * 4) == fdet->fw->size) {
509 		/* non-augmented firmware file */
510 		struct firmware_file *ff = (struct firmware_file *)
511 							fdet->fw->data;
512 
513 		/* make sure there are bytes in the payload */
514 		ret = payload_check(dd, name, fdet->fw->size,
515 				    sizeof(struct firmware_file));
516 		if (ret == 0) {
517 			fdet->css_header = css;
518 			fdet->modulus = ff->modulus;
519 			fdet->exponent = ff->exponent;
520 			fdet->signature = ff->signature;
521 			fdet->r2 = fdet->dummy_header.r2; /* use dummy space */
522 			fdet->mu = fdet->dummy_header.mu; /* use dummy space */
523 			fdet->firmware_ptr = ff->firmware;
524 			fdet->firmware_len = fdet->fw->size -
525 						sizeof(struct firmware_file);
526 			/*
527 			 * Header does not include r2 and mu - generate here.
528 			 * For now, fail.
529 			 */
530 			dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
531 			ret = -EINVAL;
532 		}
533 	} else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) {
534 		/* augmented firmware file */
535 		struct augmented_firmware_file *aff =
536 			(struct augmented_firmware_file *)fdet->fw->data;
537 
538 		/* make sure there are bytes in the payload */
539 		ret = payload_check(dd, name, fdet->fw->size,
540 				    sizeof(struct augmented_firmware_file));
541 		if (ret == 0) {
542 			fdet->css_header = css;
543 			fdet->modulus = aff->modulus;
544 			fdet->exponent = aff->exponent;
545 			fdet->signature = aff->signature;
546 			fdet->r2 = aff->r2;
547 			fdet->mu = aff->mu;
548 			fdet->firmware_ptr = aff->firmware;
549 			fdet->firmware_len = fdet->fw->size -
550 					sizeof(struct augmented_firmware_file);
551 		}
552 	} else {
553 		/* css->size check failed */
554 		dd_dev_err(dd,
555 			   "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
556 			   fdet->fw->size / 4,
557 			   (fdet->fw->size - AUGMENT_SIZE) / 4,
558 			   css->size);
559 
560 		ret = -EINVAL;
561 	}
562 
563 done:
564 	/* if returning an error, clean up after ourselves */
565 	if (ret)
566 		dispose_one_firmware(fdet);
567 	return ret;
568 }
569 
/*
 * Release the firmware buffer (if any) held in fdet and clear all derived
 * pointers so stale references cannot be reused.  release_firmware()
 * accepts a NULL pointer, so this is safe on an fdet that was never
 * successfully obtained (obtain_one_firmware() zeroes fdet first).
 */
static void dispose_one_firmware(struct firmware_details *fdet)
{
	release_firmware(fdet->fw);
	/* erase all previous information */
	memset(fdet, 0, sizeof(*fdet));
}
576 
577 /*
578  * Obtain the 4 firmwares from the OS.  All must be obtained at once or not
579  * at all.  If called with the firmware state in FW_TRY, use alternate names.
580  * On exit, this routine will have set the firmware state to one of FW_TRY,
581  * FW_FINAL, or FW_ERR.
582  *
583  * Must be holding fw_mutex.
584  */
585 static void __obtain_firmware(struct hfi1_devdata *dd)
586 {
587 	int err = 0;
588 
589 	if (fw_state == FW_FINAL)	/* nothing more to obtain */
590 		return;
591 	if (fw_state == FW_ERR)		/* already in error */
592 		return;
593 
594 	/* fw_state is FW_EMPTY or FW_TRY */
595 retry:
596 	if (fw_state == FW_TRY) {
597 		/*
598 		 * We tried the original and it failed.  Move to the
599 		 * alternate.
600 		 */
601 		dd_dev_warn(dd, "using alternate firmware names\n");
602 		/*
603 		 * Let others run.  Some systems, when missing firmware, does
604 		 * something that holds for 30 seconds.  If we do that twice
605 		 * in a row it triggers task blocked warning.
606 		 */
607 		cond_resched();
608 		if (fw_8051_load)
609 			dispose_one_firmware(&fw_8051);
610 		if (fw_fabric_serdes_load)
611 			dispose_one_firmware(&fw_fabric);
612 		if (fw_sbus_load)
613 			dispose_one_firmware(&fw_sbus);
614 		if (fw_pcie_serdes_load)
615 			dispose_one_firmware(&fw_pcie);
616 		fw_8051_name = ALT_FW_8051_NAME_ASIC;
617 		fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
618 		fw_sbus_name = ALT_FW_SBUS_NAME;
619 		fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
620 
621 		/*
622 		 * Add a delay before obtaining and loading debug firmware.
623 		 * Authorization will fail if the delay between firmware
624 		 * authorization events is shorter than 50us. Add 100us to
625 		 * make a delay time safe.
626 		 */
627 		usleep_range(100, 120);
628 	}
629 
630 	if (fw_sbus_load) {
631 		err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
632 		if (err)
633 			goto done;
634 	}
635 
636 	if (fw_pcie_serdes_load) {
637 		err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
638 		if (err)
639 			goto done;
640 	}
641 
642 	if (fw_fabric_serdes_load) {
643 		err = obtain_one_firmware(dd, fw_fabric_serdes_name,
644 					  &fw_fabric);
645 		if (err)
646 			goto done;
647 	}
648 
649 	if (fw_8051_load) {
650 		err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
651 		if (err)
652 			goto done;
653 	}
654 
655 done:
656 	if (err) {
657 		/* oops, had problems obtaining a firmware */
658 		if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
659 			/* retry with alternate (RTL only) */
660 			fw_state = FW_TRY;
661 			goto retry;
662 		}
663 		dd_dev_err(dd, "unable to obtain working firmware\n");
664 		fw_state = FW_ERR;
665 		fw_err = -ENOENT;
666 	} else {
667 		/* success */
668 		if (fw_state == FW_EMPTY &&
669 		    dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
670 			fw_state = FW_TRY;	/* may retry later */
671 		else
672 			fw_state = FW_FINAL;	/* cannot try again */
673 	}
674 }
675 
676 /*
677  * Called by all HFIs when loading their firmware - i.e. device probe time.
678  * The first one will do the actual firmware load.  Use a mutex to resolve
679  * any possible race condition.
680  *
681  * The call to this routine cannot be moved to driver load because the kernel
682  * call request_firmware() requires a device which is only available after
683  * the first device probe.
684  */
685 static int obtain_firmware(struct hfi1_devdata *dd)
686 {
687 	unsigned long timeout;
688 
689 	mutex_lock(&fw_mutex);
690 
691 	/* 40s delay due to long delay on missing firmware on some systems */
692 	timeout = jiffies + msecs_to_jiffies(40000);
693 	while (fw_state == FW_TRY) {
694 		/*
695 		 * Another device is trying the firmware.  Wait until it
696 		 * decides what works (or not).
697 		 */
698 		if (time_after(jiffies, timeout)) {
699 			/* waited too long */
700 			dd_dev_err(dd, "Timeout waiting for firmware try");
701 			fw_state = FW_ERR;
702 			fw_err = -ETIMEDOUT;
703 			break;
704 		}
705 		mutex_unlock(&fw_mutex);
706 		msleep(20);	/* arbitrary delay */
707 		mutex_lock(&fw_mutex);
708 	}
709 	/* not in FW_TRY state */
710 
711 	/* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
712 	if (fw_state == FW_EMPTY)
713 		__obtain_firmware(dd);
714 
715 	mutex_unlock(&fw_mutex);
716 	return fw_err;
717 }
718 
719 /*
720  * Called when the driver unloads.  The timing is asymmetric with its
721  * counterpart, obtain_firmware().  If called at device remove time,
722  * then it is conceivable that another device could probe while the
723  * firmware is being disposed.  The mutexes can be moved to do that
724  * safely, but then the firmware would be requested from the OS multiple
725  * times.
726  *
727  * No mutex is needed as the driver is unloading and there cannot be any
728  * other callers.
729  */
730 void dispose_firmware(void)
731 {
732 	dispose_one_firmware(&fw_8051);
733 	dispose_one_firmware(&fw_fabric);
734 	dispose_one_firmware(&fw_pcie);
735 	dispose_one_firmware(&fw_sbus);
736 
737 	/* retain the error state, otherwise revert to empty */
738 	if (fw_state != FW_ERR)
739 		fw_state = FW_EMPTY;
740 }
741 
742 /*
743  * Called with the result of a firmware download.
744  *
745  * Return 1 to retry loading the firmware, 0 to stop.
746  */
747 static int retry_firmware(struct hfi1_devdata *dd, int load_result)
748 {
749 	int retry;
750 
751 	mutex_lock(&fw_mutex);
752 
753 	if (load_result == 0) {
754 		/*
755 		 * The load succeeded, so expect all others to do the same.
756 		 * Do not retry again.
757 		 */
758 		if (fw_state == FW_TRY)
759 			fw_state = FW_FINAL;
760 		retry = 0;	/* do NOT retry */
761 	} else if (fw_state == FW_TRY) {
762 		/* load failed, obtain alternate firmware */
763 		__obtain_firmware(dd);
764 		retry = (fw_state == FW_FINAL);
765 	} else {
766 		/* else in FW_FINAL or FW_ERR, no retry in either case */
767 		retry = 0;
768 	}
769 
770 	mutex_unlock(&fw_mutex);
771 	return retry;
772 }
773 
774 /*
775  * Write a block of data to a given array CSR.  All calls will be in
776  * multiples of 8 bytes.
777  */
778 static void write_rsa_data(struct hfi1_devdata *dd, int what,
779 			   const u8 *data, int nbytes)
780 {
781 	int qw_size = nbytes / 8;
782 	int i;
783 
784 	if (((unsigned long)data & 0x7) == 0) {
785 		/* aligned */
786 		u64 *ptr = (u64 *)data;
787 
788 		for (i = 0; i < qw_size; i++, ptr++)
789 			write_csr(dd, what + (8 * i), *ptr);
790 	} else {
791 		/* not aligned */
792 		for (i = 0; i < qw_size; i++, data += 8) {
793 			u64 value;
794 
795 			memcpy(&value, data, 8);
796 			write_csr(dd, what + (8 * i), value);
797 		}
798 	}
799 }
800 
801 /*
802  * Write a block of data to a given CSR as a stream of writes.  All calls will
803  * be in multiples of 8 bytes.
804  */
805 static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
806 				    const u8 *data, int nbytes)
807 {
808 	u64 *ptr = (u64 *)data;
809 	int qw_size = nbytes / 8;
810 
811 	for (; qw_size > 0; qw_size--, ptr++)
812 		write_csr(dd, what, *ptr);
813 }
814 
815 /*
816  * Download the signature and start the RSA mechanism.  Wait for
817  * RSA_ENGINE_TIMEOUT before giving up.
818  */
819 static int run_rsa(struct hfi1_devdata *dd, const char *who,
820 		   const u8 *signature)
821 {
822 	unsigned long timeout;
823 	u64 reg;
824 	u32 status;
825 	int ret = 0;
826 
827 	/* write the signature */
828 	write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);
829 
830 	/* initialize RSA */
831 	write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);
832 
833 	/*
834 	 * Make sure the engine is idle and insert a delay between the two
835 	 * writes to MISC_CFG_RSA_CMD.
836 	 */
837 	status = (read_csr(dd, MISC_CFG_FW_CTRL)
838 			   & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
839 			     >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
840 	if (status != RSA_STATUS_IDLE) {
841 		dd_dev_err(dd, "%s security engine not idle - giving up\n",
842 			   who);
843 		return -EBUSY;
844 	}
845 
846 	/* start RSA */
847 	write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);
848 
849 	/*
850 	 * Look for the result.
851 	 *
852 	 * The RSA engine is hooked up to two MISC errors.  The driver
853 	 * masks these errors as they do not respond to the standard
854 	 * error "clear down" mechanism.  Look for these errors here and
855 	 * clear them when possible.  This routine will exit with the
856 	 * errors of the current run still set.
857 	 *
858 	 * MISC_FW_AUTH_FAILED_ERR
859 	 *	Firmware authorization failed.  This can be cleared by
860 	 *	re-initializing the RSA engine, then clearing the status bit.
861 	 *	Do not re-init the RSA angine immediately after a successful
862 	 *	run - this will reset the current authorization.
863 	 *
864 	 * MISC_KEY_MISMATCH_ERR
865 	 *	Key does not match.  The only way to clear this is to load
866 	 *	a matching key then clear the status bit.  If this error
867 	 *	is raised, it will persist outside of this routine until a
868 	 *	matching key is loaded.
869 	 */
870 	timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies;
871 	while (1) {
872 		status = (read_csr(dd, MISC_CFG_FW_CTRL)
873 			   & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
874 			     >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
875 
876 		if (status == RSA_STATUS_IDLE) {
877 			/* should not happen */
878 			dd_dev_err(dd, "%s firmware security bad idle state\n",
879 				   who);
880 			ret = -EINVAL;
881 			break;
882 		} else if (status == RSA_STATUS_DONE) {
883 			/* finished successfully */
884 			break;
885 		} else if (status == RSA_STATUS_FAILED) {
886 			/* finished unsuccessfully */
887 			ret = -EINVAL;
888 			break;
889 		}
890 		/* else still active */
891 
892 		if (time_after(jiffies, timeout)) {
893 			/*
894 			 * Timed out while active.  We can't reset the engine
895 			 * if it is stuck active, but run through the
896 			 * error code to see what error bits are set.
897 			 */
898 			dd_dev_err(dd, "%s firmware security time out\n", who);
899 			ret = -ETIMEDOUT;
900 			break;
901 		}
902 
903 		msleep(20);
904 	}
905 
906 	/*
907 	 * Arrive here on success or failure.  Clear all RSA engine
908 	 * errors.  All current errors will stick - the RSA logic is keeping
909 	 * error high.  All previous errors will clear - the RSA logic
910 	 * is not keeping the error high.
911 	 */
912 	write_csr(dd, MISC_ERR_CLEAR,
913 		  MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
914 		  MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
915 	/*
916 	 * All that is left are the current errors.  Print warnings on
917 	 * authorization failure details, if any.  Firmware authorization
918 	 * can be retried, so these are only warnings.
919 	 */
920 	reg = read_csr(dd, MISC_ERR_STATUS);
921 	if (ret) {
922 		if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
923 			dd_dev_warn(dd, "%s firmware authorization failed\n",
924 				    who);
925 		if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
926 			dd_dev_warn(dd, "%s firmware key mismatch\n", who);
927 	}
928 
929 	return ret;
930 }
931 
/*
 * Load the security variables for one firmware image into the hardware,
 * ahead of an RSA authorization run (run_rsa()): the modulus, r2, mu, and
 * the CSS header, in that order.
 */
static void load_security_variables(struct hfi1_devdata *dd,
				    struct firmware_details *fdet)
{
	/* Security variables a.  Write the modulus */
	write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
	/* Security variables b.  Write the r2 */
	write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
	/* Security variables c.  Write the mu */
	write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
	/* Security variables d.  Write the header */
	write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
				(u8 *)fdet->css_header,
				sizeof(struct css_header));
}
946 
947 /* return the 8051 firmware state */
948 static inline u32 get_firmware_state(struct hfi1_devdata *dd)
949 {
950 	u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
951 
952 	return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT)
953 				& DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK;
954 }
955 
956 /*
957  * Wait until the firmware is up and ready to take host requests.
958  * Return 0 on success, -ETIMEDOUT on timeout.
959  */
960 int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
961 {
962 	unsigned long timeout;
963 
964 	/* in the simulator, the fake 8051 is always ready */
965 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
966 		return 0;
967 
968 	timeout = msecs_to_jiffies(mstimeout) + jiffies;
969 	while (1) {
970 		if (get_firmware_state(dd) == 0xa0)	/* ready */
971 			return 0;
972 		if (time_after(jiffies, timeout))	/* timed out */
973 			return -ETIMEDOUT;
974 		usleep_range(1950, 2050); /* sleep 2ms-ish */
975 	}
976 }
977 
978 /*
979  * Clear all reset bits, releasing the 8051.
980  * Wait for firmware to be ready to accept host requests.
981  * Then, set host version bit.
982  *
983  * This function executes even if the 8051 is in reset mode when
984  * dd->dc_shutdown == 1.
985  *
986  * Expects dd->dc8051_lock to be held.
987  */
988 int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
989 {
990 	int ret;
991 
992 	lockdep_assert_held(&dd->dc8051_lock);
993 	/* clear all reset bits, releasing the 8051 */
994 	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
995 
996 	/*
997 	 * Wait for firmware to be ready to accept host
998 	 * requests.
999 	 */
1000 	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
1001 	if (ret) {
1002 		dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
1003 			   get_firmware_state(dd));
1004 		return ret;
1005 	}
1006 
1007 	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
1008 	if (ret != HCMD_SUCCESS) {
1009 		dd_dev_err(dd,
1010 			   "Failed to set host interface version, return 0x%x\n",
1011 			   ret);
1012 		return -EIO;
1013 	}
1014 
1015 	return 0;
1016 }
1017 
1018 /*
1019  * Load the 8051 firmware.
1020  */
1021 static int load_8051_firmware(struct hfi1_devdata *dd,
1022 			      struct firmware_details *fdet)
1023 {
1024 	u64 reg;
1025 	int ret;
1026 	u8 ver_major;
1027 	u8 ver_minor;
1028 	u8 ver_patch;
1029 
1030 	/*
1031 	 * DC Reset sequence
1032 	 * Load DC 8051 firmware
1033 	 */
1034 	/*
1035 	 * DC reset step 1: Reset DC8051
1036 	 */
1037 	reg = DC_DC8051_CFG_RST_M8051W_SMASK
1038 		| DC_DC8051_CFG_RST_CRAM_SMASK
1039 		| DC_DC8051_CFG_RST_DRAM_SMASK
1040 		| DC_DC8051_CFG_RST_IRAM_SMASK
1041 		| DC_DC8051_CFG_RST_SFR_SMASK;
1042 	write_csr(dd, DC_DC8051_CFG_RST, reg);
1043 
1044 	/*
1045 	 * DC reset step 2 (optional): Load 8051 data memory with link
1046 	 * configuration
1047 	 */
1048 
1049 	/*
1050 	 * DC reset step 3: Load DC8051 firmware
1051 	 */
1052 	/* release all but the core reset */
1053 	reg = DC_DC8051_CFG_RST_M8051W_SMASK;
1054 	write_csr(dd, DC_DC8051_CFG_RST, reg);
1055 
1056 	/* Firmware load step 1 */
1057 	load_security_variables(dd, fdet);
1058 
1059 	/*
1060 	 * Firmware load step 2.  Clear MISC_CFG_FW_CTRL.FW_8051_LOADED
1061 	 */
1062 	write_csr(dd, MISC_CFG_FW_CTRL, 0);
1063 
1064 	/* Firmware load steps 3-5 */
1065 	ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
1066 			 fdet->firmware_len);
1067 	if (ret)
1068 		return ret;
1069 
1070 	/*
1071 	 * DC reset step 4. Host starts the DC8051 firmware
1072 	 */
1073 	/*
1074 	 * Firmware load step 6.  Set MISC_CFG_FW_CTRL.FW_8051_LOADED
1075 	 */
1076 	write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);
1077 
1078 	/* Firmware load steps 7-10 */
1079 	ret = run_rsa(dd, "8051", fdet->signature);
1080 	if (ret)
1081 		return ret;
1082 
1083 	/*
1084 	 * Clear all reset bits, releasing the 8051.
1085 	 * DC reset step 5. Wait for firmware to be ready to accept host
1086 	 * requests.
1087 	 * Then, set host version bit.
1088 	 */
1089 	mutex_lock(&dd->dc8051_lock);
1090 	ret = release_and_wait_ready_8051_firmware(dd);
1091 	mutex_unlock(&dd->dc8051_lock);
1092 	if (ret)
1093 		return ret;
1094 
1095 	read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
1096 	dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
1097 		    (int)ver_major, (int)ver_minor, (int)ver_patch);
1098 	dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
1099 
1100 	return 0;
1101 }
1102 
1103 /*
1104  * Write the SBus request register
1105  *
1106  * No need for masking - the arguments are sized exactly.
1107  */
1108 void sbus_request(struct hfi1_devdata *dd,
1109 		  u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
1110 {
1111 	write_csr(dd, ASIC_CFG_SBUS_REQUEST,
1112 		  ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
1113 		  ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
1114 		  ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
1115 		  ((u64)receiver_addr <<
1116 		   ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
1117 }
1118 
1119 /*
1120  * Read a value from the SBus.
1121  *
1122  * Requires the caller to be in fast mode
1123  */
1124 static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
1125 		     u32 data_in)
1126 {
1127 	u64 reg;
1128 	int retries;
1129 	int success = 0;
1130 	u32 result = 0;
1131 	u32 result_code = 0;
1132 
1133 	sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
1134 
1135 	for (retries = 0; retries < 100; retries++) {
1136 		usleep_range(1000, 1200); /* arbitrary */
1137 		reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
1138 		result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
1139 				& ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
1140 		if (result_code != SBUS_READ_COMPLETE)
1141 			continue;
1142 
1143 		success = 1;
1144 		result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
1145 			   & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
1146 		break;
1147 	}
1148 
1149 	if (!success) {
1150 		dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
1151 			   result_code);
1152 	}
1153 
1154 	return result;
1155 }
1156 
1157 /*
1158  * Turn off the SBus and fabric serdes spicos.
1159  *
1160  * + Must be called with Sbus fast mode turned on.
1161  * + Must be called after fabric serdes broadcast is set up.
1162  * + Must be called before the 8051 is loaded - assumes 8051 is not loaded
1163  *   when using MISC_CFG_FW_CTRL.
1164  */
1165 static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
1166 {
1167 	/* only needed on A0 */
1168 	if (!is_ax(dd))
1169 		return;
1170 
1171 	dd_dev_info(dd, "Turning off spicos:%s%s\n",
1172 		    flags & SPICO_SBUS ? " SBus" : "",
1173 		    flags & SPICO_FABRIC ? " fabric" : "");
1174 
1175 	write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
1176 	/* disable SBus spico */
1177 	if (flags & SPICO_SBUS)
1178 		sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
1179 			     WRITE_SBUS_RECEIVER, 0x00000040);
1180 
1181 	/* disable the fabric serdes spicos */
1182 	if (flags & SPICO_FABRIC)
1183 		sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
1184 			     0x07, WRITE_SBUS_RECEIVER, 0x00000000);
1185 	write_csr(dd, MISC_CFG_FW_CTRL, 0);
1186 }
1187 
1188 /*
1189  * Reset all of the fabric serdes for this HFI in preparation to take the
1190  * link to Polling.
1191  *
1192  * To do a reset, we need to write to to the serdes registers.  Unfortunately,
1193  * the fabric serdes download to the other HFI on the ASIC will have turned
1194  * off the firmware validation on this HFI.  This means we can't write to the
1195  * registers to reset the serdes.  Work around this by performing a complete
1196  * re-download and validation of the fabric serdes firmware.  This, as a
1197  * by-product, will reset the serdes.  NOTE: the re-download requires that
1198  * the 8051 be in the Offline state.  I.e. not actively trying to use the
1199  * serdes.  This routine is called at the point where the link is Offline and
1200  * is getting ready to go to Polling.
1201  */
1202 void fabric_serdes_reset(struct hfi1_devdata *dd)
1203 {
1204 	int ret;
1205 
1206 	if (!fw_fabric_serdes_load)
1207 		return;
1208 
1209 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
1210 	if (ret) {
1211 		dd_dev_err(dd,
1212 			   "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n");
1213 		return;
1214 	}
1215 	set_sbus_fast_mode(dd);
1216 
1217 	if (is_ax(dd)) {
1218 		/* A0 serdes do not work with a re-download */
1219 		u8 ra = fabric_serdes_broadcast[dd->hfi1_id];
1220 
1221 		/* place SerDes in reset and disable SPICO */
1222 		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
1223 		/* wait 100 refclk cycles @ 156.25MHz => 640ns */
1224 		udelay(1);
1225 		/* remove SerDes reset */
1226 		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
1227 		/* turn SPICO enable on */
1228 		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
1229 	} else {
1230 		turn_off_spicos(dd, SPICO_FABRIC);
1231 		/*
1232 		 * No need for firmware retry - what to download has already
1233 		 * been decided.
1234 		 * No need to pay attention to the load return - the only
1235 		 * failure is a validation failure, which has already been
1236 		 * checked by the initial download.
1237 		 */
1238 		(void)load_fabric_serdes_firmware(dd, &fw_fabric);
1239 	}
1240 
1241 	clear_sbus_fast_mode(dd);
1242 	release_chip_resource(dd, CR_SBUS);
1243 }
1244 
/*
 * Issue a single SBus request in slow (polled) mode and wait for it to
 * complete.
 *
 * Return: 0 on success, -ETIMEDOUT if the request never completed,
 * -ETIME if DONE failed to clear after EXECUTE was deasserted.
 *
 * Access to the SBus in this routine should probably be serialized.
 */
int sbus_request_slow(struct hfi1_devdata *dd,
		      u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
{
	u64 reg, count = 0;

	/* make sure fast mode is clear */
	clear_sbus_fast_mode(dd);

	sbus_request(dd, receiver_addr, data_addr, command, data_in);
	write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
		  ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
	/* Wait for both DONE and RCV_DATA_VALID to go high */
	reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
	while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
		 (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) {
		if (count++ >= SBUS_MAX_POLL_COUNT) {
			u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
			/*
			 * If the loop has timed out, we are OK if DONE bit
			 * is set and RCV_DATA_VALID and EXECUTE counters
			 * are the same. If not, we cannot proceed.
			 */
			if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
			    (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
			     SBUS_COUNTER(counts, EXECUTE)))
				break;
			return -ETIMEDOUT;
		}
		udelay(1);
		reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
	}
	/* reuse the poll counter for the second wait below */
	count = 0;
	write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
	/* Wait for DONE to clear after EXECUTE is cleared */
	reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
	while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) {
		if (count++ >= SBUS_MAX_POLL_COUNT)
			return -ETIME;
		udelay(1);
		reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
	}
	return 0;
}
1289 
/*
 * Download and validate the fabric serdes firmware via the SBus,
 * following the documented 13-step sequence.
 *
 * Return: 0 on success, or the error from the RSA validation step.
 */
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
				       struct firmware_details *fdet)
{
	int i, err;
	const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */

	dd_dev_info(dd, "Downloading fabric firmware\n");

	/* step 1: load security variables */
	load_security_variables(dd, fdet);
	/* step 2: place SerDes in reset and disable SPICO */
	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
	/* wait 100 refclk cycles @ 156.25MHz => 640ns */
	udelay(1);
	/* step 3:  remove SerDes reset */
	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
	/* step 4: assert IMEM override */
	sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
	/* step 5: download SerDes machine code */
	/*
	 * NOTE(review): the u32 load assumes firmware_ptr is 4-byte
	 * aligned and the image is in host byte order - confirm against
	 * the firmware image format.
	 */
	for (i = 0; i < fdet->firmware_len; i += 4) {
		sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
			     *(u32 *)&fdet->firmware_ptr[i]);
	}
	/* step 6: IMEM override off */
	sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
	/* step 7: turn ECC on */
	sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);

	/* steps 8-11: run the RSA engine */
	err = run_rsa(dd, "fabric serdes", fdet->signature);
	if (err)
		return err;

	/* step 12: turn SPICO enable on */
	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
	/* step 13: enable core hardware interrupts */
	sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);

	return 0;
}
1330 
/*
 * Download and validate the SBus Master firmware, following the
 * documented 12-step sequence.
 *
 * Return: 0 on success, or the error from the RSA validation step.
 */
static int load_sbus_firmware(struct hfi1_devdata *dd,
			      struct firmware_details *fdet)
{
	int i, err;
	const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */

	dd_dev_info(dd, "Downloading SBus firmware\n");

	/* step 1: load security variables */
	load_security_variables(dd, fdet);
	/* step 2: place SPICO into reset and enable off */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
	/* step 3: remove reset, enable off, IMEM_CNTRL_EN on */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
	/* step 4: set starting IMEM address for burst download */
	sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
	/* step 5: download the SBus Master machine code */
	/* NOTE(review): assumes firmware_ptr is 4-byte aligned - confirm */
	for (i = 0; i < fdet->firmware_len; i += 4) {
		sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
			     *(u32 *)&fdet->firmware_ptr[i]);
	}
	/* step 6: set IMEM_CNTL_EN off */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
	/* step 7: turn ECC on */
	sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);

	/* steps 8-11: run the RSA engine */
	err = run_rsa(dd, "SBus", fdet->signature);
	if (err)
		return err;

	/* step 12: set SPICO_ENABLE on */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);

	return 0;
}
1367 
/*
 * Download the PCIe serdes firmware into the SBus Master XDMEM and run
 * RSA validation.  On success the firmware is staged, ready to be
 * swapped in.
 *
 * Return: 0 on success, or the error from the RSA validation step.
 */
static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
				     struct firmware_details *fdet)
{
	int i;
	const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */

	dd_dev_info(dd, "Downloading PCIe firmware\n");

	/* step 1: load security variables */
	load_security_variables(dd, fdet);
	/* step 2: assert single step (halts the SBus Master spico) */
	sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
	/* step 3: enable XDMEM access */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
	/* step 4: load firmware into SBus Master XDMEM */
	/*
	 * NOTE: the dmem address, write_en, and wdata are all pre-packed,
	 * we only need to pick up the bytes and write them
	 */
	for (i = 0; i < fdet->firmware_len; i += 4) {
		sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
			     *(u32 *)&fdet->firmware_ptr[i]);
	}
	/* step 5: disable XDMEM access */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
	/* step 6: allow SBus Spico to run */
	sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);

	/*
	 * steps 7-11: run RSA, if it succeeds, firmware is available to
	 * be swapped
	 */
	return run_rsa(dd, "PCIe serdes", fdet->signature);
}
1402 
1403 /*
1404  * Set the given broadcast values on the given list of devices.
1405  */
1406 static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
1407 				 const u8 *addrs, int count)
1408 {
1409 	while (--count >= 0) {
1410 		/*
1411 		 * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave
1412 		 * defaults for everything else.  Do not read-modify-write,
1413 		 * per instruction from the manufacturer.
1414 		 *
1415 		 * Register 0xfd:
1416 		 *	bits    what
1417 		 *	-----	---------------------------------
1418 		 *	  0	IGNORE_BROADCAST  (default 0)
1419 		 *	11:4	BROADCAST_GROUP_1 (default 0xff)
1420 		 *	23:16	BROADCAST_GROUP_2 (default 0xff)
1421 		 */
1422 		sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
1423 			     (u32)bg1 << 4 | (u32)bg2 << 16);
1424 	}
1425 }
1426 
1427 int acquire_hw_mutex(struct hfi1_devdata *dd)
1428 {
1429 	unsigned long timeout;
1430 	int try = 0;
1431 	u8 mask = 1 << dd->hfi1_id;
1432 	u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
1433 
1434 	if (user == mask) {
1435 		dd_dev_info(dd,
1436 			    "Hardware mutex already acquired, mutex mask %u\n",
1437 			    (u32)mask);
1438 		return 0;
1439 	}
1440 
1441 retry:
1442 	timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
1443 	while (1) {
1444 		write_csr(dd, ASIC_CFG_MUTEX, mask);
1445 		user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
1446 		if (user == mask)
1447 			return 0; /* success */
1448 		if (time_after(jiffies, timeout))
1449 			break; /* timed out */
1450 		msleep(20);
1451 	}
1452 
1453 	/* timed out */
1454 	dd_dev_err(dd,
1455 		   "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
1456 		   (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
1457 
1458 	if (try == 0) {
1459 		/* break mutex and retry */
1460 		write_csr(dd, ASIC_CFG_MUTEX, 0);
1461 		try++;
1462 		goto retry;
1463 	}
1464 
1465 	return -EBUSY;
1466 }
1467 
1468 void release_hw_mutex(struct hfi1_devdata *dd)
1469 {
1470 	u8 mask = 1 << dd->hfi1_id;
1471 	u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
1472 
1473 	if (user != mask)
1474 		dd_dev_warn(dd,
1475 			    "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
1476 			    (u32)user, (u32)mask);
1477 	else
1478 		write_csr(dd, ASIC_CFG_MUTEX, 0);
1479 }
1480 
1481 /* return the given resource bit(s) as a mask for the given HFI */
1482 static inline u64 resource_mask(u32 hfi1_id, u32 resource)
1483 {
1484 	return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0);
1485 }
1486 
/*
 * Log a "hardware mutex stuck" error on behalf of the named caller.
 * @func: name of the function that failed to acquire the mutex.
 */
static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
				       const char *func)
{
	dd_dev_err(dd,
		   "%s: hardware mutex stuck - suggest rebooting the machine\n",
		   func);
}
1494 
1495 /*
1496  * Acquire access to a chip resource.
1497  *
1498  * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed.
1499  */
1500 static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
1501 {
1502 	u64 scratch0, all_bits, my_bit;
1503 	int ret;
1504 
1505 	if (resource & CR_DYN_MASK) {
1506 		/* a dynamic resource is in use if either HFI has set the bit */
1507 		if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 &&
1508 		    (resource & (CR_I2C1 | CR_I2C2))) {
1509 			/* discrete devices must serialize across both chains */
1510 			all_bits = resource_mask(0, CR_I2C1 | CR_I2C2) |
1511 					resource_mask(1, CR_I2C1 | CR_I2C2);
1512 		} else {
1513 			all_bits = resource_mask(0, resource) |
1514 						resource_mask(1, resource);
1515 		}
1516 		my_bit = resource_mask(dd->hfi1_id, resource);
1517 	} else {
1518 		/* non-dynamic resources are not split between HFIs */
1519 		all_bits = resource;
1520 		my_bit = resource;
1521 	}
1522 
1523 	/* lock against other callers within the driver wanting a resource */
1524 	mutex_lock(&dd->asic_data->asic_resource_mutex);
1525 
1526 	ret = acquire_hw_mutex(dd);
1527 	if (ret) {
1528 		fail_mutex_acquire_message(dd, __func__);
1529 		ret = -EIO;
1530 		goto done;
1531 	}
1532 
1533 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
1534 	if (scratch0 & all_bits) {
1535 		ret = -EBUSY;
1536 	} else {
1537 		write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
1538 		/* force write to be visible to other HFI on another OS */
1539 		(void)read_csr(dd, ASIC_CFG_SCRATCH);
1540 	}
1541 
1542 	release_hw_mutex(dd);
1543 
1544 done:
1545 	mutex_unlock(&dd->asic_data->asic_resource_mutex);
1546 	return ret;
1547 }
1548 
1549 /*
1550  * Acquire access to a chip resource, wait up to mswait milliseconds for
1551  * the resource to become available.
1552  *
1553  * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex
1554  * acquire failed.
1555  */
1556 int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
1557 {
1558 	unsigned long timeout;
1559 	int ret;
1560 
1561 	timeout = jiffies + msecs_to_jiffies(mswait);
1562 	while (1) {
1563 		ret = __acquire_chip_resource(dd, resource);
1564 		if (ret != -EBUSY)
1565 			return ret;
1566 		/* resource is busy, check our timeout */
1567 		if (time_after_eq(jiffies, timeout))
1568 			return -EBUSY;
1569 		usleep_range(80, 120);	/* arbitrary delay */
1570 	}
1571 }
1572 
1573 /*
1574  * Release access to a chip resource
1575  */
1576 void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
1577 {
1578 	u64 scratch0, bit;
1579 
1580 	/* only dynamic resources should ever be cleared */
1581 	if (!(resource & CR_DYN_MASK)) {
1582 		dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
1583 			   resource);
1584 		return;
1585 	}
1586 	bit = resource_mask(dd->hfi1_id, resource);
1587 
1588 	/* lock against other callers within the driver wanting a resource */
1589 	mutex_lock(&dd->asic_data->asic_resource_mutex);
1590 
1591 	if (acquire_hw_mutex(dd)) {
1592 		fail_mutex_acquire_message(dd, __func__);
1593 		goto done;
1594 	}
1595 
1596 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
1597 	if ((scratch0 & bit) != 0) {
1598 		scratch0 &= ~bit;
1599 		write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
1600 		/* force write to be visible to other HFI on another OS */
1601 		(void)read_csr(dd, ASIC_CFG_SCRATCH);
1602 	} else {
1603 		dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
1604 			    __func__, dd->hfi1_id, resource);
1605 	}
1606 
1607 	release_hw_mutex(dd);
1608 
1609 done:
1610 	mutex_unlock(&dd->asic_data->asic_resource_mutex);
1611 }
1612 
1613 /*
1614  * Return true if resource is set, false otherwise.  Print a warning
1615  * if not set and a function is supplied.
1616  */
1617 bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
1618 			 const char *func)
1619 {
1620 	u64 scratch0, bit;
1621 
1622 	if (resource & CR_DYN_MASK)
1623 		bit = resource_mask(dd->hfi1_id, resource);
1624 	else
1625 		bit = resource;
1626 
1627 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
1628 	if ((scratch0 & bit) == 0) {
1629 		if (func)
1630 			dd_dev_warn(dd,
1631 				    "%s: id %d, resource 0x%x, not acquired!\n",
1632 				    func, dd->hfi1_id, resource);
1633 		return false;
1634 	}
1635 	return true;
1636 }
1637 
/*
 * Clear every dynamic resource bit held by this HFI in ASIC_CFG_SCRATCH.
 * @func: caller name, used in the stuck-mutex error message.
 */
static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
{
	u64 scratch0;

	/* lock against other callers within the driver wanting a resource */
	mutex_lock(&dd->asic_data->asic_resource_mutex);

	if (acquire_hw_mutex(dd)) {
		fail_mutex_acquire_message(dd, func);
		goto done;
	}

	/* clear all dynamic access bits for this HFI */
	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
	write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
	/* force write to be visible to other HFI on another OS */
	(void)read_csr(dd, ASIC_CFG_SCRATCH);

	release_hw_mutex(dd);

done:
	mutex_unlock(&dd->asic_data->asic_resource_mutex);
}
1662 
/* Called at driver init: drop any resource holds we may have left over. */
void init_chip_resources(struct hfi1_devdata *dd)
{
	/* clear any holds left by us */
	clear_chip_resources(dd, __func__);
}
1668 
/* Called at driver teardown: drop any resource holds we still have. */
void finish_chip_resources(struct hfi1_devdata *dd)
{
	/* clear any holds left by us */
	clear_chip_resources(dd, __func__);
}
1674 
/* Enable SBus fast mode via the EXECUTE CSR. */
void set_sbus_fast_mode(struct hfi1_devdata *dd)
{
	write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
		  ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
}
1680 
1681 void clear_sbus_fast_mode(struct hfi1_devdata *dd)
1682 {
1683 	u64 reg, count = 0;
1684 
1685 	reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
1686 	while (SBUS_COUNTER(reg, EXECUTE) !=
1687 	       SBUS_COUNTER(reg, RCV_DATA_VALID)) {
1688 		if (count++ >= SBUS_MAX_POLL_COUNT)
1689 			break;
1690 		udelay(1);
1691 		reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
1692 	}
1693 	write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
1694 }
1695 
/*
 * Load the fabric serdes and 8051 firmwares, as enabled by the
 * fw_*_load module settings, retrying each download via
 * retry_firmware() on failure.  Logs the final firmware versions.
 *
 * Return: 0 on success or a negative errno from resource acquisition or
 * a firmware download.
 */
int load_firmware(struct hfi1_devdata *dd)
{
	int ret;

	if (fw_fabric_serdes_load) {
		ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
		if (ret)
			return ret;

		set_sbus_fast_mode(dd);

		/* set up broadcast addressing before the download */
		set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
				     fabric_serdes_broadcast[dd->hfi1_id],
				     fabric_serdes_addrs[dd->hfi1_id],
				     NUM_FABRIC_SERDES);
		turn_off_spicos(dd, SPICO_FABRIC);
		do {
			ret = load_fabric_serdes_firmware(dd, &fw_fabric);
		} while (retry_firmware(dd, ret));

		clear_sbus_fast_mode(dd);
		release_chip_resource(dd, CR_SBUS);
		if (ret)
			return ret;
	}

	if (fw_8051_load) {
		do {
			ret = load_8051_firmware(dd, &fw_8051);
		} while (retry_firmware(dd, ret));
		if (ret)
			return ret;
	}

	dump_fw_version(dd);
	return 0;
}
1733 
1734 int hfi1_firmware_init(struct hfi1_devdata *dd)
1735 {
1736 	/* only RTL can use these */
1737 	if (dd->icode != ICODE_RTL_SILICON) {
1738 		fw_fabric_serdes_load = 0;
1739 		fw_pcie_serdes_load = 0;
1740 		fw_sbus_load = 0;
1741 	}
1742 
1743 	/* no 8051 or QSFP on simulator */
1744 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
1745 		fw_8051_load = 0;
1746 
1747 	if (!fw_8051_name) {
1748 		if (dd->icode == ICODE_RTL_SILICON)
1749 			fw_8051_name = DEFAULT_FW_8051_NAME_ASIC;
1750 		else
1751 			fw_8051_name = DEFAULT_FW_8051_NAME_FPGA;
1752 	}
1753 	if (!fw_fabric_serdes_name)
1754 		fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME;
1755 	if (!fw_sbus_name)
1756 		fw_sbus_name = DEFAULT_FW_SBUS_NAME;
1757 	if (!fw_pcie_serdes_name)
1758 		fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;
1759 
1760 	return obtain_firmware(dd);
1761 }
1762 
1763 /*
1764  * This function is a helper function for parse_platform_config(...) and
1765  * does not check for validity of the platform configuration cache
1766  * (because we know it is invalid as we are building up the cache).
1767  * As such, this should not be called from anywhere other than
1768  * parse_platform_config
1769  */
1770 static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
1771 {
1772 	u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask;
1773 	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
1774 
1775 	if (!system_table)
1776 		return -EINVAL;
1777 
1778 	meta_ver_meta =
1779 	*(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata
1780 	+ SYSTEM_TABLE_META_VERSION);
1781 
1782 	mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
1783 	ver_start = meta_ver_meta & mask;
1784 
1785 	meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT;
1786 
1787 	mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
1788 	ver_len = meta_ver_meta & mask;
1789 
1790 	ver_start /= 8;
1791 	meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
1792 
1793 	if (meta_ver < 4) {
1794 		dd_dev_info(
1795 			dd, "%s:Please update platform config\n", __func__);
1796 		return -EINVAL;
1797 	}
1798 	return 0;
1799 }
1800 
/*
 * Parse the platform configuration image into dd->pcfg_cache.
 *
 * Image layout: magic number, file length (in DWORDs), then a sequence
 * of records.  Each record is a pair of header DWORDs (the second being
 * the bitwise complement of the first), the table data, and a trailing
 * CRC32 DWORD.  The first header encodes record index, table length
 * (DWORDs), and table type; record index zero marks a metadata table.
 *
 * Return: 0 on success, -EINVAL on any validation failure (the cache is
 * zeroed before returning an error).
 */
int parse_platform_config(struct hfi1_devdata *dd)
{
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
	struct hfi1_pportdata *ppd = dd->pport;
	u32 *ptr = NULL;
	u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
	u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
	int ret = -EINVAL; /* assume failure */

	/*
	 * For integrated devices that did not fall back to the default file,
	 * the SI tuning information for active channels is acquired from the
	 * scratch register bitmap, thus there is no platform config to parse.
	 * Skip parsing in these situations.
	 */
	if (ppd->config_from_scratch)
		return 0;

	if (!dd->platform_config.data) {
		dd_dev_err(dd, "%s: Missing config file\n", __func__);
		goto bail;
	}
	ptr = (u32 *)dd->platform_config.data;

	magic_num = *ptr;
	ptr++;
	if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
		dd_dev_err(dd, "%s: Bad config file\n", __func__);
		goto bail;
	}

	/* Field is file size in DWORDs */
	file_length = (*ptr) * 4;

	/*
	 * Length can't be larger than partition size. Assume platform
	 * config format version 4 is being used. Interpret the file size
	 * field as header instead by not moving the pointer.
	 */
	if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
		dd_dev_info(dd,
			    "%s:File length out of bounds, using alternative format\n",
			    __func__);
		file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
	} else {
		ptr++;
	}

	if (file_length > dd->platform_config.size) {
		dd_dev_info(dd, "%s:File claims to be larger than read size\n",
			    __func__);
		goto bail;
	} else if (file_length < dd->platform_config.size) {
		dd_dev_info(dd,
			    "%s:File claims to be smaller than read size, continuing\n",
			    __func__);
	}
	/* exactly equal, perfection */

	/*
	 * In both cases where we proceed, using the self-reported file length
	 * is the safer option. In case of old format a predefined value is
	 * being used.
	 */
	while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
		/* header pair: second DWORD must be the complement of the first */
		header1 = *ptr;
		header2 = *(ptr + 1);
		if (header1 != ~header2) {
			dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
				   __func__, (ptr - (u32 *)
					      dd->platform_config.data));
			goto bail;
		}

		/* unpack record index, table length, and table type */
		record_idx = *ptr &
			((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1);

		table_length_dwords = (*ptr >>
				PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) &
		      ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1);

		table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) &
			((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1);

		/* Done with this set of headers */
		ptr += 2;

		if (record_idx) {
			/* data table */
			switch (table_type) {
			case PLATFORM_CONFIG_SYSTEM_TABLE:
				pcfgcache->config_tables[table_type].num_table =
									1;
				ret = check_meta_version(dd, ptr);
				if (ret)
					goto bail;
				break;
			case PLATFORM_CONFIG_PORT_TABLE:
				pcfgcache->config_tables[table_type].num_table =
									2;
				break;
			case PLATFORM_CONFIG_RX_PRESET_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_TX_PRESET_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
				pcfgcache->config_tables[table_type].num_table =
							table_length_dwords;
				break;
			default:
				dd_dev_err(dd,
					   "%s: Unknown data table %d, offset %ld\n",
					   __func__, table_type,
					   (ptr - (u32 *)
					    dd->platform_config.data));
				goto bail; /* We don't trust this file now */
			}
			pcfgcache->config_tables[table_type].table = ptr;
		} else {
			/* metadata table */
			switch (table_type) {
			case PLATFORM_CONFIG_SYSTEM_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_PORT_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_RX_PRESET_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_TX_PRESET_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
				break;
			default:
				dd_dev_err(dd,
					   "%s: Unknown meta table %d, offset %ld\n",
					   __func__, table_type,
					   (ptr -
					    (u32 *)dd->platform_config.data));
				goto bail; /* We don't trust this file now */
			}
			pcfgcache->config_tables[table_type].table_metadata =
									ptr;
		}

		/* Calculate and check table crc */
		crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
			       (table_length_dwords * 4));
		crc ^= ~(u32)0;

		/* Jump the table */
		ptr += table_length_dwords;
		if (crc != *ptr) {
			dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
				   __func__, (ptr -
				   (u32 *)dd->platform_config.data));
			goto bail;
		}
		/* Jump the CRC DWORD */
		ptr++;
	}

	pcfgcache->cache_valid = 1;
	return 0;
bail:
	/* discard any partially-built cache state on failure */
	memset(pcfgcache, 0, sizeof(struct platform_config_cache));
	return ret;
}
1971 
1972 static void get_integrated_platform_config_field(
1973 		struct hfi1_devdata *dd,
1974 		enum platform_config_table_type_encoding table_type,
1975 		int field_index, u32 *data)
1976 {
1977 	struct hfi1_pportdata *ppd = dd->pport;
1978 	u8 *cache = ppd->qsfp_info.cache;
1979 	u32 tx_preset = 0;
1980 
1981 	switch (table_type) {
1982 	case PLATFORM_CONFIG_SYSTEM_TABLE:
1983 		if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
1984 			*data = ppd->max_power_class;
1985 		else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
1986 			*data = ppd->default_atten;
1987 		break;
1988 	case PLATFORM_CONFIG_PORT_TABLE:
1989 		if (field_index == PORT_TABLE_PORT_TYPE)
1990 			*data = ppd->port_type;
1991 		else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
1992 			*data = ppd->local_atten;
1993 		else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
1994 			*data = ppd->remote_atten;
1995 		break;
1996 	case PLATFORM_CONFIG_RX_PRESET_TABLE:
1997 		if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
1998 			*data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
1999 				QSFP_RX_CDR_APPLY_SHIFT;
2000 		else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
2001 			*data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
2002 				QSFP_RX_EMP_APPLY_SHIFT;
2003 		else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
2004 			*data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
2005 				QSFP_RX_AMP_APPLY_SHIFT;
2006 		else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
2007 			*data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
2008 				QSFP_RX_CDR_SHIFT;
2009 		else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
2010 			*data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
2011 				QSFP_RX_EMP_SHIFT;
2012 		else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
2013 			*data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
2014 				QSFP_RX_AMP_SHIFT;
2015 		break;
2016 	case PLATFORM_CONFIG_TX_PRESET_TABLE:
2017 		if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
2018 			tx_preset = ppd->tx_preset_eq;
2019 		else
2020 			tx_preset = ppd->tx_preset_noeq;
2021 		if (field_index == TX_PRESET_TABLE_PRECUR)
2022 			*data = (tx_preset & TX_PRECUR_SMASK) >>
2023 				TX_PRECUR_SHIFT;
2024 		else if (field_index == TX_PRESET_TABLE_ATTN)
2025 			*data = (tx_preset & TX_ATTN_SMASK) >>
2026 				TX_ATTN_SHIFT;
2027 		else if (field_index == TX_PRESET_TABLE_POSTCUR)
2028 			*data = (tx_preset & TX_POSTCUR_SMASK) >>
2029 				TX_POSTCUR_SHIFT;
2030 		else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
2031 			*data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
2032 				QSFP_TX_CDR_APPLY_SHIFT;
2033 		else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
2034 			*data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
2035 				QSFP_TX_EQ_APPLY_SHIFT;
2036 		else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
2037 			*data = (tx_preset & QSFP_TX_CDR_SMASK) >>
2038 				QSFP_TX_CDR_SHIFT;
2039 		else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
2040 			*data = (tx_preset & QSFP_TX_EQ_SMASK) >>
2041 				QSFP_TX_EQ_SHIFT;
2042 		break;
2043 	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
2044 	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
2045 	default:
2046 		break;
2047 	}
2048 }
2049 
2050 static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
2051 					  int field, u32 *field_len_bits,
2052 					  u32 *field_start_bits)
2053 {
2054 	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
2055 	u32 *src_ptr = NULL;
2056 
2057 	if (!pcfgcache->cache_valid)
2058 		return -EINVAL;
2059 
2060 	switch (table) {
2061 	case PLATFORM_CONFIG_SYSTEM_TABLE:
2062 		/* fall through */
2063 	case PLATFORM_CONFIG_PORT_TABLE:
2064 		/* fall through */
2065 	case PLATFORM_CONFIG_RX_PRESET_TABLE:
2066 		/* fall through */
2067 	case PLATFORM_CONFIG_TX_PRESET_TABLE:
2068 		/* fall through */
2069 	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
2070 		/* fall through */
2071 	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
2072 		if (field && field < platform_config_table_limits[table])
2073 			src_ptr =
2074 			pcfgcache->config_tables[table].table_metadata + field;
2075 		break;
2076 	default:
2077 		dd_dev_info(dd, "%s: Unknown table\n", __func__);
2078 		break;
2079 	}
2080 
2081 	if (!src_ptr)
2082 		return -EINVAL;
2083 
2084 	if (field_start_bits)
2085 		*field_start_bits = *src_ptr &
2086 		      ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
2087 
2088 	if (field_len_bits)
2089 		*field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT)
2090 		       & ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
2091 
2092 	return 0;
2093 }
2094 
2095 /* This is the central interface to getting data out of the platform config
2096  * file. It depends on parse_platform_config() having populated the
2097  * platform_config_cache in hfi1_devdata, and checks the cache_valid member to
2098  * validate the sanity of the cache.
2099  *
2100  * The non-obvious parameters:
2101  * @table_index: Acts as a look up key into which instance of the tables the
2102  * relevant field is fetched from.
2103  *
2104  * This applies to the data tables that have multiple instances. The port table
2105  * is an exception to this rule as each HFI only has one port and thus the
2106  * relevant table can be distinguished by hfi_id.
2107  *
2108  * @data: pointer to memory that will be populated with the field requested.
2109  * @len: length of memory pointed by @data in bytes.
2110  */
2111 int get_platform_config_field(struct hfi1_devdata *dd,
2112 			      enum platform_config_table_type_encoding
2113 			      table_type, int table_index, int field_index,
2114 			      u32 *data, u32 len)
2115 {
2116 	int ret = 0, wlen = 0, seek = 0;
2117 	u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
2118 	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
2119 	struct hfi1_pportdata *ppd = dd->pport;
2120 
2121 	if (data)
2122 		memset(data, 0, len);
2123 	else
2124 		return -EINVAL;
2125 
2126 	if (ppd->config_from_scratch) {
2127 		/*
2128 		 * Use saved configuration from ppd for integrated platforms
2129 		 */
2130 		get_integrated_platform_config_field(dd, table_type,
2131 						     field_index, data);
2132 		return 0;
2133 	}
2134 
2135 	ret = get_platform_fw_field_metadata(dd, table_type, field_index,
2136 					     &field_len_bits,
2137 					     &field_start_bits);
2138 	if (ret)
2139 		return -EINVAL;
2140 
2141 	/* Convert length to bits */
2142 	len *= 8;
2143 
2144 	/* Our metadata function checked cache_valid and field_index for us */
2145 	switch (table_type) {
2146 	case PLATFORM_CONFIG_SYSTEM_TABLE:
2147 		src_ptr = pcfgcache->config_tables[table_type].table;
2148 
2149 		if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) {
2150 			if (len < field_len_bits)
2151 				return -EINVAL;
2152 
2153 			seek = field_start_bits / 8;
2154 			wlen = field_len_bits / 8;
2155 
2156 			src_ptr = (u32 *)((u8 *)src_ptr + seek);
2157 
2158 			/*
2159 			 * We expect the field to be byte aligned and whole byte
2160 			 * lengths if we are here
2161 			 */
2162 			memcpy(data, src_ptr, wlen);
2163 			return 0;
2164 		}
2165 		break;
2166 	case PLATFORM_CONFIG_PORT_TABLE:
2167 		/* Port table is 4 DWORDS */
2168 		src_ptr = dd->hfi1_id ?
2169 			pcfgcache->config_tables[table_type].table + 4 :
2170 			pcfgcache->config_tables[table_type].table;
2171 		break;
2172 	case PLATFORM_CONFIG_RX_PRESET_TABLE:
2173 		/* fall through */
2174 	case PLATFORM_CONFIG_TX_PRESET_TABLE:
2175 		/* fall through */
2176 	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
2177 		/* fall through */
2178 	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
2179 		src_ptr = pcfgcache->config_tables[table_type].table;
2180 
2181 		if (table_index <
2182 			pcfgcache->config_tables[table_type].num_table)
2183 			src_ptr += table_index;
2184 		else
2185 			src_ptr = NULL;
2186 		break;
2187 	default:
2188 		dd_dev_info(dd, "%s: Unknown table\n", __func__);
2189 		break;
2190 	}
2191 
2192 	if (!src_ptr || len < field_len_bits)
2193 		return -EINVAL;
2194 
2195 	src_ptr += (field_start_bits / 32);
2196 	*data = (*src_ptr >> (field_start_bits % 32)) &
2197 			((1 << field_len_bits) - 1);
2198 
2199 	return 0;
2200 }
2201 
2202 /*
2203  * Download the firmware needed for the Gen3 PCIe SerDes.  An update
2204  * to the SBus firmware is needed before updating the PCIe firmware.
2205  *
2206  * Note: caller must be holding the SBus resource.
2207  */
2208 int load_pcie_firmware(struct hfi1_devdata *dd)
2209 {
2210 	int ret = 0;
2211 
2212 	/* both firmware loads below use the SBus */
2213 	set_sbus_fast_mode(dd);
2214 
2215 	if (fw_sbus_load) {
2216 		turn_off_spicos(dd, SPICO_SBUS);
2217 		do {
2218 			ret = load_sbus_firmware(dd, &fw_sbus);
2219 		} while (retry_firmware(dd, ret));
2220 		if (ret)
2221 			goto done;
2222 	}
2223 
2224 	if (fw_pcie_serdes_load) {
2225 		dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
2226 		set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
2227 				     pcie_serdes_broadcast[dd->hfi1_id],
2228 				     pcie_serdes_addrs[dd->hfi1_id],
2229 				     NUM_PCIE_SERDES);
2230 		do {
2231 			ret = load_pcie_serdes_firmware(dd, &fw_pcie);
2232 		} while (retry_firmware(dd, ret));
2233 		if (ret)
2234 			goto done;
2235 	}
2236 
2237 done:
2238 	clear_sbus_fast_mode(dd);
2239 
2240 	return ret;
2241 }
2242 
2243 /*
2244  * Read the GUID from the hardware, store it in dd.
2245  */
2246 void read_guid(struct hfi1_devdata *dd)
2247 {
2248 	/* Take the DC out of reset to get a valid GUID value */
2249 	write_csr(dd, CCE_DC_CTRL, 0);
2250 	(void)read_csr(dd, CCE_DC_CTRL);
2251 
2252 	dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
2253 	dd_dev_info(dd, "GUID %llx",
2254 		    (unsigned long long)dd->base_guid);
2255 }
2256 
2257 /* read and display firmware version info */
2258 static void dump_fw_version(struct hfi1_devdata *dd)
2259 {
2260 	u32 pcie_vers[NUM_PCIE_SERDES];
2261 	u32 fabric_vers[NUM_FABRIC_SERDES];
2262 	u32 sbus_vers;
2263 	int i;
2264 	int all_same;
2265 	int ret;
2266 	u8 rcv_addr;
2267 
2268 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
2269 	if (ret) {
2270 		dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
2271 		return;
2272 	}
2273 
2274 	/* set fast mode */
2275 	set_sbus_fast_mode(dd);
2276 
2277 	/* read version for SBus Master */
2278 	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
2279 	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
2280 	/* wait for interrupt to be processed */
2281 	usleep_range(10000, 11000);
2282 	sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
2283 	dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);
2284 
2285 	/* read version for PCIe SerDes */
2286 	all_same = 1;
2287 	pcie_vers[0] = 0;
2288 	for (i = 0; i < NUM_PCIE_SERDES; i++) {
2289 		rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
2290 		sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
2291 		/* wait for interrupt to be processed */
2292 		usleep_range(10000, 11000);
2293 		pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
2294 		if (i > 0 && pcie_vers[0] != pcie_vers[i])
2295 			all_same = 0;
2296 	}
2297 
2298 	if (all_same) {
2299 		dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
2300 			    pcie_vers[0]);
2301 	} else {
2302 		dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
2303 		for (i = 0; i < NUM_PCIE_SERDES; i++) {
2304 			dd_dev_info(dd,
2305 				    "PCIe SerDes lane %d firmware version 0x%x\n",
2306 				    i, pcie_vers[i]);
2307 		}
2308 	}
2309 
2310 	/* read version for fabric SerDes */
2311 	all_same = 1;
2312 	fabric_vers[0] = 0;
2313 	for (i = 0; i < NUM_FABRIC_SERDES; i++) {
2314 		rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
2315 		sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
2316 		/* wait for interrupt to be processed */
2317 		usleep_range(10000, 11000);
2318 		fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
2319 		if (i > 0 && fabric_vers[0] != fabric_vers[i])
2320 			all_same = 0;
2321 	}
2322 
2323 	if (all_same) {
2324 		dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
2325 			    fabric_vers[0]);
2326 	} else {
2327 		dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
2328 		for (i = 0; i < NUM_FABRIC_SERDES; i++) {
2329 			dd_dev_info(dd,
2330 				    "Fabric SerDes lane %d firmware version 0x%x\n",
2331 				    i, fabric_vers[i]);
2332 		}
2333 	}
2334 
2335 	clear_sbus_fast_mode(dd);
2336 	release_chip_resource(dd, CR_SBUS);
2337 }
2338