/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

/* note default template is in big endian */
static const uint32_t ql27xx_fwdt_default_template[] = {
	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x04010000, 0x14000000, 0x00000000,
	0x02000000, 0x44000000, 0x09010000, 0x10000000,
	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
	0x00000000, 0x02000000, 0x00600000, 0x00000000,
	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
	0x00000000, 0x02000000, 0x01000000, 0x00000200,
	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
	0x02000000, 0x02000000, 0x00001000, 0x00000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x01000000, 0x07010000, 0x18000000,
	0x00000000, 0x02000000, 0x00000000, 0x02000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
	0x00000000, 0x02000000, 0x00000000, 0xff000000,
	0x10000000, 0x00000000, 0x00000080,
};

static inline void __iomem *
qla27xx_isp_reg(struct scsi_qla_host *vha)
{
	return &vha->hw->iobase->isp24;
}

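/*
 * Output helpers: each routine stores a value into the dump buffer at
 * offset *len and always advances *len, even when buf is NULL.  The
 * same template walk therefore serves two purposes: a sizing pass
 * (buf == NULL) that only accumulates the required length, and a
 * capture pass that actually writes the data.
 */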
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	__iomem void *window = (void __iomem *)reg + offset;

	if (buf) {
		WRT_REG_DWORD(window, data);
	}
}

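/*
 * Read a block of registers through an ISP register window: program
 * IOBASE_ADDR with the window base, then read 'count' registers of
 * 'width' bytes at 'offset', recording the window address before each
 * value.
 */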
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

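/*
 * Entries that cannot be captured are not removed from the dump; they
 * are tagged with DRIVER_FLAG_SKIP_ENTRY so post-processing tools know
 * to ignore them.  Entries are variable length and chained via
 * hdr.size.
 */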
static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
	return (void *)ent + ent->hdr.size;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return NULL;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
	    ent->t256.reg_count, ent->t256.reg_width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
	    ent->t258.reg_count, ent->t258.reg_width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(ent->t260.pci_offset, buf, len);
	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);

	return qla27xx_next_entry(ent);
}

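/*
 * Entry type 262: dump a firmware RAM region.  Critical RAM uses the
 * boundaries supplied by the template; external, shared and DDR RAM
 * boundaries are taken from the hardware state and written back into
 * the entry so the consumer sees the range actually captured.
 */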
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords;
	ulong start;
	ulong end;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
	start = ent->t262.start_addr;
	end = ent->t262.end_addr;

	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = end;
	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else if (ent->t262.ram_area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, ent->t262.ram_area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%x end=%x)\n", __func__,
		    ent->t262.start_addr, ent->t262.end_addr);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}

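/*
 * Entry type 263: capture I/O queues (request, response or ATIO).
 * Each captured queue is emitted as a 16-bit queue id, a 16-bit entry
 * count and the raw ring contents; num_queues is patched with the
 * number of queues actually captured.
 */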
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(reg, vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_soft_reset(vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);

	return qla27xx_next_entry(ent);
}

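/*
 * Entry type 268: capture a host-resident buffer (extended firmware
 * trace, exchange offload or extended login).  The buffer size and DMA
 * address are written back into the entry before the contents are
 * copied.
 */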
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
					vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
					vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the
		 * driver; shadow pointers are used instead.
		 * Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

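/*
 * Entry types 270-273: indirect register and memory access.  Types 270
 * and 271 read and write remote registers through IOBASE window 0x40,
 * using the address register at offset 0xc0 and the data register at
 * 0xc4; type 272 dumps MPI RAM and type 273 reads PCI config space.
 */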
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong dwords = ent->t270.count;
	ulong addr = ent->t270.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(reg, 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = ent->t271.addr;
	ulong data = ent->t271.data;

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	qla27xx_write_reg(reg, 0xc4, data, buf);
	qla27xx_write_reg(reg, 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t272.count;
	ulong start = ent->t272.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t273.count;
	ulong addr = ent->t273.addr;
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

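/*
 * Entry type 274: capture queue shadow pointers.  For each queue the
 * queue id, a count of one and the current out/in pointer value (or
 * the ATIO queue-in register) are emitted; num_queues is patched
 * afterwards.
 */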
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc, vha, 0xd212,
	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);

	ql_dbg(ql_dbg_misc, vha, 0xd213,
	    "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
	if (!ent->t275.length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + ent->t275.length > ent->hdr.size) {
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
done:
	return qla27xx_next_entry(ent);
}

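/*
 * Entry type 276: conditional capture.  If the ISP device type or port
 * number does not match the conditions carried in the entry, the entry
 * that follows is flagged as skipped and stepped over.
 */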
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = vha->hw->pdev->device >> 4 & 0xf;
	uint func = vha->hw->port_no & 0x3;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (type != ent->t276.cond1 || func != ent->t276.cond2) {
		ent = qla27xx_next_entry(ent);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(ent->t277.wr_cmd_data, buf, len);
	qla27xx_write_reg(reg, ent->t277.cmd_addr, ent->t277.wr_cmd_data, buf);
	qla27xx_read_reg(reg, ent->t277.data_addr, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t278.data_addr, ent->t278.wr_data, buf);
	qla27xx_write_reg(reg, ent->t278.cmd_addr, ent->t278.wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: type %x [%lx]\n", __func__, ent->hdr.type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

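/*
 * Dispatch table mapping entry type to handler.  The table is sorted
 * by type so qla27xx_find_entry() can stop at the first type that is
 * not smaller than the one requested; unknown types fall through to
 * qla27xx_fwdt_entry_other(), which skips the entry.
 */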
static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP,		qla27xx_fwdt_entry_t0    },
	{ ENTRY_TYPE_TMP_END,		qla27xx_fwdt_entry_t255  },
	{ ENTRY_TYPE_RD_IOB_T1,		qla27xx_fwdt_entry_t256  },
	{ ENTRY_TYPE_WR_IOB_T1,		qla27xx_fwdt_entry_t257  },
	{ ENTRY_TYPE_RD_IOB_T2,		qla27xx_fwdt_entry_t258  },
	{ ENTRY_TYPE_WR_IOB_T2,		qla27xx_fwdt_entry_t259  },
	{ ENTRY_TYPE_RD_PCI,		qla27xx_fwdt_entry_t260  },
	{ ENTRY_TYPE_WR_PCI,		qla27xx_fwdt_entry_t261  },
	{ ENTRY_TYPE_RD_RAM,		qla27xx_fwdt_entry_t262  },
	{ ENTRY_TYPE_GET_QUEUE,		qla27xx_fwdt_entry_t263  },
	{ ENTRY_TYPE_GET_FCE,		qla27xx_fwdt_entry_t264  },
	{ ENTRY_TYPE_PSE_RISC,		qla27xx_fwdt_entry_t265  },
	{ ENTRY_TYPE_RST_RISC,		qla27xx_fwdt_entry_t266  },
	{ ENTRY_TYPE_DIS_INTR,		qla27xx_fwdt_entry_t267  },
	{ ENTRY_TYPE_GET_HBUF,		qla27xx_fwdt_entry_t268  },
	{ ENTRY_TYPE_SCRATCH,		qla27xx_fwdt_entry_t269  },
	{ ENTRY_TYPE_RDREMREG,		qla27xx_fwdt_entry_t270  },
	{ ENTRY_TYPE_WRREMREG,		qla27xx_fwdt_entry_t271  },
	{ ENTRY_TYPE_RDREMRAM,		qla27xx_fwdt_entry_t272  },
	{ ENTRY_TYPE_PCICFG,		qla27xx_fwdt_entry_t273  },
	{ ENTRY_TYPE_GET_SHADOW,	qla27xx_fwdt_entry_t274  },
	{ ENTRY_TYPE_WRITE_BUF,		qla27xx_fwdt_entry_t275  },
	{ ENTRY_TYPE_CONDITIONAL,	qla27xx_fwdt_entry_t276  },
	{ ENTRY_TYPE_RDPEPREG,		qla27xx_fwdt_entry_t277  },
	{ ENTRY_TYPE_WRPEPREG,		qla27xx_fwdt_entry_t278  },
	{ -1,				qla27xx_fwdt_entry_other }
};

static inline
typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

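/*
 * Walk every entry in the template, dispatching each to its handler.
 * With buf == NULL this computes the required dump size; with a real
 * buffer it captures the dump and cross-checks the resulting length
 * against the preallocated fw_dump_len.
 */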
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
	ulong count = tmp->entry_count;

	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %lx\n", __func__, count);
	while (count--) {
		ent = qla27xx_find_entry(ent->hdr.type)(vha, ent, buf, len);
		if (!ent)
			break;
	}

	if (count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry residual count (%lx)\n", __func__, count);

	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry (%lx)\n", __func__, count);

	if (buf && *len != vha->hw->fw_dump_len)
		ql_dbg(ql_dbg_misc, vha, 0xd01b,
		    "%s: length=%#lx residual=%+ld\n",
		    __func__, *len, vha->hw->fw_dump_len - *len);

	if (buf) {
		ql_log(ql_log_warn, vha, 0xd015,
		    "Firmware dump saved to temp buffer (%lu/%p)\n",
		    vha->host_no, vha->hw->fw_dump);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5);

	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;
}

static void
qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
	struct scsi_qla_host *vha)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(tmp, vha);
}

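/*
 * Template checksum: a 32-bit sum of all words with the carry folded
 * back in, returned complemented.  A template whose stored checksum is
 * correct therefore yields zero.
 */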
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	uint32_t *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	while (size--)
		sum += *buf++;

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
}

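/*
 * Produce a firmware dump: copy the template to the head of the dump
 * buffer, stamp it with time, driver and firmware information, then
 * walk it with the dump buffer as destination so that entry data lands
 * directly after the template.
 */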
static void
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(vha->hw->fw_dump, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, tmp, &len);
		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = 1;
	}
}

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

ulong
qla27xx_fwdt_template_default_size(void)
{
	return sizeof(ql27xx_fwdt_default_template);
}

const void *
qla27xx_fwdt_template_default(void)
{
	return ql27xx_fwdt_default_template;
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__, tmp->template_type);
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

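/*
 * Main entry point for ISP27xx firmware dumps.  Takes the hardware
 * lock unless the caller already holds it, refuses to overwrite a
 * previously captured dump, and otherwise executes the dump template.
 */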
void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump)
		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
	else if (!vha->hw->fw_dump_template)
		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
	else if (vha->hw->fw_dumped)
		ql_log(ql_log_warn, vha, 0xd300,
		    "Firmware has been previously dumped (%p),"
		    " -- ignoring request\n", vha->hw->fw_dump);
	else {
		QLA_FW_STOPPED(vha->hw);
		qla27xx_execute_fwdt_template(vha);
	}

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}