xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_tmpl.c (revision 31af04cd)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_tmpl.h"
9 
/*
 * Fallback firmware dump template, used when the firmware image does not
 * supply one.  NOTE: the default template is stored in big endian.
 */
static const uint32_t ql27xx_fwdt_default_template[] = {
	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x04010000, 0x14000000, 0x00000000,
	0x02000000, 0x44000000, 0x09010000, 0x10000000,
	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
	0x00000000, 0x02000000, 0x00600000, 0x00000000,
	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
	0x00000000, 0x02000000, 0x01000000, 0x00000200,
	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
	0x02000000, 0x02000000, 0x00001000, 0x00000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x01000000, 0x07010000, 0x18000000,
	0x00000000, 0x02000000, 0x00000000, 0x02000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
	0x00000000, 0x02000000, 0x00000000, 0xff000000,
	0x10000000, 0x00000000, 0x00000080,
};
101 
102 static inline void __iomem *
103 qla27xx_isp_reg(struct scsi_qla_host *vha)
104 {
105 	return &vha->hw->iobase->isp24;
106 }
107 
108 static inline void
109 qla27xx_insert16(uint16_t value, void *buf, ulong *len)
110 {
111 	if (buf) {
112 		buf += *len;
113 		*(__le16 *)buf = cpu_to_le16(value);
114 	}
115 	*len += sizeof(value);
116 }
117 
118 static inline void
119 qla27xx_insert32(uint32_t value, void *buf, ulong *len)
120 {
121 	if (buf) {
122 		buf += *len;
123 		*(__le32 *)buf = cpu_to_le32(value);
124 	}
125 	*len += sizeof(value);
126 }
127 
128 static inline void
129 qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
130 {
131 
132 	if (buf && mem && size) {
133 		buf += *len;
134 		memcpy(buf, mem, size);
135 	}
136 	*len += size;
137 }
138 
139 static inline void
140 qla27xx_read8(void __iomem *window, void *buf, ulong *len)
141 {
142 	uint8_t value = ~0;
143 
144 	if (buf) {
145 		value = RD_REG_BYTE(window);
146 	}
147 	qla27xx_insert32(value, buf, len);
148 }
149 
150 static inline void
151 qla27xx_read16(void __iomem *window, void *buf, ulong *len)
152 {
153 	uint16_t value = ~0;
154 
155 	if (buf) {
156 		value = RD_REG_WORD(window);
157 	}
158 	qla27xx_insert32(value, buf, len);
159 }
160 
161 static inline void
162 qla27xx_read32(void __iomem *window, void *buf, ulong *len)
163 {
164 	uint32_t value = ~0;
165 
166 	if (buf) {
167 		value = RD_REG_DWORD(window);
168 	}
169 	qla27xx_insert32(value, buf, len);
170 }
171 
172 static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
173 {
174 	return
175 	    (width == 1) ? qla27xx_read8 :
176 	    (width == 2) ? qla27xx_read16 :
177 			   qla27xx_read32;
178 }
179 
180 static inline void
181 qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
182 	uint offset, void *buf, ulong *len)
183 {
184 	void __iomem *window = (void __iomem *)reg + offset;
185 
186 	qla27xx_read32(window, buf, len);
187 }
188 
189 static inline void
190 qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
191 	uint offset, uint32_t data, void *buf)
192 {
193 	__iomem void *window = (void __iomem *)reg + offset;
194 
195 	if (buf) {
196 		WRT_REG_DWORD(window, data);
197 	}
198 }
199 
/*
 * Capture 'count' registers of 'width' bytes starting at window address
 * 'addr'.  The IOBASE_ADDR register is programmed first to select the
 * window; each sample is emitted as an <address, value> pair.  During the
 * sizing pass (buf == NULL) no hardware access occurs, only accounting.
 */
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;	/* next register in the window */
		addr++;
	}
}
216 
217 static inline void
218 qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
219 {
220 	if (buf)
221 		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
222 }
223 
/* Entry type 0: no-op; skipped in the final dump.  Never terminates walk. */
static int
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return false;
}
234 
/* Entry type 255: end-of-template marker; returns true to stop the walk. */
static int
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return true;
}
246 
/* Entry type 256: read a type-1 I/O register window into the dump. */
static int
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
	    ent->t256.reg_count, ent->t256.reg_width, buf, len);

	return false;
}
260 
/*
 * Entry type 257: type-1 I/O register write.  Selects the window via
 * IOBASE_ADDR, then writes the data; order matters.
 */
static int
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);

	return false;
}
274 
/*
 * Entry type 258: read a type-2 I/O register window — like type 256 but a
 * bank-select register is programmed before reading the window.
 */
static int
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
	    ent->t258.reg_count, ent->t258.reg_width, buf, len);

	return false;
}
289 
/*
 * Entry type 259: type-2 I/O register write — window select, then bank
 * select, then the data write, in that order.
 */
static int
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);

	return false;
}
304 
/*
 * Entry type 260: dump one PCI register as an <offset, value> pair.
 */
static int
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(ent->t260.pci_offset, buf, len);
	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);

	return false;
}
318 
/* Entry type 261: write one PCI register; contributes nothing to the dump. */
static int
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);

	return false;
}
331 
332 static int
333 qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
334 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
335 {
336 	ulong dwords;
337 	ulong start;
338 	ulong end;
339 
340 	ql_dbg(ql_dbg_misc, vha, 0xd206,
341 	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
342 	start = ent->t262.start_addr;
343 	end = ent->t262.end_addr;
344 
345 	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
346 		;
347 	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
348 		end = vha->hw->fw_memory_size;
349 		if (buf)
350 			ent->t262.end_addr = end;
351 	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
352 		start = vha->hw->fw_shared_ram_start;
353 		end = vha->hw->fw_shared_ram_end;
354 		if (buf) {
355 			ent->t262.start_addr = start;
356 			ent->t262.end_addr = end;
357 		}
358 	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
359 		start = vha->hw->fw_ddr_ram_start;
360 		end = vha->hw->fw_ddr_ram_end;
361 		if (buf) {
362 			ent->t262.start_addr = start;
363 			ent->t262.end_addr = end;
364 		}
365 	} else {
366 		ql_dbg(ql_dbg_misc, vha, 0xd022,
367 		    "%s: unknown area %x\n", __func__, ent->t262.ram_area);
368 		qla27xx_skip_entry(ent, buf);
369 		goto done;
370 	}
371 
372 	if (end < start || start == 0 || end == 0) {
373 		ql_dbg(ql_dbg_misc, vha, 0xd023,
374 		    "%s: unusable range (start=%x end=%x)\n", __func__,
375 		    ent->t262.end_addr, ent->t262.start_addr);
376 		qla27xx_skip_entry(ent, buf);
377 		goto done;
378 	}
379 
380 	dwords = end - start + 1;
381 	if (buf) {
382 		buf += *len;
383 		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
384 	}
385 	*len += dwords * sizeof(uint32_t);
386 done:
387 	return false;
388 }
389 
/*
 * Entry type 263: capture queue contents.  For each queue of the requested
 * type, emit <queue id (16)><length (16)><ring contents>.  On the sizing
 * pass (buf == NULL) queues that are not yet allocated are still counted at
 * a default length so the dump buffer ends up large enough.
 */
static int
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		/* Single ATIO queue, only when target mode is compiled in. */
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	/* Record how many queues were captured; none means skip the entry. */
	if (buf) {
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return false;
}
455 
/*
 * Entry type 264: capture the Fibre Channel Event (FCE) trace buffer along
 * with its control state (write pointer, DMA base, enable mailboxes).
 * Skipped if FCE tracing was never set up.
 */
static int
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return false;
}
483 
/*
 * Entry type 265: pause the RISC processor.  Hardware is only touched on
 * the capture pass (buf != NULL); contributes nothing to the dump data.
 */
static int
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(reg, vha->hw);

	return false;
}
497 
/*
 * Entry type 266: soft-reset the RISC processor; capture pass only.
 */
static int
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_soft_reset(vha->hw);

	return false;
}
509 
/*
 * Entry type 267: disable interrupts by writing a template-supplied value
 * to a template-supplied PCI register offset; capture pass only.
 */
static int
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);

	return false;
}
522 
/*
 * Entry type 268: capture a host-memory buffer (extended trace, exchange
 * offload, or extended login).  The buffer's size and DMA address are
 * written back into the template entry; missing buffers skip the entry.
 */
static int
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
					vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
					vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the
		 * driver, instead shadow pointers are used by
		 * the driver. Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return false;
}
592 
/*
 * Entry type 269: emit a fixed scratch pattern plus the running dump
 * length — useful as a landmark when inspecting the dump by hand.
 */
static int
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	/* Dump length including this very dword. */
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return false;
}
609 
/*
 * Entry type 270: indirect ("remote") register reads.  With the I/O window
 * set to 0x40, each read writes address|0x80000000 to offset 0xc0 and then
 * reads the value back from offset 0xc4; each sample is emitted as an
 * <address, value> pair.
 */
static int
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong dwords = ent->t270.count;
	ulong addr = ent->t270.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(reg, 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return false;
}
630 
/*
 * Entry type 271: indirect ("remote") register write.  Data is staged at
 * offset 0xc4, then writing the address to 0xc0 commits it; the address
 * write must come last.  Capture pass only; nothing added to the dump.
 */
static int
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = ent->t271.addr;
	ulong data = ent->t271.data;

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	qla27xx_write_reg(reg, 0xc4, data, buf);
	qla27xx_write_reg(reg, 0xc0, addr, buf);

	return false;
}
647 
/*
 * Entry type 272: dump a region of MPI RAM.  The sizing pass only accounts
 * for the length; the capture pass delegates to qla27xx_dump_mpi_ram().
 */
static int
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t272.count;
	ulong start = ent->t272.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return false;
}
667 
/*
 * Entry type 273: dump PCI config space as <offset, value> pairs.  A failed
 * config read leaves the all-ones placeholder in the dump and logs it.
 */
static int
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t273.count;
	ulong addr = ent->t273.addr;
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return false;
}
690 
/*
 * Entry type 274: capture shadow queue pointers.  For each queue of the
 * requested type, emit <queue id (16)><count=1 (16)><shadow pointer (32)>.
 * On the sizing pass (buf == NULL) unallocated queues are still counted.
 */
static int
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc, vha, 0xd212,
	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		/* Single ATIO queue, only when target mode is compiled in. */
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	/* Record how many queues were captured; none means skip the entry. */
	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return false;
}
751 
/*
 * Entry type 275: copy a buffer embedded in the template entry itself into
 * the dump.  Zero-length buffers and buffers that would run past the
 * declared entry size are rejected and the entry skipped.
 */
static int
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);

	ql_dbg(ql_dbg_misc, vha, 0xd213,
	    "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
	if (!ent->t275.length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + ent->t275.length > ent->hdr.entry_size) {
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
done:
	return false;
}
777 
/* Catch-all handler: log and skip entries of unrecognized type. */
static int
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
	qla27xx_skip_entry(ent, buf);

	return false;
}
788 
/* Binds a template entry type to the handler that captures it. */
struct qla27xx_fwdt_entry_call {
	uint type;
	int (*call)(
	    struct scsi_qla_host *,
	    struct qla27xx_fwdt_entry *,
	    void *,
	    ulong *);
};
797 
/*
 * Handler dispatch table.  MUST stay sorted by ascending entry type:
 * qla27xx_find_entry() scans linearly while list->type < type.  The -1
 * sentinel (UINT_MAX as uint) terminates the scan for unknown types.
 */
static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
	{ ENTRY_TYPE_NOP		, qla27xx_fwdt_entry_t0    } ,
	{ ENTRY_TYPE_TMP_END		, qla27xx_fwdt_entry_t255  } ,
	{ ENTRY_TYPE_RD_IOB_T1		, qla27xx_fwdt_entry_t256  } ,
	{ ENTRY_TYPE_WR_IOB_T1		, qla27xx_fwdt_entry_t257  } ,
	{ ENTRY_TYPE_RD_IOB_T2		, qla27xx_fwdt_entry_t258  } ,
	{ ENTRY_TYPE_WR_IOB_T2		, qla27xx_fwdt_entry_t259  } ,
	{ ENTRY_TYPE_RD_PCI		, qla27xx_fwdt_entry_t260  } ,
	{ ENTRY_TYPE_WR_PCI		, qla27xx_fwdt_entry_t261  } ,
	{ ENTRY_TYPE_RD_RAM		, qla27xx_fwdt_entry_t262  } ,
	{ ENTRY_TYPE_GET_QUEUE		, qla27xx_fwdt_entry_t263  } ,
	{ ENTRY_TYPE_GET_FCE		, qla27xx_fwdt_entry_t264  } ,
	{ ENTRY_TYPE_PSE_RISC		, qla27xx_fwdt_entry_t265  } ,
	{ ENTRY_TYPE_RST_RISC		, qla27xx_fwdt_entry_t266  } ,
	{ ENTRY_TYPE_DIS_INTR		, qla27xx_fwdt_entry_t267  } ,
	{ ENTRY_TYPE_GET_HBUF		, qla27xx_fwdt_entry_t268  } ,
	{ ENTRY_TYPE_SCRATCH		, qla27xx_fwdt_entry_t269  } ,
	{ ENTRY_TYPE_RDREMREG		, qla27xx_fwdt_entry_t270  } ,
	{ ENTRY_TYPE_WRREMREG		, qla27xx_fwdt_entry_t271  } ,
	{ ENTRY_TYPE_RDREMRAM		, qla27xx_fwdt_entry_t272  } ,
	{ ENTRY_TYPE_PCICFG		, qla27xx_fwdt_entry_t273  } ,
	{ ENTRY_TYPE_GET_SHADOW		, qla27xx_fwdt_entry_t274  } ,
	{ ENTRY_TYPE_WRITE_BUF		, qla27xx_fwdt_entry_t275  } ,
	{ -1				, qla27xx_fwdt_entry_other }
};
823 
/*
 * Look up the handler for a template entry type.  Relies on the call list
 * being sorted ascending; the -1 sentinel entry guarantees the scan stops.
 * Unknown types fall through to qla27xx_fwdt_entry_other().
 */
static inline int (*qla27xx_find_entry(uint type))
	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
{
	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}
836 
/*
 * Advance to the next template entry using the current entry's declared
 * size.  NOTE(review): entry_size is trusted as-is; a corrupt template with
 * a zero size would make the walker spin — verify templates are validated
 * upstream (qla27xx_fwdt_template_valid checksums the whole template).
 */
static inline void *
qla27xx_next_entry(void *p)
{
	struct qla27xx_fwdt_entry *ent = p;

	return p + ent->hdr.entry_size;
}
844 
/*
 * Walk every entry of a firmware dump template, dispatching each to its
 * handler.  Called twice: once with buf == NULL to size the dump, and once
 * with buf pointing at the dump buffer to capture it.  *len accumulates
 * the dump length in both passes.
 */
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
	ulong count = tmp->entry_count;

	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %lx\n", __func__, count);
	while (count--) {
		/* Stop a capture pass that is about to overrun the buffer. */
		if (buf && *len >= vha->hw->fw_dump_len)
			break;
		/* Handlers return true (only TMP_END does) to terminate. */
		if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
			break;
		ent = qla27xx_next_entry(ent);
	}

	if (count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry residual count (%lx)\n", __func__, count);

	if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry (%lx)\n", __func__, count);

	if (buf && *len != vha->hw->fw_dump_len)
		ql_dbg(ql_dbg_misc, vha, 0xd01b,
		    "%s: length=%#lx residual=%+ld\n",
		    __func__, *len, vha->hw->fw_dump_len - *len);

	if (buf) {
		ql_log(ql_log_warn, vha, 0xd015,
		    "Firmware dump saved to temp buffer (%lu/%p)\n",
		    vha->host_no, vha->hw->fw_dump);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}
882 
/* Stamp the template header with the capture time (in jiffies). */
static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}
888 
/*
 * Record the driver version in the template header.  The six dotted
 * components of qla2x00_version_str are packed into two dwords; fields
 * that fail to parse stay zero.
 */
static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5);

	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;	/* fixed marker value */
}
901 
902 static void
903 qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
904 	struct scsi_qla_host *vha)
905 {
906 	tmp->firmware_version[0] = vha->hw->fw_major_version;
907 	tmp->firmware_version[1] = vha->hw->fw_minor_version;
908 	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
909 	tmp->firmware_version[3] =
910 	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
911 	tmp->firmware_version[4] =
912 	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
913 }
914 
/* Fill in the runtime fields (timestamp, driver and firmware versions). */
static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(tmp, vha);
}
923 
924 static inline uint32_t
925 qla27xx_template_checksum(void *p, ulong size)
926 {
927 	uint32_t *buf = p;
928 	uint64_t sum = 0;
929 
930 	size /= sizeof(*buf);
931 
932 	while (size--)
933 		sum += *buf++;
934 
935 	sum = (sum & 0xffffffff) + (sum >> 32);
936 
937 	return ~sum;
938 }
939 
940 static inline int
941 qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
942 {
943 	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
944 }
945 
946 static inline int
947 qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
948 {
949 	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
950 }
951 
/*
 * Capture a firmware dump: copy the template into the dump buffer, edit in
 * the runtime fields, then walk it in capture mode so the entry data lands
 * directly after the template copy.  Records the final length and marks
 * the dump as taken.
 */
static void
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(vha->hw->fw_dump, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, tmp, &len);
		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = 1;
	}
}
967 
968 ulong
969 qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
970 {
971 	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
972 	ulong len = 0;
973 
974 	if (qla27xx_fwdt_template_valid(tmp)) {
975 		len = tmp->template_size;
976 		qla27xx_walk_template(vha, tmp, NULL, &len);
977 	}
978 
979 	return len;
980 }
981 
982 ulong
983 qla27xx_fwdt_template_size(void *p)
984 {
985 	struct qla27xx_fwdt_template *tmp = p;
986 
987 	return tmp->template_size;
988 }
989 
/* Size in bytes of the built-in default template. */
ulong
qla27xx_fwdt_template_default_size(void)
{
	return sizeof(ql27xx_fwdt_default_template);
}
995 
996 const void *
997 qla27xx_fwdt_template_default(void)
998 {
999 	return ql27xx_fwdt_default_template;
1000 }
1001 
/*
 * Validate a dump template: the header must declare the firmware-dump
 * type and the whole template must pass the checksum.  Returns true when
 * the template is usable.
 */
int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__, tmp->template_type);
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}
1021 
/*
 * Top-level firmware dump entry point.  Takes the hardware lock unless the
 * caller already holds it (hardware_locked != 0), then captures a dump if
 * a buffer and template exist and no dump is already pending.  The
 * __CHECKER__ guards hide the conditional locking from sparse, which
 * cannot model it.
 */
void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump)
		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
	else if (!vha->hw->fw_dump_template)
		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
	else if (vha->hw->fw_dumped)
		ql_log(ql_log_warn, vha, 0xd300,
		    "Firmware has been previously dumped (%p),"
		    " -- ignoring request\n", vha->hw->fw_dump);
	else {
		/* Mark firmware stopped before touching it for the dump. */
		QLA_FW_STOPPED(vha->hw);
		qla27xx_execute_fwdt_template(vha);
	}

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}
1050