// SPDX-License-Identifier: GPL-2.0-only
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds.  */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT)	\
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND)	\
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

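/* Debug output is gated by the esp_debug bitmask above.  As a hedged
 * example (assuming esp_debug is exposed as a module parameter, which
 * is wired up elsewhere in this driver), interrupt plus event tracing
 * could be enabled at load time with:
 *
 *	modprobe esp_scsi esp_debug=0x801
 *
 * i.e. ESP_DEBUG_INTR | ESP_DEBUG_EVENT.
 */
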
#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

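/* Note on the event log above: the (idx + 1) & (ESP_EVENT_LOG_SZ - 1)
 * arithmetic implements a ring buffer and is only correct because
 * ESP_EVENT_LOG_SZ is a power of two; with a log size of 32, for
 * example, index 31 wraps back around to 0.
 */
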
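/* Ship a command block to the chip: either stuff the FIFO byte-by-byte
 * (glue drivers that do PIO set ESP_FLAG_USE_FIFO) or hand the block
 * to the glue driver's DMA engine via the send_dma_cmd hook.
 */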
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_FBYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

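/* The FASHME (Happy Meal) variant drains the FIFO two bytes per
 * FIFO-flags count below, since its FIFO is effectively 16 bits wide;
 * ESP_STAT2_F1BYTE flags a dangling odd byte, which is pushed out by
 * first writing a pad byte.
 */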
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));

		if (family_code == ESP_UID_F236) {
			esp->rev = FAS236;
		} else if (family_code == ESP_UID_HME) {
			esp->rev = FASHME; /* Version is usually '5'. */
		} else if (family_code == ESP_UID_FSC) {
			esp->rev = FSC;
			/* Enable Active Negation */
			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
		} else {
			esp->rev = FAS100A;
		}
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
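	/* Round the period limits up into the 4ns units used by SDTR
	 * period bytes, so they can be compared directly against the
	 * value a target sends during synchronous negotiation.
	 */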
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		fallthrough;

	case FAS236:
	case PCSCSI:
	case FSC:
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead of
		 * a dma address, so perform an identity mapping.
		 */
		spriv->num_sg = scsi_sg_count(cmd);

		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
			s->dma_address = (uintptr_t)sg_virt(s);
			total += sg_dma_len(s);
		}
	} else {
		spriv->num_sg = scsi_dma_map(cmd);
		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
			total += sg_dma_len(s);
	}
	spriv->cur_residue = sg_dma_len(sg);
	spriv->prv_sg = NULL;
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->prv_sg = p->cur_sg;
		p->cur_sg = sg_next(p->cur_sg);
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		scsi_dma_unmap(cmd);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_prv_sg = spriv->prv_sg;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->prv_sg = ent->saved_prv_sg;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts.  */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}
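
/* Worked example for the non-FASHME clamp above: with dma_addr =
 * 0x00fffff0 and dma_len = 0x100, base is 0xfffff0 and base + dma_len
 * would cross the 24-bit boundary, so end is clamped to 0x1000000 and
 * only end - base = 0x10 bytes go out in this chunk; the remainder is
 * transferred later.
 */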

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care".  */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		dma_unmap_single(esp->dev, ent->sense_dma,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	ent->sense_ptr = NULL;
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
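/* The REQUEST_SENSE CDB assembled below is the classic 6-byte form:
 * opcode, LUN in the top three bits of byte 1 for SCSI-2 and older
 * targets, two reserved bytes, the allocation length, and a control
 * byte.
 */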
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

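/* Scan the issue queue for a command that can be started now.  On
 * success, ent->tag[0] holds the tag message type (e.g.
 * SIMPLE_QUEUE_TAG) and ent->tag[1] the tag number, or both are zero
 * for an untagged command.
 */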
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* If there are multiple message bytes, use Select and Stop */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

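/* Command entries are recycled through esp->esp_cmd_pool; the
 * allocation below uses GFP_ATOMIC because this path runs with the
 * host lock held from the queuecommand hook.
 */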
static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned char host_byte)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = 0;
	set_host_byte(cmd, host_byte);
	if (host_byte == DID_OK)
		set_status_byte(cmd, ent->status);

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->num_sg = 0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)
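
/* DEF_SCSI_QCMD generates the esp_queuecommand() wrapper that takes
 * the host lock (with interrupts disabled) and then calls
 * esp_queuecommand_lck() above.
 */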

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %ps\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, DID_ERROR);
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET);
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a Select And Stop command, negotiation, etc.
		 * we'll do the right thing as we transition to the next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'peculiarity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
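
/* Hedged example of the STP math above (assuming esp->ccycle holds the
 * chip clock period such that esp->ccycle / 1000 is whole nanoseconds,
 * as set up by the glue driver): with a 40MHz clock one_clock is 25ns,
 * and an SDTR period byte of 25 (i.e. 100ns) yields
 * stp = DIV_ROUND_UP(25 << 2, 25) = 4 clocks per transfer period.
 */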

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
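/* A complete extended message is msg_in[1] + 2 bytes long: the
 * EXTENDED_MESSAGE byte, the length byte itself, then the payload;
 * that is why the EXTENDED_MESSAGE case below keeps asking for more
 * bytes until len reaches that total.
 */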
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg = spriv->prv_sg;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

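/* Core phase/event state machine, driven from interrupt context.
 * Returns zero when event processing should stop for now (most often
 * because a reset has been scheduled) and nonzero when the caller may
 * keep going.
 */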
static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		fallthrough;

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd, DID_OK);
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);

			/* XXX if the chip went into disconnected mode,
			 * we can't run the phase state machine anyway.
			 */
			if (!(esp->ireg & ESP_INTR_DC))
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}

		esp->msg_out_len = 0;

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			/* Check whether a bus reset is to be done next */
			if (esp->event == ESP_EVENT_RESET)
				return 0;

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
1998 				     "MSGIN neither BSERV nor FDONE, resetting\n");
1999 			esp_schedule_reset(esp);
2000 			return 0;
2001 		}
2002 		break;
2003 	case ESP_EVENT_CMD_START:
2004 		memcpy(esp->command_block, esp->cmd_bytes_ptr,
2005 		       esp->cmd_bytes_left);
2006 		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
2007 		esp_event(esp, ESP_EVENT_CMD_DONE);
2008 		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2009 		break;
2010 	case ESP_EVENT_CMD_DONE:
2011 		esp->ops->dma_invalidate(esp);
2012 		if (esp->ireg & ESP_INTR_BSERV) {
2013 			esp_event(esp, ESP_EVENT_CHECK_PHASE);
2014 			goto again;
2015 		}
2016 		esp_schedule_reset(esp);
2017 		return 0;
2018 
2019 	case ESP_EVENT_RESET:
2020 		scsi_esp_cmd(esp, ESP_CMD_RS);
2021 		break;
2022 
2023 	default:
2024 		shost_printk(KERN_INFO, esp->host,
2025 			     "Unexpected event %x, resetting\n", esp->event);
2026 		esp_schedule_reset(esp);
2027 		return 0;
2028 	}
2029 	return 1;
2030 }
2031 
2032 static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
2033 {
2034 	struct scsi_cmnd *cmd = ent->cmd;
2035 
2036 	esp_unmap_dma(esp, cmd);
2037 	esp_free_lun_tag(ent, cmd->device->hostdata);
2038 	cmd->result = DID_RESET << 16;
2039 
2040 	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
2041 		esp_unmap_sense(esp, ent);
2042 
2043 	cmd->scsi_done(cmd);
2044 	list_del(&ent->list);
2045 	esp_put_ent(esp, ent);
2046 }
2047 
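/* After a bus reset nothing is outstanding, so any LUN that was held
 * (quiesced while waiting for outstanding tagged commands to drain)
 * may issue commands again.
 */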
2048 static void esp_clear_hold(struct scsi_device *dev, void *data)
2049 {
2050 	struct esp_lun_data *lp = dev->hostdata;
2051 
2052 	BUG_ON(lp->num_tagged);
2053 	lp->hold = 0;
2054 }
2055 
2056 static void esp_reset_cleanup(struct esp *esp)
2057 {
2058 	struct esp_cmd_entry *ent, *tmp;
2059 	int i;
2060 
2061 	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2062 		struct scsi_cmnd *cmd = ent->cmd;
2063 
2064 		list_del(&ent->list);
2065 		cmd->result = DID_RESET << 16;
2066 		cmd->scsi_done(cmd);
2067 		esp_put_ent(esp, ent);
2068 	}
2069 
2070 	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2071 		if (ent == esp->active_cmd)
2072 			esp->active_cmd = NULL;
2073 		esp_reset_cleanup_one(esp, ent);
2074 	}
2075 
2076 	BUG_ON(esp->active_cmd != NULL);
2077 
2078 	/* Force renegotiation of sync/wide transfers.  */
2079 	for (i = 0; i < ESP_MAX_TARGET; i++) {
2080 		struct esp_target_data *tp = &esp->target[i];
2081 
2082 		tp->esp_period = 0;
2083 		tp->esp_offset = 0;
2084 		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2085 				     ESP_CONFIG3_FSCSI |
2086 				     ESP_CONFIG3_FAST);
2087 		tp->flags &= ~ESP_TGT_WIDE;
2088 		tp->flags |= ESP_TGT_CHECK_NEGO;
2089 
2090 		if (tp->starget)
2091 			__starget_for_each_device(tp->starget, NULL,
2092 						  esp_clear_hold);
2093 	}
2094 	esp->flags &= ~ESP_FLAG_RESETTING;
2095 }
2096 
2097 /* Runs under host->lock */
2098 static void __esp_interrupt(struct esp *esp)
2099 {
2100 	int finish_reset, intr_done;
2101 	u8 phase;
2102 
2103 	/*
2104 	 * Once INTRPT is read, STATUS and SSTEP are cleared.
2105 	 */
2106 	esp->sreg = esp_read8(ESP_STATUS);
2107 	esp->seqreg = esp_read8(ESP_SSTEP);
2108 	esp->ireg = esp_read8(ESP_INTRPT);
2109 
2110 	if (esp->flags & ESP_FLAG_RESETTING) {
2111 		finish_reset = 1;
2112 	} else {
2113 		if (esp_check_gross_error(esp))
2114 			return;
2115 
2116 		finish_reset = esp_check_spur_intr(esp);
2117 		if (finish_reset < 0)
2118 			return;
2119 	}
2120 
2121 	if (esp->ireg & ESP_INTR_SR)
2122 		finish_reset = 1;
2123 
2124 	if (finish_reset) {
2125 		esp_reset_cleanup(esp);
2126 		if (esp->eh_reset) {
2127 			complete(esp->eh_reset);
2128 			esp->eh_reset = NULL;
2129 		}
2130 		return;
2131 	}
2132 
2133 	phase = (esp->sreg & ESP_STAT_PMASK);
2134 	if (esp->rev == FASHME) {
2135 		if (((phase != ESP_DIP && phase != ESP_DOP) &&
2136 		     esp->select_state == ESP_SELECT_NONE &&
2137 		     esp->event != ESP_EVENT_STATUS &&
2138 		     esp->event != ESP_EVENT_DATA_DONE) ||
2139 		    (esp->ireg & ESP_INTR_RSEL)) {
2140 			esp->sreg2 = esp_read8(ESP_STATUS2);
2141 			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2142 			    (esp->sreg2 & ESP_STAT2_F1BYTE))
2143 				hme_read_fifo(esp);
2144 		}
2145 	}
2146 
2147 	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
2148 		     "sreg2[%02x] ireg[%02x]\n",
2149 		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2150 
2151 	intr_done = 0;
2152 
2153 	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2154 		shost_printk(KERN_INFO, esp->host,
2155 			     "unexpected IREG %02x\n", esp->ireg);
2156 		if (esp->ireg & ESP_INTR_IC)
2157 			esp_dump_cmd_log(esp);
2158 
2159 		esp_schedule_reset(esp);
2160 	} else {
2161 		if (esp->ireg & ESP_INTR_RSEL) {
2162 			if (esp->active_cmd)
2163 				(void) esp_finish_select(esp);
2164 			intr_done = esp_reconnect(esp);
2165 		} else {
2166 			/* Some combination of FDONE, BSERV, DC. */
2167 			if (esp->select_state != ESP_SELECT_NONE)
2168 				intr_done = esp_finish_select(esp);
2169 		}
2170 	}
2171 	while (!intr_done)
2172 		intr_done = esp_process_event(esp);
2173 }
2174 
2175 irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2176 {
2177 	struct esp *esp = dev_id;
2178 	unsigned long flags;
2179 	irqreturn_t ret;
2180 
2181 	spin_lock_irqsave(esp->host->host_lock, flags);
2182 	ret = IRQ_NONE;
2183 	if (esp->ops->irq_pending(esp)) {
2184 		ret = IRQ_HANDLED;
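		/*
		 * Service this interrupt, then briefly poll for another
		 * one whenever the state machine set
		 * ESP_FLAG_QUICKIRQ_CHECK, so back-to-back events are
		 * handled without a full trip out of the handler.
		 */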
2185 		for (;;) {
2186 			int i;
2187 
2188 			__esp_interrupt(esp);
2189 			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2190 				break;
2191 			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2192 
2193 			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2194 				if (esp->ops->irq_pending(esp))
2195 					break;
2196 			}
2197 			if (i == ESP_QUICKIRQ_LIMIT)
2198 				break;
2199 		}
2200 	}
2201 	spin_unlock_irqrestore(esp->host->host_lock, flags);
2202 
2203 	return ret;
2204 }
2205 EXPORT_SYMBOL(scsi_esp_intr);
2206 
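/* Probe the chip revision by testing which config registers exist:
 * a pattern written to cfg2 that does not read back means a plain
 * ESP100; cfg2 present but cfg3 absent means an ESP100A; if all of
 * cfg{1,2,3} respond, the clock conversion factor distinguishes the
 * FAST parts from an ESP236.
 */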
2207 static void esp_get_revision(struct esp *esp)
2208 {
2209 	u8 val;
2210 
2211 	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2212 	if (esp->config2 == 0) {
2213 		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2214 		esp_write8(esp->config2, ESP_CFG2);
2215 
2216 		val = esp_read8(ESP_CFG2);
2217 		val &= ~ESP_CONFIG2_MAGIC;
2218 
2219 		esp->config2 = 0;
2220 		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2221 			/*
2222 			 * If what we write to cfg2 does not come back,
2223 			 * cfg2 is not implemented.
2224 			 * Therefore this must be a plain esp100.
2225 			 */
2226 			esp->rev = ESP100;
2227 			return;
2228 		}
2229 	}
2230 
2231 	esp_set_all_config3(esp, 5);
2232 	esp->prev_cfg3 = 5;
2233 	esp_write8(esp->config2, ESP_CFG2);
2234 	esp_write8(0, ESP_CFG3);
2235 	esp_write8(esp->prev_cfg3, ESP_CFG3);
2236 
2237 	val = esp_read8(ESP_CFG3);
2238 	if (val != 5) {
2239 		/* The cfg2 register is implemented but cfg3 is not,
2240 		 * so this must be an ESP100A.
2241 		 */
2242 		esp->rev = ESP100A;
2243 	} else {
2244 		esp_set_all_config3(esp, 0);
2245 		esp->prev_cfg3 = 0;
2246 		esp_write8(esp->prev_cfg3, ESP_CFG3);
2247 
2248 		/* All of cfg{1,2,3} are implemented, so this must be
2249 		 * one of the FAS variants; figure out which one.
2250 		 */
2251 		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2252 			esp->rev = FAST;
2253 			esp->sync_defp = SYNC_DEFP_FAST;
2254 		} else {
2255 			esp->rev = ESP236;
2256 		}
2257 	}
2258 }
2259 
2260 static void esp_init_swstate(struct esp *esp)
2261 {
2262 	int i;
2263 
2264 	INIT_LIST_HEAD(&esp->queued_cmds);
2265 	INIT_LIST_HEAD(&esp->active_cmds);
2266 	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2267 
2268 	/* Start with a clear state; domain validation (via ->slave_configure,
2269 	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2270 	 * commands.
2271 	 */
2272 	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2273 		esp->target[i].flags = 0;
2274 		esp->target[i].nego_goal_period = 0;
2275 		esp->target[i].nego_goal_offset = 0;
2276 		esp->target[i].nego_goal_width = 0;
2277 		esp->target[i].nego_goal_tags = 0;
2278 	}
2279 }
2280 
2281 /* This places the ESP into a known state at boot time. */
2282 static void esp_bootup_reset(struct esp *esp)
2283 {
2284 	u8 val;
2285 
2286 	/* Reset the DMA */
2287 	esp->ops->reset_dma(esp);
2288 
2289 	/* Reset the ESP */
2290 	esp_reset_esp(esp);
2291 
2292 	/* Reset the SCSI bus, but tell ESP not to generate an irq */
2293 	val = esp_read8(ESP_CFG1);
2294 	val |= ESP_CONFIG1_SRRDISAB;
2295 	esp_write8(val, ESP_CFG1);
2296 
2297 	scsi_esp_cmd(esp, ESP_CMD_RS);
2298 	udelay(400);
2299 
2300 	esp_write8(esp->config1, ESP_CFG1);
2301 
2302 	/* Eat any bitrot in the chip and we are done... */
2303 	esp_read8(ESP_INTRPT);
2304 }
2305 
2306 static void esp_set_clock_params(struct esp *esp)
2307 {
2308 	int fhz;
2309 	u8 ccf;
2310 
2311 	/* This is getting messy but it has to be done correctly or else
2312 	 * you get weird behavior all over the place.  We are trying to
2313 	 * basically figure out three pieces of information.
2314 	 *
2315 	 * a) Clock Conversion Factor
2316 	 *
2317 	 *    This is a representation of the input crystal clock frequency
2318 	 *    going into the ESP on this machine.  Any operation whose timing
2319 	 *    is longer than 400ns depends on this value being correct.  For
2320 	 *    example, you'll get blips for arbitration/selection during high
2321 	 *    load or with multiple targets if this is not set correctly.
2322 	 *
2323 	 * b) Selection Time-Out
2324 	 *
2325 	 *    The ESP isn't very bright and will arbitrate for the bus and try
2326 	 *    to select a target forever if you let it.  This value tells the
2327 	 *    ESP when it has taken too long to negotiate and that it should
2328 	 *    interrupt the CPU so we can see what happened.  The value is
2329 	 *    computed as follows (from NCR/Symbios chip docs).
2330 	 *
2331 	 *          (Time Out Period) *  (Input Clock)
2332 	 *    STO = ----------------------------------
2333 	 *          (8192) * (Clock Conversion Factor)
2334 	 *
2335 	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2336 	 *
2337 	 * c) Empirical constants for synchronous offset and transfer period
2338 	 *    register values
2339 	 *
2340 	 *    This entails the smallest and largest sync period we could ever
2341 	 *    handle on this ESP.
2342 	 */
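	/*
	 * Worked example with illustrative numbers: a 40MHz input clock
	 * yields ccf = 8 below, so the 250ms time out period gives
	 *
	 *    STO = (0.25 * 40,000,000) / (8192 * 8) ~= 153
	 *
	 * as the value that would be programmed into the selection
	 * time-out register.
	 */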
2343 	fhz = esp->cfreq;
2344 
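	/* This rounds the input clock up to the next multiple of 5MHz:
	 * ((MHz) + 4) / 5 == ceil(MHz / 5), e.g. 25MHz -> 5, 40MHz -> 8.
	 */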
2345 	ccf = ((fhz / 1000000) + 4) / 5;
2346 	if (ccf == 1)
2347 		ccf = 2;
2348 
2349 	/* If we can't find anything reasonable, just assume 20MHz.
2350 	 * This is the clock frequency of the older sun4c's where I've
2351 	 * been unable to find the clock-frequency PROM property.  All
2352 	 * other machines provide useful values it seems.
2353 	 */
2354 	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2355 		fhz = 20000000;
2356 		ccf = 4;
2357 	}
2358 
2359 	esp->cfact = (ccf == 8 ? 0 : ccf);
2360 	esp->cfreq = fhz;
2361 	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2362 	esp->ctick = ESP_TICK(ccf, esp->ccycle);
2363 	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2364 	esp->sync_defp = SYNC_DEFP_SLOW;
2365 }
2366 
2367 static const char *esp_chip_names[] = {
2368 	"ESP100",
2369 	"ESP100A",
2370 	"ESP236",
2371 	"FAS236",
2372 	"AM53C974",
2373 	"53CF9x-2",
2374 	"FAS100A",
2375 	"FAST",
2376 	"FASHME",
2377 };
2378 
2379 static struct scsi_transport_template *esp_transport_template;
2380 
2381 int scsi_esp_register(struct esp *esp)
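/* Entry point for the bus front-end drivers (sun_esp, am53c974,
 * zorro_esp, etc.).  A minimal sketch of the expected call sequence,
 * assuming a hypothetical my_bus_ops table and already-mapped
 * registers:
 *
 *	host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
 *	esp = shost_priv(host);
 *	esp->host = host;
 *	esp->dev = dev;
 *	esp->ops = &my_bus_ops;
 *	esp->regs = regs;
 *	esp->cfreq = clock_frequency;
 *	err = scsi_esp_register(esp);
 *
 * Note that this function sleeps for the bus reset settle time, so it
 * must be called from process context.
 */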
2382 {
2383 	static int instance;
2384 	int err;
2385 
2386 	if (!esp->num_tags)
2387 		esp->num_tags = ESP_DEFAULT_TAGS;
2388 	esp->host->transportt = esp_transport_template;
2389 	esp->host->max_lun = ESP_MAX_LUN;
2390 	esp->host->cmd_per_lun = 2;
2391 	esp->host->unique_id = instance;
2392 
2393 	esp_set_clock_params(esp);
2394 
2395 	esp_get_revision(esp);
2396 
2397 	esp_init_swstate(esp);
2398 
2399 	esp_bootup_reset(esp);
2400 
2401 	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2402 		   esp->host->unique_id, esp->regs, esp->dma_regs,
2403 		   esp->host->irq);
2404 	dev_printk(KERN_INFO, esp->dev,
2405 		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2406 		   esp->host->unique_id, esp_chip_names[esp->rev],
2407 		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2408 
2409 	/* Let the SCSI bus reset settle. */
2410 	ssleep(esp_bus_reset_settle);
2411 
2412 	err = scsi_add_host(esp->host, esp->dev);
2413 	if (err)
2414 		return err;
2415 
2416 	instance++;
2417 
2418 	scsi_scan_host(esp->host);
2419 
2420 	return 0;
2421 }
2422 EXPORT_SYMBOL(scsi_esp_register);
2423 
2424 void scsi_esp_unregister(struct esp *esp)
2425 {
2426 	scsi_remove_host(esp->host);
2427 }
2428 EXPORT_SYMBOL(scsi_esp_unregister);
2429 
2430 static int esp_target_alloc(struct scsi_target *starget)
2431 {
2432 	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2433 	struct esp_target_data *tp = &esp->target[starget->id];
2434 
2435 	tp->starget = starget;
2436 
2437 	return 0;
2438 }
2439 
2440 static void esp_target_destroy(struct scsi_target *starget)
2441 {
2442 	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2443 	struct esp_target_data *tp = &esp->target[starget->id];
2444 
2445 	tp->starget = NULL;
2446 }
2447 
2448 static int esp_slave_alloc(struct scsi_device *dev)
2449 {
2450 	struct esp *esp = shost_priv(dev->host);
2451 	struct esp_target_data *tp = &esp->target[dev->id];
2452 	struct esp_lun_data *lp;
2453 
2454 	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2455 	if (!lp)
2456 		return -ENOMEM;
2457 	dev->hostdata = lp;
2458 
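	/* Advertise the chip's transfer limits to the SPI transport
	 * class; domain validation negotiates within these bounds.
	 */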
2459 	spi_min_period(tp->starget) = esp->min_period;
2460 	spi_max_offset(tp->starget) = 15;
2461 
2462 	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2463 		spi_max_width(tp->starget) = 1;
2464 	else
2465 		spi_max_width(tp->starget) = 0;
2466 
2467 	return 0;
2468 }
2469 
2470 static int esp_slave_configure(struct scsi_device *dev)
2471 {
2472 	struct esp *esp = shost_priv(dev->host);
2473 	struct esp_target_data *tp = &esp->target[dev->id];
2474 
2475 	if (dev->tagged_supported)
2476 		scsi_change_queue_depth(dev, esp->num_tags);
2477 
2478 	tp->flags |= ESP_TGT_DISCONNECT;
2479 
2480 	if (!spi_initial_dv(dev->sdev_target))
2481 		spi_dv_device(dev);
2482 
2483 	return 0;
2484 }
2485 
2486 static void esp_slave_destroy(struct scsi_device *dev)
2487 {
2488 	struct esp_lun_data *lp = dev->hostdata;
2489 
2490 	kfree(lp);
2491 	dev->hostdata = NULL;
2492 }
2493 
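/* Abort strategy, easiest case first: a command still on queued_cmds
 * is completed with DID_ABORT on the spot; the currently active
 * command gets an ABORT TASK SET message with ATN asserted; a
 * disconnected command cannot be reached, so we fail and let the
 * midlayer escalate to a reset.
 */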
2494 static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2495 {
2496 	struct esp *esp = shost_priv(cmd->device->host);
2497 	struct esp_cmd_entry *ent, *tmp;
2498 	struct completion eh_done;
2499 	unsigned long flags;
2500 
2501 	/* XXX This helps a lot with debugging but might be a bit
2502 	 * XXX much for the final driver.
2503 	 */
2504 	spin_lock_irqsave(esp->host->host_lock, flags);
2505 	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
2506 		     cmd, cmd->cmnd[0]);
2507 	ent = esp->active_cmd;
2508 	if (ent)
2509 		shost_printk(KERN_ERR, esp->host,
2510 			     "Current command [%p:%02x]\n",
2511 			     ent->cmd, ent->cmd->cmnd[0]);
2512 	list_for_each_entry(ent, &esp->queued_cmds, list) {
2513 		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
2514 			     ent->cmd, ent->cmd->cmnd[0]);
2515 	}
2516 	list_for_each_entry(ent, &esp->active_cmds, list) {
2517 		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
2518 			     ent->cmd, ent->cmd->cmnd[0]);
2519 	}
2520 	esp_dump_cmd_log(esp);
2521 	spin_unlock_irqrestore(esp->host->host_lock, flags);
2522 
2523 	spin_lock_irqsave(esp->host->host_lock, flags);
2524 
2525 	ent = NULL;
2526 	list_for_each_entry(tmp, &esp->queued_cmds, list) {
2527 		if (tmp->cmd == cmd) {
2528 			ent = tmp;
2529 			break;
2530 		}
2531 	}
2532 
2533 	if (ent) {
2534 		/* Easiest case: we haven't even issued the command
2535 		 * yet, so it is trivial to abort.
2536 		 */
2537 		list_del(&ent->list);
2538 
2539 		cmd->result = DID_ABORT << 16;
2540 		cmd->scsi_done(cmd);
2541 
2542 		esp_put_ent(esp, ent);
2543 
2544 		goto out_success;
2545 	}
2546 
2547 	init_completion(&eh_done);
2548 
2549 	ent = esp->active_cmd;
2550 	if (ent && ent->cmd == cmd) {
2551 		/* Command is the currently active command on
2552 		 * the bus.  If we already have an output message
2553 		 * pending, no dice.
2554 		 */
2555 		if (esp->msg_out_len)
2556 			goto out_failure;
2557 
2558 		/* Send out an abort, encouraging the target to
2559 		 * go to MSGOUT phase by asserting ATN.
2560 		 */
2561 		esp->msg_out[0] = ABORT_TASK_SET;
2562 		esp->msg_out_len = 1;
2563 		ent->eh_done = &eh_done;
2564 
2565 		scsi_esp_cmd(esp, ESP_CMD_SATN);
2566 	} else {
2567 		/* The command is disconnected.  This is not easy to
2568 		 * abort.  For now we fail and let the scsi error
2569 		 * handling layer try a scsi bus reset or host
2570 		 * reset.
2571 		 *
2572 		 * What we could do is put together a scsi command
2573 		 * solely for the purpose of sending an abort message
2574 		 * to the target.  Coming up with all the code to
2575 		 * cook up scsi commands, special case them everywhere,
2576 		 * etc. is for questionable gain and it would be better
2577 		 * if the generic scsi error handling layer could do at
2578 		 * least some of that for us.
2579 		 *
2580 		 * Anyway, this is an area for potential future improvement
2581 		 * in this driver.
2582 		 */
2583 		goto out_failure;
2584 	}
2585 
2586 	spin_unlock_irqrestore(esp->host->host_lock, flags);
2587 
2588 	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
2589 		spin_lock_irqsave(esp->host->host_lock, flags);
2590 		ent->eh_done = NULL;
2591 		spin_unlock_irqrestore(esp->host->host_lock, flags);
2592 
2593 		return FAILED;
2594 	}
2595 
2596 	return SUCCESS;
2597 
2598 out_success:
2599 	spin_unlock_irqrestore(esp->host->host_lock, flags);
2600 	return SUCCESS;
2601 
2602 out_failure:
2603 	/* XXX This might be a good location to set ESP_TGT_BROKEN
2604 	 * XXX since we know which target/lun in particular is
2605 	 * XXX causing trouble.
2606 	 */
2607 	spin_unlock_irqrestore(esp->host->host_lock, flags);
2608 	return FAILED;
2609 }
2610 
2611 static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2612 {
2613 	struct esp *esp = shost_priv(cmd->device->host);
2614 	struct completion eh_reset;
2615 	unsigned long flags;
2616 
2617 	init_completion(&eh_reset);
2618 
2619 	spin_lock_irqsave(esp->host->host_lock, flags);
2620 
2621 	esp->eh_reset = &eh_reset;
2622 
2623 	/* XXX This is too simple... We should add lots of
2624 	 * XXX checks here so that if we find that the chip is
2625 	 * XXX very wedged we return failure immediately so
2626 	 * XXX that we can perform a full chip reset.
2627 	 */
2628 	esp->flags |= ESP_FLAG_RESETTING;
2629 	scsi_esp_cmd(esp, ESP_CMD_RS);
2630 
2631 	spin_unlock_irqrestore(esp->host->host_lock, flags);
2632 
2633 	ssleep(esp_bus_reset_settle);
2634 
2635 	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2636 		spin_lock_irqsave(esp->host->host_lock, flags);
2637 		esp->eh_reset = NULL;
2638 		spin_unlock_irqrestore(esp->host->host_lock, flags);
2639 
2640 		return FAILED;
2641 	}
2642 
2643 	return SUCCESS;
2644 }
2645 
2646 /* All bets are off, reset the entire device.  */
2647 static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2648 {
2649 	struct esp *esp = shost_priv(cmd->device->host);
2650 	unsigned long flags;
2651 
2652 	spin_lock_irqsave(esp->host->host_lock, flags);
2653 	esp_bootup_reset(esp);
2654 	esp_reset_cleanup(esp);
2655 	spin_unlock_irqrestore(esp->host->host_lock, flags);
2656 
2657 	ssleep(esp_bus_reset_settle);
2658 
2659 	return SUCCESS;
2660 }
2661 
2662 static const char *esp_info(struct Scsi_Host *host)
2663 {
2664 	return "esp";
2665 }
2666 
2667 struct scsi_host_template scsi_esp_template = {
2668 	.module			= THIS_MODULE,
2669 	.name			= "esp",
2670 	.info			= esp_info,
2671 	.queuecommand		= esp_queuecommand,
2672 	.target_alloc		= esp_target_alloc,
2673 	.target_destroy		= esp_target_destroy,
2674 	.slave_alloc		= esp_slave_alloc,
2675 	.slave_configure	= esp_slave_configure,
2676 	.slave_destroy		= esp_slave_destroy,
2677 	.eh_abort_handler	= esp_eh_abort_handler,
2678 	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
2679 	.eh_host_reset_handler	= esp_eh_host_reset_handler,
2680 	.can_queue		= 7,
2681 	.this_id		= 7,
2682 	.sg_tablesize		= SG_ALL,
2683 	.max_sectors		= 0xffff,
2684 	.skip_settle_delay	= 1,
2685 };
2686 EXPORT_SYMBOL(scsi_esp_template);
2687 
2688 static void esp_get_signalling(struct Scsi_Host *host)
2689 {
2690 	struct esp *esp = shost_priv(host);
2691 	enum spi_signal_type type;
2692 
2693 	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2694 		type = SPI_SIGNAL_HVD;
2695 	else
2696 		type = SPI_SIGNAL_SE;
2697 
2698 	spi_signalling(host) = type;
2699 }
2700 
2701 static void esp_set_offset(struct scsi_target *target, int offset)
2702 {
2703 	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2704 	struct esp *esp = shost_priv(host);
2705 	struct esp_target_data *tp = &esp->target[target->id];
2706 
2707 	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2708 		tp->nego_goal_offset = 0;
2709 	else
2710 		tp->nego_goal_offset = offset;
2711 	tp->flags |= ESP_TGT_CHECK_NEGO;
2712 }
2713 
2714 static void esp_set_period(struct scsi_target *target, int period)
2715 {
2716 	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2717 	struct esp *esp = shost_priv(host);
2718 	struct esp_target_data *tp = &esp->target[target->id];
2719 
2720 	tp->nego_goal_period = period;
2721 	tp->flags |= ESP_TGT_CHECK_NEGO;
2722 }
2723 
2724 static void esp_set_width(struct scsi_target *target, int width)
2725 {
2726 	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2727 	struct esp *esp = shost_priv(host);
2728 	struct esp_target_data *tp = &esp->target[target->id];
2729 
2730 	tp->nego_goal_width = (width ? 1 : 0);
2731 	tp->flags |= ESP_TGT_CHECK_NEGO;
2732 }
2733 
2734 static struct spi_function_template esp_transport_ops = {
2735 	.set_offset		= esp_set_offset,
2736 	.show_offset		= 1,
2737 	.set_period		= esp_set_period,
2738 	.show_period		= 1,
2739 	.set_width		= esp_set_width,
2740 	.show_width		= 1,
2741 	.get_signalling		= esp_get_signalling,
2742 };
2743 
2744 static int __init esp_init(void)
2745 {
2746 	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2747 		     sizeof(struct esp_cmd_priv));
2748 
2749 	esp_transport_template = spi_attach_transport(&esp_transport_ops);
2750 	if (!esp_transport_template)
2751 		return -ENODEV;
2752 
2753 	return 0;
2754 }
2755 
2756 static void __exit esp_exit(void)
2757 {
2758 	spi_release_transport(esp_transport_template);
2759 }
2760 
2761 MODULE_DESCRIPTION("ESP SCSI driver core");
2762 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2763 MODULE_LICENSE("GPL");
2764 MODULE_VERSION(DRV_VERSION);
2765 
2766 module_param(esp_bus_reset_settle, int, 0);
2767 MODULE_PARM_DESC(esp_bus_reset_settle,
2768 		 "ESP scsi bus reset delay in seconds");
2769 
2770 module_param(esp_debug, int, 0);
2771 MODULE_PARM_DESC(esp_debug,
2772 "ESP bitmapped debugging message enable value:\n"
2773 "	0x00000001	Log interrupt events\n"
2774 "	0x00000002	Log scsi commands\n"
2775 "	0x00000004	Log resets\n"
2776 "	0x00000008	Log message in events\n"
2777 "	0x00000010	Log message out events\n"
2778 "	0x00000020	Log command completion\n"
2779 "	0x00000040	Log disconnects\n"
2780 "	0x00000080	Log data start\n"
2781 "	0x00000100	Log data done\n"
2782 "	0x00000200	Log reconnects\n"
2783 "	0x00000400	Log auto-sense data\n"
"	0x00000800	Log driver state machine events\n"
"	0x00001000	Log ESP chip commands\n"
2784 );
2785 
2786 module_init(esp_init);
2787 module_exit(esp_exit);
2788 
2789 #ifdef CONFIG_SCSI_ESP_PIO
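/* Busy-wait (bounded at 500000 * udelay(1), roughly half a second)
 * until the chip reports bytes in the fifo; returns the byte count,
 * or 0 on timeout.
 */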
2790 static inline unsigned int esp_wait_for_fifo(struct esp *esp)
2791 {
2792 	int i = 500000;
2793 
2794 	do {
2795 		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
2796 
2797 		if (fbytes)
2798 			return fbytes;
2799 
2800 		udelay(1);
2801 	} while (--i);
2802 
2803 	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
2804 		     esp_read8(ESP_STATUS));
2805 	return 0;
2806 }
2807 
2808 static inline int esp_wait_for_intr(struct esp *esp)
2809 {
2810 	int i = 500000;
2811 
2812 	do {
2813 		esp->sreg = esp_read8(ESP_STATUS);
2814 		if (esp->sreg & ESP_STAT_INTR)
2815 			return 0;
2816 
2817 		udelay(1);
2818 	} while (--i);
2819 
2820 	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
2821 		     esp->sreg);
2822 	return 1;
2823 }
2824 
2825 #define ESP_FIFO_SIZE 16
2826 
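/* Programmed-I/O fallback for hosts without usable DMA: for reads,
 * drain whatever the chip deposits in the fifo after each transfer
 * information interrupt; for writes, keep the 16-byte fifo topped up.
 * Whatever could not be transferred (phase change or error) is left
 * in esp->send_cmd_residual.
 */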
2827 void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
2828 		      u32 dma_count, int write, u8 cmd)
2829 {
2830 	u8 phase = esp->sreg & ESP_STAT_PMASK;
2831 
2832 	cmd &= ~ESP_CMD_DMA;
2833 	esp->send_cmd_error = 0;
2834 
2835 	if (write) {
2836 		u8 *dst = (u8 *)addr;
2837 		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
2838 
2839 		scsi_esp_cmd(esp, cmd);
2840 
2841 		while (1) {
2842 			if (!esp_wait_for_fifo(esp))
2843 				break;
2844 
2845 			*dst++ = readb(esp->fifo_reg);
2846 			--esp_count;
2847 
2848 			if (!esp_count)
2849 				break;
2850 
2851 			if (esp_wait_for_intr(esp)) {
2852 				esp->send_cmd_error = 1;
2853 				break;
2854 			}
2855 
2856 			if ((esp->sreg & ESP_STAT_PMASK) != phase)
2857 				break;
2858 
2859 			esp->ireg = esp_read8(ESP_INTRPT);
2860 			if (esp->ireg & mask) {
2861 				esp->send_cmd_error = 1;
2862 				break;
2863 			}
2864 
2865 			if (phase == ESP_MIP)
2866 				esp_write8(ESP_CMD_MOK, ESP_CMD);
2867 
2868 			esp_write8(ESP_CMD_TI, ESP_CMD);
2869 		}
2870 	} else {
2871 		unsigned int n = ESP_FIFO_SIZE;
2872 		u8 *src = (u8 *)addr;
2873 
2874 		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
2875 
2876 		if (n > esp_count)
2877 			n = esp_count;
2878 		writesb(esp->fifo_reg, src, n);
2879 		src += n;
2880 		esp_count -= n;
2881 
2882 		scsi_esp_cmd(esp, cmd);
2883 
2884 		while (esp_count) {
2885 			if (esp_wait_for_intr(esp)) {
2886 				esp->send_cmd_error = 1;
2887 				break;
2888 			}
2889 
2890 			if ((esp->sreg & ESP_STAT_PMASK) != phase)
2891 				break;
2892 
2893 			esp->ireg = esp_read8(ESP_INTRPT);
2894 			if (esp->ireg & ~ESP_INTR_BSERV) {
2895 				esp->send_cmd_error = 1;
2896 				break;
2897 			}
2898 
2899 			n = ESP_FIFO_SIZE -
2900 			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
2901 
2902 			if (n > esp_count)
2903 				n = esp_count;
2904 			writesb(esp->fifo_reg, src, n);
2905 			src += n;
2906 			esp_count -= n;
2907 
2908 			esp_write8(ESP_CMD_TI, ESP_CMD);
2909 		}
2910 	}
2911 
2912 	esp->send_cmd_residual = esp_count;
2913 }
2914 EXPORT_SYMBOL(esp_send_pio_cmd);
2915 #endif
2916