xref: /openbmc/linux/drivers/scsi/stex.c (revision 94d964e5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * SuperTrak EX Series Storage Controller driver for Linux
4  *
5  *	Copyright (C) 2005-2015 Promise Technology Inc.
6  *
7  *	Written By:
8  *		Ed Lin <promise_linux@promise.com>
9  */
10 
11 #include <linux/init.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/delay.h>
15 #include <linux/slab.h>
16 #include <linux/time.h>
17 #include <linux/pci.h>
18 #include <linux/blkdev.h>
19 #include <linux/interrupt.h>
20 #include <linux/types.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 #include <linux/ktime.h>
24 #include <linux/reboot.h>
25 #include <asm/io.h>
26 #include <asm/irq.h>
27 #include <asm/byteorder.h>
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_tcq.h>
33 #include <scsi/scsi_dbg.h>
34 #include <scsi/scsi_eh.h>
35 
36 #define DRV_NAME "stex"
37 #define ST_DRIVER_VERSION	"6.02.0000.01"
38 #define ST_VER_MAJOR		6
39 #define ST_VER_MINOR		02
40 #define ST_OEM				0000
41 #define ST_BUILD_VER		01
42 
43 enum {
44 	/* MU register offset */
45 	IMR0	= 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
46 	IMR1	= 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
47 	OMR0	= 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
48 	OMR1	= 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
49 	IDBL	= 0x20,	/* MU_INBOUND_DOORBELL */
50 	IIS	= 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
51 	IIM	= 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
52 	ODBL	= 0x2c,	/* MU_OUTBOUND_DOORBELL */
53 	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
54 	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */
55 
56 	YIOA_STATUS				= 0x00,
57 	YH2I_INT				= 0x20,
58 	YINT_EN					= 0x34,
59 	YI2H_INT				= 0x9c,
60 	YI2H_INT_C				= 0xa0,
61 	YH2I_REQ				= 0xc0,
62 	YH2I_REQ_HI				= 0xc4,
63 	PSCRATCH0				= 0xb0,
64 	PSCRATCH1				= 0xb4,
65 	PSCRATCH2				= 0xb8,
66 	PSCRATCH3				= 0xbc,
67 	PSCRATCH4				= 0xc8,
68 	MAILBOX_BASE			= 0x1000,
69 	MAILBOX_HNDSHK_STS		= 0x0,
70 
71 	/* MU register value */
72 	MU_INBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
73 	MU_INBOUND_DOORBELL_REQHEADCHANGED	= (1 << 1),
74 	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= (1 << 2),
75 	MU_INBOUND_DOORBELL_HMUSTOPPED		= (1 << 3),
76 	MU_INBOUND_DOORBELL_RESET		= (1 << 4),
77 
78 	MU_OUTBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
79 	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= (1 << 1),
80 	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= (1 << 2),
81 	MU_OUTBOUND_DOORBELL_BUSCHANGE		= (1 << 3),
82 	MU_OUTBOUND_DOORBELL_HASEVENT		= (1 << 4),
83 	MU_OUTBOUND_DOORBELL_REQUEST_RESET	= (1 << 27),
84 
85 	/* MU status code */
86 	MU_STATE_STARTING			= 1,
87 	MU_STATE_STARTED			= 2,
88 	MU_STATE_RESETTING			= 3,
89 	MU_STATE_FAILED				= 4,
90 	MU_STATE_STOP				= 5,
91 	MU_STATE_NOCONNECT			= 6,
92 
93 	MU_MAX_DELAY				= 50,
94 	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
95 	MU_HANDSHAKE_SIGNATURE_HALF		= 0x5a5a0000,
96 	MU_HARD_RESET_WAIT			= 30000,
97 	HMU_PARTNER_TYPE			= 2,
98 
99 	/* firmware returned values */
100 	SRB_STATUS_SUCCESS			= 0x01,
101 	SRB_STATUS_ERROR			= 0x04,
102 	SRB_STATUS_BUSY				= 0x05,
103 	SRB_STATUS_INVALID_REQUEST		= 0x06,
104 	SRB_STATUS_SELECTION_TIMEOUT		= 0x0A,
105 	SRB_SEE_SENSE 				= 0x80,
106 
107 	/* task attribute */
108 	TASK_ATTRIBUTE_SIMPLE			= 0x0,
109 	TASK_ATTRIBUTE_HEADOFQUEUE		= 0x1,
110 	TASK_ATTRIBUTE_ORDERED			= 0x2,
111 	TASK_ATTRIBUTE_ACA			= 0x4,
112 
113 	SS_STS_NORMAL				= 0x80000000,
114 	SS_STS_DONE				= 0x40000000,
115 	SS_STS_HANDSHAKE			= 0x20000000,
116 
117 	SS_HEAD_HANDSHAKE			= 0x80,
118 
119 	SS_H2I_INT_RESET			= 0x100,
120 
121 	SS_I2H_REQUEST_RESET			= 0x2000,
122 
123 	SS_MU_OPERATIONAL			= 0x80000000,
124 
125 	STEX_CDB_LENGTH				= 16,
126 	STATUS_VAR_LEN				= 128,
127 
128 	/* sg flags */
129 	SG_CF_EOT				= 0x80,	/* end of table */
130 	SG_CF_64B				= 0x40,	/* 64 bit item */
131 	SG_CF_HOST				= 0x20,	/* sg in host memory */
132 	MSG_DATA_DIR_ND				= 0,
133 	MSG_DATA_DIR_IN				= 1,
134 	MSG_DATA_DIR_OUT			= 2,
135 
136 	st_shasta				= 0,
137 	st_vsc					= 1,
138 	st_yosemite				= 2,
139 	st_seq					= 3,
140 	st_yel					= 4,
141 	st_P3					= 5,
142 
143 	PASSTHRU_REQ_TYPE			= 0x00000001,
144 	PASSTHRU_REQ_NO_WAKEUP			= 0x00000100,
145 	ST_INTERNAL_TIMEOUT			= 180,
146 
147 	ST_TO_CMD				= 0,
148 	ST_FROM_CMD				= 1,
149 
150 	/* vendor specific commands of Promise */
151 	MGT_CMD					= 0xd8,
152 	SINBAND_MGT_CMD				= 0xd9,
153 	ARRAY_CMD				= 0xe0,
154 	CONTROLLER_CMD				= 0xe1,
155 	DEBUGGING_CMD				= 0xe2,
156 	PASSTHRU_CMD				= 0xe3,
157 
158 	PASSTHRU_GET_ADAPTER			= 0x05,
159 	PASSTHRU_GET_DRVVER			= 0x10,
160 
161 	CTLR_CONFIG_CMD				= 0x03,
162 	CTLR_SHUTDOWN				= 0x0d,
163 
164 	CTLR_POWER_STATE_CHANGE			= 0x0e,
165 	CTLR_POWER_SAVING			= 0x01,
166 
167 	PASSTHRU_SIGNATURE			= 0x4e415041,
168 	MGT_CMD_SIGNATURE			= 0xba,
169 
170 	INQUIRY_EVPD				= 0x01,
171 
172 	ST_ADDITIONAL_MEM			= 0x200000,
173 	ST_ADDITIONAL_MEM_MIN			= 0x80000,
174 	PMIC_SHUTDOWN				= 0x0D,
175 	PMIC_RESUME					= 0x10,
176 	ST_IGNORED					= -1,
177 	ST_NOTHANDLED				= 7,
178 	ST_S3						= 3,
179 	ST_S4						= 4,
180 	ST_S5						= 5,
181 	ST_S6						= 6,
182 };
183 
184 struct st_sgitem {
185 	u8 ctrl;	/* SG_CF_xxx */
186 	u8 reserved[3];
187 	__le32 count;
188 	__le64 addr;
189 };
190 
191 struct st_ss_sgitem {
192 	__le32 addr;
193 	__le32 addr_hi;
194 	__le32 count;
195 };
196 
197 struct st_sgtable {
198 	__le16 sg_count;
199 	__le16 max_sg_count;
200 	__le32 sz_in_byte;
201 };
202 
203 struct st_msg_header {
204 	__le64 handle;
205 	u8 flag;
206 	u8 channel;
207 	__le16 timeout;
208 	u32 reserved;
209 };
210 
211 struct handshake_frame {
212 	__le64 rb_phy;		/* request payload queue physical address */
213 	__le16 req_sz;		/* size of each request payload */
214 	__le16 req_cnt;		/* count of reqs the buffer can hold */
215 	__le16 status_sz;	/* size of each status payload */
216 	__le16 status_cnt;	/* count of status the buffer can hold */
217 	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
218 	u8 partner_type;	/* who sends this frame */
219 	u8 reserved0[7];
220 	__le32 partner_ver_major;
221 	__le32 partner_ver_minor;
222 	__le32 partner_ver_oem;
223 	__le32 partner_ver_build;
224 	__le32 extra_offset;	/* NEW */
225 	__le32 extra_size;	/* NEW */
226 	__le32 scratch_size;
227 	u32 reserved1;
228 };
229 
230 struct req_msg {
231 	__le16 tag;
232 	u8 lun;
233 	u8 target;
234 	u8 task_attr;
235 	u8 task_manage;
236 	u8 data_dir;
237 	u8 payload_sz;		/* payload size in 4-byte units, not used */
238 	u8 cdb[STEX_CDB_LENGTH];
239 	u32 variable[];
240 };
241 
242 struct status_msg {
243 	__le16 tag;
244 	u8 lun;
245 	u8 target;
246 	u8 srb_status;
247 	u8 scsi_status;
248 	u8 reserved;
249 	u8 payload_sz;		/* payload size in 4-byte units */
250 	u8 variable[STATUS_VAR_LEN];
251 };
252 
253 struct ver_info {
254 	u32 major;
255 	u32 minor;
256 	u32 oem;
257 	u32 build;
258 	u32 reserved[2];
259 };
260 
261 struct st_frame {
262 	u32 base[6];
263 	u32 rom_addr;
264 
265 	struct ver_info drv_ver;
266 	struct ver_info bios_ver;
267 
268 	u32 bus;
269 	u32 slot;
270 	u32 irq_level;
271 	u32 irq_vec;
272 	u32 id;
273 	u32 subid;
274 
275 	u32 dimm_size;
276 	u8 dimm_type;
277 	u8 reserved[3];
278 
279 	u32 channel;
280 	u32 reserved1;
281 };
282 
283 struct st_drvver {
284 	u32 major;
285 	u32 minor;
286 	u32 oem;
287 	u32 build;
288 	u32 signature[2];
289 	u8 console_id;
290 	u8 host_no;
291 	u8 reserved0[2];
292 	u32 reserved[3];
293 };
294 
295 struct st_ccb {
296 	struct req_msg *req;
297 	struct scsi_cmnd *cmd;
298 
299 	void *sense_buffer;
300 	unsigned int sense_bufflen;
301 	int sg_count;
302 
303 	u32 req_type;
304 	u8 srb_status;
305 	u8 scsi_status;
306 	u8 reserved[2];
307 };
308 
309 struct st_hba {
310 	void __iomem *mmio_base;	/* iomapped PCI memory space */
311 	void *dma_mem;
312 	dma_addr_t dma_handle;
313 	size_t dma_size;
314 
315 	struct Scsi_Host *host;
316 	struct pci_dev *pdev;
317 
318 	struct req_msg * (*alloc_rq) (struct st_hba *);
319 	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
320 	void (*send) (struct st_hba *, struct req_msg *, u16);
321 
322 	u32 req_head;
323 	u32 req_tail;
324 	u32 status_head;
325 	u32 status_tail;
326 
327 	struct status_msg *status_buffer;
328 	void *copy_buffer; /* temp buffer for driver-handled commands */
329 	struct st_ccb *ccb;
330 	struct st_ccb *wait_ccb;
331 	__le32 *scratch;
332 
333 	char work_q_name[20];
334 	struct workqueue_struct *work_q;
335 	struct work_struct reset_work;
336 	wait_queue_head_t reset_waitq;
337 	unsigned int mu_status;
338 	unsigned int cardtype;
339 	int msi_enabled;
340 	int out_req_cnt;
341 	u32 extra_offset;
342 	u16 rq_count;
343 	u16 rq_size;
344 	u16 sts_count;
345 	u8  supports_pm;
346 	int msi_lock;
347 };
348 
349 struct st_card_info {
350 	struct req_msg * (*alloc_rq) (struct st_hba *);
351 	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
352 	void (*send) (struct st_hba *, struct req_msg *, u16);
353 	unsigned int max_id;
354 	unsigned int max_lun;
355 	unsigned int max_channel;
356 	u16 rq_count;
357 	u16 rq_size;
358 	u16 sts_count;
359 };
360 
361 static int S6flag;
362 static int stex_halt(struct notifier_block *nb, ulong event, void *buf);
363 static struct notifier_block stex_notifier = {
364 	stex_halt, NULL, 0
365 };
366 
367 static int msi;
368 module_param(msi, int, 0);
369 MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts (0=off, 1=on)");
370 
371 static const char console_inq_page[] =
372 {
373 	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
374 	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
375 	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
376 	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
377 	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
378 	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
379 	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
380 	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
381 };
382 
383 MODULE_AUTHOR("Ed Lin");
384 MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
385 MODULE_LICENSE("GPL");
386 MODULE_VERSION(ST_DRIVER_VERSION);
387 
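/* Pop the next entry from the (sts_count + 1)-slot status ring. */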
388 static struct status_msg *stex_get_status(struct st_hba *hba)
389 {
390 	struct status_msg *status = hba->status_buffer + hba->status_tail;
391 
392 	++hba->status_tail;
393 	hba->status_tail %= hba->sts_count+1;
394 
395 	return status;
396 }
397 
398 static void stex_invalid_field(struct scsi_cmnd *cmd,
399 			       void (*done)(struct scsi_cmnd *))
400 {
401 	/* "Invalid field in cdb" */
402 	scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
403 	done(cmd);
404 }
405 
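/* Claim the next slot in the (rq_count + 1)-slot request ring. */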
406 static struct req_msg *stex_alloc_req(struct st_hba *hba)
407 {
408 	struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
409 
410 	++hba->req_head;
411 	hba->req_head %= hba->rq_count+1;
412 
413 	return req;
414 }
415 
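/*
 * On yel/P3 each request slot begins with a struct st_msg_header; the caller
 * gets a pointer just past it.  req_head is advanced later, in
 * stex_ss_send_cmd().
 */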
416 static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
417 {
418 	return (struct req_msg *)(hba->dma_mem +
419 		hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
420 }
421 
422 static int stex_map_sg(struct st_hba *hba,
423 	struct req_msg *req, struct st_ccb *ccb)
424 {
425 	struct scsi_cmnd *cmd;
426 	struct scatterlist *sg;
427 	struct st_sgtable *dst;
428 	struct st_sgitem *table;
429 	int i, nseg;
430 
431 	cmd = ccb->cmd;
432 	nseg = scsi_dma_map(cmd);
433 	BUG_ON(nseg < 0);
434 	if (nseg) {
435 		dst = (struct st_sgtable *)req->variable;
436 
437 		ccb->sg_count = nseg;
438 		dst->sg_count = cpu_to_le16((u16)nseg);
439 		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
440 		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
441 
442 		table = (struct st_sgitem *)(dst + 1);
443 		scsi_for_each_sg(cmd, sg, nseg, i) {
444 			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
445 			table[i].addr = cpu_to_le64(sg_dma_address(sg));
446 			table[i].ctrl = SG_CF_64B | SG_CF_HOST;
447 		}
448 		table[--i].ctrl |= SG_CF_EOT;
449 	}
450 
451 	return nseg;
452 }
453 
454 static int stex_ss_map_sg(struct st_hba *hba,
455 	struct req_msg *req, struct st_ccb *ccb)
456 {
457 	struct scsi_cmnd *cmd;
458 	struct scatterlist *sg;
459 	struct st_sgtable *dst;
460 	struct st_ss_sgitem *table;
461 	int i, nseg;
462 
463 	cmd = ccb->cmd;
464 	nseg = scsi_dma_map(cmd);
465 	BUG_ON(nseg < 0);
466 	if (nseg) {
467 		dst = (struct st_sgtable *)req->variable;
468 
469 		ccb->sg_count = nseg;
470 		dst->sg_count = cpu_to_le16((u16)nseg);
471 		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
472 		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
473 
474 		table = (struct st_ss_sgitem *)(dst + 1);
475 		scsi_for_each_sg(cmd, sg, nseg, i) {
476 			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
477 			table[i].addr =
478 				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
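			/* two 16-bit shifts avoid a shift-count warning on 32-bit dma_addr_t */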
479 			table[i].addr_hi =
480 				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
481 		}
482 	}
483 
484 	return nseg;
485 }
486 
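/*
 * Answer a PASSTHRU_GET_ADAPTER request: report BAR0, the driver version and
 * the PCI identity of the adapter, using copy_buffer as a bounce buffer.
 */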
487 static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
488 {
489 	struct st_frame *p;
490 	size_t count = sizeof(struct st_frame);
491 
492 	p = hba->copy_buffer;
493 	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
494 	memset(p->base, 0, sizeof(u32)*6);
495 	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
496 	p->rom_addr = 0;
497 
498 	p->drv_ver.major = ST_VER_MAJOR;
499 	p->drv_ver.minor = ST_VER_MINOR;
500 	p->drv_ver.oem = ST_OEM;
501 	p->drv_ver.build = ST_BUILD_VER;
502 
503 	p->bus = hba->pdev->bus->number;
504 	p->slot = hba->pdev->devfn;
505 	p->irq_level = 0;
506 	p->irq_vec = hba->pdev->irq;
507 	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
508 	p->subid =
509 		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
510 
511 	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
512 }
513 
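/*
 * Post a queued request on the legacy MU: write the current req_head to IMR0
 * and ring the inbound doorbell so the firmware fetches the new request.
 */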
514 static void
515 stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
516 {
517 	req->tag = cpu_to_le16(tag);
518 
519 	hba->ccb[tag].req = req;
520 	hba->out_req_cnt++;
521 
522 	writel(hba->req_head, hba->mmio_base + IMR0);
523 	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
524 	readl(hba->mmio_base + IDBL); /* flush */
525 }
526 
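/*
 * Post a request on the yel/P3 message unit: fill in the st_msg_header that
 * precedes the request, then hand the request's bus address to the firmware
 * through YH2I_REQ_HI/YH2I_REQ.  The handle's low bits carry
 * (sg_count + 4) / 11, a firmware-defined encoding.
 */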
527 static void
528 stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
529 {
530 	struct scsi_cmnd *cmd;
531 	struct st_msg_header *msg_h;
532 	dma_addr_t addr;
533 
534 	req->tag = cpu_to_le16(tag);
535 
536 	hba->ccb[tag].req = req;
537 	hba->out_req_cnt++;
538 
539 	cmd = hba->ccb[tag].cmd;
540 	msg_h = (struct st_msg_header *)req - 1;
541 	if (likely(cmd)) {
542 		msg_h->channel = (u8)cmd->device->channel;
543 		msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
544 	}
545 	addr = hba->dma_handle + hba->req_head * hba->rq_size;
546 	addr += (hba->ccb[tag].sg_count+4)/11;
547 	msg_h->handle = cpu_to_le64(addr);
548 
549 	++hba->req_head;
550 	hba->req_head %= hba->rq_count+1;
551 	if (hba->cardtype == st_P3) {
552 		writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
553 		writel(addr, hba->mmio_base + YH2I_REQ);
554 	} else {
555 		writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
556 		readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
557 		writel(addr, hba->mmio_base + YH2I_REQ);
558 		readl(hba->mmio_base + YH2I_REQ); /* flush */
559 	}
560 }
561 
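/*
 * Complete every outstanding command with the given host status; used when
 * the controller is reset or the adapter is being removed.
 */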
562 static void return_abnormal_state(struct st_hba *hba, int status)
563 {
564 	struct st_ccb *ccb;
565 	unsigned long flags;
566 	u16 tag;
567 
568 	spin_lock_irqsave(hba->host->host_lock, flags);
569 	for (tag = 0; tag < hba->host->can_queue; tag++) {
570 		ccb = &hba->ccb[tag];
571 		if (ccb->req == NULL)
572 			continue;
573 		ccb->req = NULL;
574 		if (ccb->cmd) {
575 			scsi_dma_unmap(ccb->cmd);
576 			ccb->cmd->result = status << 16;
577 			scsi_done(ccb->cmd);
578 			ccb->cmd = NULL;
579 		}
580 	}
581 	spin_unlock_irqrestore(hba->host->host_lock, flags);
582 }
583 static int
584 stex_slave_config(struct scsi_device *sdev)
585 {
586 	sdev->use_10_for_rw = 1;
587 	sdev->use_10_for_ms = 1;
588 	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
589 
590 	return 0;
591 }
592 
593 static int stex_queuecommand_lck(struct scsi_cmnd *cmd)
594 {
595 	void (*done)(struct scsi_cmnd *) = scsi_done;
596 	struct st_hba *hba;
597 	struct Scsi_Host *host;
598 	unsigned int id, lun;
599 	struct req_msg *req;
600 	u16 tag;
601 
602 	host = cmd->device->host;
603 	id = cmd->device->id;
604 	lun = cmd->device->lun;
605 	hba = (struct st_hba *) &host->hostdata[0];
606 	if (hba->mu_status == MU_STATE_NOCONNECT) {
607 		cmd->result = DID_NO_CONNECT << 16;
608 		done(cmd);
609 		return 0;
610 	}
611 	if (unlikely(hba->mu_status != MU_STATE_STARTED))
612 		return SCSI_MLQUEUE_HOST_BUSY;
613 
614 	switch (cmd->cmnd[0]) {
615 	case MODE_SENSE_10:
616 	{
617 		static char ms10_caching_page[12] =
618 			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
619 		unsigned char page;
620 
621 		page = cmd->cmnd[2] & 0x3f;
622 		if (page == 0x8 || page == 0x3f) {
623 			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
624 						 sizeof(ms10_caching_page));
625 			cmd->result = DID_OK << 16;
626 			done(cmd);
627 		} else
628 			stex_invalid_field(cmd, done);
629 		return 0;
630 	}
631 	case REPORT_LUNS:
632 		/*
633 		 * The shasta firmware does not report actual luns in the
634 		 * target, so fail the command to force sequential lun scan.
635 		 * Also, the console device does not support this command.
636 		 */
637 		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
638 			stex_invalid_field(cmd, done);
639 			return 0;
640 		}
641 		break;
642 	case TEST_UNIT_READY:
643 		if (id == host->max_id - 1) {
644 			cmd->result = DID_OK << 16;
645 			done(cmd);
646 			return 0;
647 		}
648 		break;
649 	case INQUIRY:
650 		if (lun >= host->max_lun) {
651 			cmd->result = DID_NO_CONNECT << 16;
652 			done(cmd);
653 			return 0;
654 		}
655 		if (id != host->max_id - 1)
656 			break;
657 		if (!lun && !cmd->device->channel &&
658 			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
659 			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
660 						 sizeof(console_inq_page));
661 			cmd->result = DID_OK << 16;
662 			done(cmd);
663 		} else
664 			stex_invalid_field(cmd, done);
665 		return 0;
666 	case PASSTHRU_CMD:
667 		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
668 			struct st_drvver ver;
669 			size_t cp_len = sizeof(ver);
670 
671 			ver.major = ST_VER_MAJOR;
672 			ver.minor = ST_VER_MINOR;
673 			ver.oem = ST_OEM;
674 			ver.build = ST_BUILD_VER;
675 			ver.signature[0] = PASSTHRU_SIGNATURE;
676 			ver.console_id = host->max_id - 1;
677 			ver.host_no = hba->host->host_no;
678 			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
679 			if (sizeof(ver) == cp_len)
680 				cmd->result = DID_OK << 16;
681 			else
682 				cmd->result = DID_ERROR << 16;
683 			done(cmd);
684 			return 0;
685 		}
686 		break;
687 	default:
688 		break;
689 	}
690 
691 	tag = scsi_cmd_to_rq(cmd)->tag;
692 
693 	if (unlikely(tag >= host->can_queue))
694 		return SCSI_MLQUEUE_HOST_BUSY;
695 
696 	req = hba->alloc_rq(hba);
697 
698 	req->lun = lun;
699 	req->target = id;
700 
701 	/* cdb */
702 	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
703 
704 	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
705 		req->data_dir = MSG_DATA_DIR_IN;
706 	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
707 		req->data_dir = MSG_DATA_DIR_OUT;
708 	else
709 		req->data_dir = MSG_DATA_DIR_ND;
710 
711 	hba->ccb[tag].cmd = cmd;
712 	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
713 	hba->ccb[tag].sense_buffer = cmd->sense_buffer;
714 
715 	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
716 		hba->ccb[tag].sg_count = 0;
717 		memset(&req->variable[0], 0, 8);
718 	}
719 
720 	hba->send(hba, req, tag);
721 	return 0;
722 }
723 
724 static DEF_SCSI_QCMD(stex_queuecommand)
725 
726 static void stex_scsi_done(struct st_ccb *ccb)
727 {
728 	struct scsi_cmnd *cmd = ccb->cmd;
729 	int result;
730 
731 	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
732 		result = ccb->scsi_status;
733 		switch (ccb->scsi_status) {
734 		case SAM_STAT_GOOD:
735 			result |= DID_OK << 16;
736 			break;
737 		case SAM_STAT_CHECK_CONDITION:
738 			result |= DID_OK << 16;
739 			break;
740 		case SAM_STAT_BUSY:
741 			result |= DID_BUS_BUSY << 16;
742 			break;
743 		default:
744 			result |= DID_ERROR << 16;
745 			break;
746 		}
747 	}
748 	else if (ccb->srb_status & SRB_SEE_SENSE)
749 		result = SAM_STAT_CHECK_CONDITION;
750 	else switch (ccb->srb_status) {
751 		case SRB_STATUS_SELECTION_TIMEOUT:
752 			result = DID_NO_CONNECT << 16;
753 			break;
754 		case SRB_STATUS_BUSY:
755 			result = DID_BUS_BUSY << 16;
756 			break;
757 		case SRB_STATUS_INVALID_REQUEST:
758 		case SRB_STATUS_ERROR:
759 		default:
760 			result = DID_ERROR << 16;
761 			break;
762 	}
763 
764 	cmd->result = result;
765 	scsi_done(cmd);
766 }
767 
768 static void stex_copy_data(struct st_ccb *ccb,
769 	struct status_msg *resp, unsigned int variable)
770 {
771 	if (resp->scsi_status != SAM_STAT_GOOD) {
772 		if (ccb->sense_buffer != NULL)
773 			memcpy(ccb->sense_buffer, resp->variable,
774 				min(variable, ccb->sense_bufflen));
775 		return;
776 	}
777 
778 	if (ccb->cmd == NULL)
779 		return;
780 	scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
781 }
782 
783 static void stex_check_cmd(struct st_hba *hba,
784 	struct st_ccb *ccb, struct status_msg *resp)
785 {
786 	if (ccb->cmd->cmnd[0] == MGT_CMD &&
787 		resp->scsi_status != SAM_STAT_CHECK_CONDITION)
788 		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
789 			le32_to_cpu(*(__le32 *)&resp->variable[0]));
790 }
791 
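/*
 * Legacy MU completion path: walk the status ring from status_tail to the
 * firmware-written status_head (OMR1) and finish the matching commands.
 */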
792 static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
793 {
794 	void __iomem *base = hba->mmio_base;
795 	struct status_msg *resp;
796 	struct st_ccb *ccb;
797 	unsigned int size;
798 	u16 tag;
799 
800 	if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
801 		return;
802 
803 	/* status payloads */
804 	hba->status_head = readl(base + OMR1);
805 	if (unlikely(hba->status_head > hba->sts_count)) {
806 		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
807 			pci_name(hba->pdev));
808 		return;
809 	}
810 
811 	/*
812 	 * It's not a valid status payload if:
813 	 * 1. there are no pending requests (e.g. during the init stage)
814 	 * 2. there are pending requests, but the controller is in the reset
815 	 *    state and its type is not st_yosemite
816 	 * st_yosemite firmware returns pending requests to the driver while
817 	 * resetting, so that case is allowed through
818 	 */
819 	if (unlikely(hba->out_req_cnt <= 0 ||
820 			(hba->mu_status == MU_STATE_RESETTING &&
821 			 hba->cardtype != st_yosemite))) {
822 		hba->status_tail = hba->status_head;
823 		goto update_status;
824 	}
825 
826 	while (hba->status_tail != hba->status_head) {
827 		resp = stex_get_status(hba);
828 		tag = le16_to_cpu(resp->tag);
829 		if (unlikely(tag >= hba->host->can_queue)) {
830 			printk(KERN_WARNING DRV_NAME
831 				"(%s): invalid tag\n", pci_name(hba->pdev));
832 			continue;
833 		}
834 
835 		hba->out_req_cnt--;
836 		ccb = &hba->ccb[tag];
837 		if (unlikely(hba->wait_ccb == ccb))
838 			hba->wait_ccb = NULL;
839 		if (unlikely(ccb->req == NULL)) {
840 			printk(KERN_WARNING DRV_NAME
841 				"(%s): lagging req\n", pci_name(hba->pdev));
842 			continue;
843 		}
844 
845 		size = resp->payload_sz * sizeof(u32); /* payload size */
846 		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
847 			size > sizeof(*resp))) {
848 			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
849 				pci_name(hba->pdev));
850 		} else {
851 			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
852 			if (size)
853 				stex_copy_data(ccb, resp, size);
854 		}
855 
856 		ccb->req = NULL;
857 		ccb->srb_status = resp->srb_status;
858 		ccb->scsi_status = resp->scsi_status;
859 
860 		if (likely(ccb->cmd != NULL)) {
861 			if (hba->cardtype == st_yosemite)
862 				stex_check_cmd(hba, ccb, resp);
863 
864 			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
865 				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
866 				stex_controller_info(hba, ccb);
867 
868 			scsi_dma_unmap(ccb->cmd);
869 			stex_scsi_done(ccb);
870 		} else
871 			ccb->req_type = 0;
872 	}
873 
874 update_status:
875 	writel(hba->status_head, base + IMR1);
876 	readl(base + IMR1); /* flush */
877 }
878 
879 static irqreturn_t stex_intr(int irq, void *__hba)
880 {
881 	struct st_hba *hba = __hba;
882 	void __iomem *base = hba->mmio_base;
883 	u32 data;
884 	unsigned long flags;
885 
886 	spin_lock_irqsave(hba->host->host_lock, flags);
887 
888 	data = readl(base + ODBL);
889 
890 	if (data && data != 0xffffffff) {
891 		/* clear the interrupt */
892 		writel(data, base + ODBL);
893 		readl(base + ODBL); /* flush */
894 		stex_mu_intr(hba, data);
895 		spin_unlock_irqrestore(hba->host->host_lock, flags);
896 		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
897 			hba->cardtype == st_shasta))
898 			queue_work(hba->work_q, &hba->reset_work);
899 		return IRQ_HANDLED;
900 	}
901 
902 	spin_unlock_irqrestore(hba->host->host_lock, flags);
903 
904 	return IRQ_NONE;
905 }
906 
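/*
 * yel/P3 completion path: each finished request posts a 32-bit token into the
 * scratch array.  SS_STS_DONE means the command succeeded; otherwise the
 * status payload carries the SRB/SCSI status and any sense data.
 */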
907 static void stex_ss_mu_intr(struct st_hba *hba)
908 {
909 	struct status_msg *resp;
910 	struct st_ccb *ccb;
911 	__le32 *scratch;
912 	unsigned int size;
913 	int count = 0;
914 	u32 value;
915 	u16 tag;
916 
917 	if (unlikely(hba->out_req_cnt <= 0 ||
918 			hba->mu_status == MU_STATE_RESETTING))
919 		return;
920 
921 	while (count < hba->sts_count) {
922 		scratch = hba->scratch + hba->status_tail;
923 		value = le32_to_cpu(*scratch);
924 		if (unlikely(!(value & SS_STS_NORMAL)))
925 			return;
926 
927 		resp = hba->status_buffer + hba->status_tail;
928 		*scratch = 0;
929 		++count;
930 		++hba->status_tail;
931 		hba->status_tail %= hba->sts_count+1;
932 
933 		tag = (u16)value;
934 		if (unlikely(tag >= hba->host->can_queue)) {
935 			printk(KERN_WARNING DRV_NAME
936 				"(%s): invalid tag\n", pci_name(hba->pdev));
937 			continue;
938 		}
939 
940 		hba->out_req_cnt--;
941 		ccb = &hba->ccb[tag];
942 		if (unlikely(hba->wait_ccb == ccb))
943 			hba->wait_ccb = NULL;
944 		if (unlikely(ccb->req == NULL)) {
945 			printk(KERN_WARNING DRV_NAME
946 				"(%s): lagging req\n", pci_name(hba->pdev));
947 			continue;
948 		}
949 
950 		ccb->req = NULL;
951 		if (likely(value & SS_STS_DONE)) { /* normal case */
952 			ccb->srb_status = SRB_STATUS_SUCCESS;
953 			ccb->scsi_status = SAM_STAT_GOOD;
954 		} else {
955 			ccb->srb_status = resp->srb_status;
956 			ccb->scsi_status = resp->scsi_status;
957 			size = resp->payload_sz * sizeof(u32);
958 			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
959 				size > sizeof(*resp))) {
960 				printk(KERN_WARNING DRV_NAME
961 					"(%s): bad status size\n",
962 					pci_name(hba->pdev));
963 			} else {
964 				size -= sizeof(*resp) - STATUS_VAR_LEN;
965 				if (size)
966 					stex_copy_data(ccb, resp, size);
967 			}
968 			if (likely(ccb->cmd != NULL))
969 				stex_check_cmd(hba, ccb, resp);
970 		}
971 
972 		if (likely(ccb->cmd != NULL)) {
973 			scsi_dma_unmap(ccb->cmd);
974 			stex_scsi_done(ccb);
975 		} else
976 			ccb->req_type = 0;
977 	}
978 }
979 
980 static irqreturn_t stex_ss_intr(int irq, void *__hba)
981 {
982 	struct st_hba *hba = __hba;
983 	void __iomem *base = hba->mmio_base;
984 	u32 data;
985 	unsigned long flags;
986 
987 	spin_lock_irqsave(hba->host->host_lock, flags);
988 
989 	if (hba->cardtype == st_yel) {
990 		data = readl(base + YI2H_INT);
991 		if (data && data != 0xffffffff) {
992 			/* clear the interrupt */
993 			writel(data, base + YI2H_INT_C);
994 			stex_ss_mu_intr(hba);
995 			spin_unlock_irqrestore(hba->host->host_lock, flags);
996 			if (unlikely(data & SS_I2H_REQUEST_RESET))
997 				queue_work(hba->work_q, &hba->reset_work);
998 			return IRQ_HANDLED;
999 		}
1000 	} else {
1001 		data = readl(base + PSCRATCH4);
1002 		if (data != 0xffffffff) {
1003 			if (data != 0) {
1004 				/* clear the interrupt */
1005 				writel(data, base + PSCRATCH1);
1006 				writel((1 << 22), base + YH2I_INT);
1007 			}
1008 			stex_ss_mu_intr(hba);
1009 			spin_unlock_irqrestore(hba->host->host_lock, flags);
1010 			if (unlikely(data & SS_I2H_REQUEST_RESET))
1011 				queue_work(hba->work_q, &hba->reset_work);
1012 			return IRQ_HANDLED;
1013 		}
1014 	}
1015 
1016 	spin_unlock_irqrestore(hba->host->host_lock, flags);
1017 
1018 	return IRQ_NONE;
1019 }
1020 
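/*
 * Legacy handshake: wait for MU_HANDSHAKE_SIGNATURE in OMR0, describe the DMA
 * request/status rings in a handshake_frame built in the status buffer, then
 * ring the inbound doorbell and wait for the firmware to acknowledge.
 */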
1021 static int stex_common_handshake(struct st_hba *hba)
1022 {
1023 	void __iomem *base = hba->mmio_base;
1024 	struct handshake_frame *h;
1025 	dma_addr_t status_phys;
1026 	u32 data;
1027 	unsigned long before;
1028 
1029 	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
1030 		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
1031 		readl(base + IDBL);
1032 		before = jiffies;
1033 		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
1034 			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1035 				printk(KERN_ERR DRV_NAME
1036 					"(%s): no handshake signature\n",
1037 					pci_name(hba->pdev));
1038 				return -1;
1039 			}
1040 			rmb();
1041 			msleep(1);
1042 		}
1043 	}
1044 
1045 	udelay(10);
1046 
1047 	data = readl(base + OMR1);
1048 	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
1049 		data &= 0x0000ffff;
1050 		if (hba->host->can_queue > data) {
1051 			hba->host->can_queue = data;
1052 			hba->host->cmd_per_lun = data;
1053 		}
1054 	}
1055 
1056 	h = (struct handshake_frame *)hba->status_buffer;
1057 	h->rb_phy = cpu_to_le64(hba->dma_handle);
1058 	h->req_sz = cpu_to_le16(hba->rq_size);
1059 	h->req_cnt = cpu_to_le16(hba->rq_count+1);
1060 	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
1061 	h->status_cnt = cpu_to_le16(hba->sts_count+1);
1062 	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
1063 	h->partner_type = HMU_PARTNER_TYPE;
1064 	if (hba->extra_offset) {
1065 		h->extra_offset = cpu_to_le32(hba->extra_offset);
1066 		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
1067 	} else
1068 		h->extra_offset = h->extra_size = 0;
1069 
1070 	status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
1071 	writel(status_phys, base + IMR0);
1072 	readl(base + IMR0);
1073 	writel((status_phys >> 16) >> 16, base + IMR1);
1074 	readl(base + IMR1);
1075 
1076 	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
1077 	readl(base + OMR0);
1078 	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
1079 	readl(base + IDBL); /* flush */
1080 
1081 	udelay(10);
1082 	before = jiffies;
1083 	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
1084 		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1085 			printk(KERN_ERR DRV_NAME
1086 				"(%s): no signature after handshake frame\n",
1087 				pci_name(hba->pdev));
1088 			return -1;
1089 		}
1090 		rmb();
1091 		msleep(1);
1092 	}
1093 
1094 	writel(0, base + IMR0);
1095 	readl(base + IMR0);
1096 	writel(0, base + OMR0);
1097 	readl(base + OMR0);
1098 	writel(0, base + IMR1);
1099 	readl(base + IMR1);
1100 	writel(0, base + OMR1);
1101 	readl(base + OMR1); /* flush */
1102 	return 0;
1103 }
1104 
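/*
 * yel/P3 handshake: wait for the firmware-operational flag, post a
 * handshake_frame through YH2I_REQ, then poll for SS_STS_HANDSHAKE in the
 * scratch area (st_yel) or the handshake mailbox (st_P3).
 */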
1105 static int stex_ss_handshake(struct st_hba *hba)
1106 {
1107 	void __iomem *base = hba->mmio_base;
1108 	struct st_msg_header *msg_h;
1109 	struct handshake_frame *h;
1110 	__le32 *scratch;
1111 	u32 data, scratch_size, mailboxdata, operationaldata;
1112 	unsigned long before;
1113 	int ret = 0;
1114 
1115 	before = jiffies;
1116 
1117 	if (hba->cardtype == st_yel) {
1118 		operationaldata = readl(base + YIOA_STATUS);
1119 		while (operationaldata != SS_MU_OPERATIONAL) {
1120 			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1121 				printk(KERN_ERR DRV_NAME
1122 					"(%s): firmware not operational\n",
1123 					pci_name(hba->pdev));
1124 				return -1;
1125 			}
1126 			msleep(1);
1127 			operationaldata = readl(base + YIOA_STATUS);
1128 		}
1129 	} else {
1130 		operationaldata = readl(base + PSCRATCH3);
1131 		while (operationaldata != SS_MU_OPERATIONAL) {
1132 			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1133 				printk(KERN_ERR DRV_NAME
1134 					"(%s): firmware not operational\n",
1135 					pci_name(hba->pdev));
1136 				return -1;
1137 			}
1138 			msleep(1);
1139 			operationaldata = readl(base + PSCRATCH3);
1140 		}
1141 	}
1142 
1143 	msg_h = (struct st_msg_header *)hba->dma_mem;
1144 	msg_h->handle = cpu_to_le64(hba->dma_handle);
1145 	msg_h->flag = SS_HEAD_HANDSHAKE;
1146 
1147 	h = (struct handshake_frame *)(msg_h + 1);
1148 	h->rb_phy = cpu_to_le64(hba->dma_handle);
1149 	h->req_sz = cpu_to_le16(hba->rq_size);
1150 	h->req_cnt = cpu_to_le16(hba->rq_count+1);
1151 	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
1152 	h->status_cnt = cpu_to_le16(hba->sts_count+1);
1153 	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
1154 	h->partner_type = HMU_PARTNER_TYPE;
1155 	h->extra_offset = h->extra_size = 0;
1156 	scratch_size = (hba->sts_count+1)*sizeof(u32);
1157 	h->scratch_size = cpu_to_le32(scratch_size);
1158 
1159 	if (hba->cardtype == st_yel) {
1160 		data = readl(base + YINT_EN);
1161 		data &= ~4;
1162 		writel(data, base + YINT_EN);
1163 		writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1164 		readl(base + YH2I_REQ_HI);
1165 		writel(hba->dma_handle, base + YH2I_REQ);
1166 		readl(base + YH2I_REQ); /* flush */
1167 	} else {
1168 		data = readl(base + YINT_EN);
1169 		data &= ~(1 << 0);
1170 		data &= ~(1 << 2);
1171 		writel(data, base + YINT_EN);
1172 		if (hba->msi_lock == 0) {
1173 			/* the P3 MSI register must not be written twice */
1174 			writel((1 << 6), base + YH2I_INT);
1175 			hba->msi_lock = 1;
1176 		}
1177 		writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1178 		writel(hba->dma_handle, base + YH2I_REQ);
1179 	}
1180 
1181 	before = jiffies;
1182 	scratch = hba->scratch;
1183 	if (hba->cardtype == st_yel) {
1184 		while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
1185 			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1186 				printk(KERN_ERR DRV_NAME
1187 					"(%s): no signature after handshake frame\n",
1188 					pci_name(hba->pdev));
1189 				ret = -1;
1190 				break;
1191 			}
1192 			rmb();
1193 			msleep(1);
1194 		}
1195 	} else {
1196 		mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS);
1197 		while (mailboxdata != SS_STS_HANDSHAKE) {
1198 			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1199 				printk(KERN_ERR DRV_NAME
1200 					"(%s): no signature after handshake frame\n",
1201 					pci_name(hba->pdev));
1202 				ret = -1;
1203 				break;
1204 			}
1205 			rmb();
1206 			msleep(1);
1207 			mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS);
1208 		}
1209 	}
1210 	memset(scratch, 0, scratch_size);
1211 	msg_h->flag = 0;
1212 
1213 	return ret;
1214 }
1215 
1216 static int stex_handshake(struct st_hba *hba)
1217 {
1218 	int err;
1219 	unsigned long flags;
1220 	unsigned int mu_status;
1221 
1222 	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1223 		err = stex_ss_handshake(hba);
1224 	else
1225 		err = stex_common_handshake(hba);
1226 	spin_lock_irqsave(hba->host->host_lock, flags);
1227 	mu_status = hba->mu_status;
1228 	if (err == 0) {
1229 		hba->req_head = 0;
1230 		hba->req_tail = 0;
1231 		hba->status_head = 0;
1232 		hba->status_tail = 0;
1233 		hba->out_req_cnt = 0;
1234 		hba->mu_status = MU_STATE_STARTED;
1235 	} else
1236 		hba->mu_status = MU_STATE_FAILED;
1237 	if (mu_status == MU_STATE_RESETTING)
1238 		wake_up_all(&hba->reset_waitq);
1239 	spin_unlock_irqrestore(hba->host->host_lock, flags);
1240 	return err;
1241 }
1242 
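/*
 * Abort handler: flush the completion path by hand; if that completes the
 * command, report success, otherwise detach the ccb and fail the abort.
 */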
1243 static int stex_abort(struct scsi_cmnd *cmd)
1244 {
1245 	struct Scsi_Host *host = cmd->device->host;
1246 	struct st_hba *hba = (struct st_hba *)host->hostdata;
1247 	u16 tag = scsi_cmd_to_rq(cmd)->tag;
1248 	void __iomem *base;
1249 	u32 data;
1250 	int result = SUCCESS;
1251 	unsigned long flags;
1252 
1253 	scmd_printk(KERN_INFO, cmd, "aborting command\n");
1254 
1255 	base = hba->mmio_base;
1256 	spin_lock_irqsave(host->host_lock, flags);
1257 	if (tag < host->can_queue &&
1258 		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
1259 		hba->wait_ccb = &hba->ccb[tag];
1260 	else
1261 		goto out;
1262 
1263 	if (hba->cardtype == st_yel) {
1264 		data = readl(base + YI2H_INT);
1265 		if (data == 0 || data == 0xffffffff)
1266 			goto fail_out;
1267 
1268 		writel(data, base + YI2H_INT_C);
1269 		stex_ss_mu_intr(hba);
1270 	} else if (hba->cardtype == st_P3) {
1271 		data = readl(base + PSCRATCH4);
1272 		if (data == 0xffffffff)
1273 			goto fail_out;
1274 		if (data != 0) {
1275 			writel(data, base + PSCRATCH1);
1276 			writel((1 << 22), base + YH2I_INT);
1277 		}
1278 		stex_ss_mu_intr(hba);
1279 	} else {
1280 		data = readl(base + ODBL);
1281 		if (data == 0 || data == 0xffffffff)
1282 			goto fail_out;
1283 
1284 		writel(data, base + ODBL);
1285 		readl(base + ODBL); /* flush */
1286 		stex_mu_intr(hba, data);
1287 	}
1288 	if (hba->wait_ccb == NULL) {
1289 		printk(KERN_WARNING DRV_NAME
1290 			"(%s): lost interrupt\n", pci_name(hba->pdev));
1291 		goto out;
1292 	}
1293 
1294 fail_out:
1295 	scsi_dma_unmap(cmd);
1296 	hba->wait_ccb->req = NULL; /* nullify the req's future return */
1297 	hba->wait_ccb = NULL;
1298 	result = FAILED;
1299 out:
1300 	spin_unlock_irqrestore(host->host_lock, flags);
1301 	return result;
1302 }
1303 
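/*
 * Shasta reset: save PCI config space, pulse the bridge's secondary bus reset,
 * wait until the controller answers config cycles again, then restore the
 * saved config space.
 */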
1304 static void stex_hard_reset(struct st_hba *hba)
1305 {
1306 	struct pci_bus *bus;
1307 	int i;
1308 	u16 pci_cmd;
1309 	u8 pci_bctl;
1310 
1311 	for (i = 0; i < 16; i++)
1312 		pci_read_config_dword(hba->pdev, i * 4,
1313 			&hba->pdev->saved_config_space[i]);
1314 
1315 	/* Reset the secondary bus. Our controller (MU/ATU) is the only device on
1316 	   the secondary bus. Consult the Intel 80331/3 developer's manual for details */
1317 	bus = hba->pdev->bus;
1318 	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
1319 	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
1320 	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
1321 
1322 	/*
1323 	 * 1 ms may be enough for 8-port controllers. But 16-port controllers
1324 	 * require more time to finish bus reset. Use 100 ms here for safety
1325 	 */
1326 	msleep(100);
1327 	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
1328 	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
1329 
1330 	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
1331 		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
1332 		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
1333 			break;
1334 		msleep(1);
1335 	}
1336 
1337 	ssleep(5);
1338 	for (i = 0; i < 16; i++)
1339 		pci_write_config_dword(hba->pdev, i * 4,
1340 			hba->pdev->saved_config_space[i]);
1341 }
1342 
1343 static int stex_yos_reset(struct st_hba *hba)
1344 {
1345 	void __iomem *base;
1346 	unsigned long flags, before;
1347 	int ret = 0;
1348 
1349 	base = hba->mmio_base;
1350 	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
1351 	readl(base + IDBL); /* flush */
1352 	before = jiffies;
1353 	while (hba->out_req_cnt > 0) {
1354 		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
1355 			printk(KERN_WARNING DRV_NAME
1356 				"(%s): reset timeout\n", pci_name(hba->pdev));
1357 			ret = -1;
1358 			break;
1359 		}
1360 		msleep(1);
1361 	}
1362 
1363 	spin_lock_irqsave(hba->host->host_lock, flags);
1364 	if (ret == -1)
1365 		hba->mu_status = MU_STATE_FAILED;
1366 	else
1367 		hba->mu_status = MU_STATE_STARTED;
1368 	wake_up_all(&hba->reset_waitq);
1369 	spin_unlock_irqrestore(hba->host->host_lock, flags);
1370 
1371 	return ret;
1372 }
1373 
1374 static void stex_ss_reset(struct st_hba *hba)
1375 {
1376 	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1377 	readl(hba->mmio_base + YH2I_INT);
1378 	ssleep(5);
1379 }
1380 
1381 static void stex_p3_reset(struct st_hba *hba)
1382 {
1383 	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1384 	ssleep(5);
1385 }
1386 
1387 static int stex_do_reset(struct st_hba *hba)
1388 {
1389 	unsigned long flags;
1390 	unsigned int mu_status = MU_STATE_RESETTING;
1391 
1392 	spin_lock_irqsave(hba->host->host_lock, flags);
1393 	if (hba->mu_status == MU_STATE_STARTING) {
1394 		spin_unlock_irqrestore(hba->host->host_lock, flags);
1395 		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
1396 			pci_name(hba->pdev));
1397 		return 0;
1398 	}
1399 	while (hba->mu_status == MU_STATE_RESETTING) {
1400 		spin_unlock_irqrestore(hba->host->host_lock, flags);
1401 		wait_event_timeout(hba->reset_waitq,
1402 				   hba->mu_status != MU_STATE_RESETTING,
1403 				   MU_MAX_DELAY * HZ);
1404 		spin_lock_irqsave(hba->host->host_lock, flags);
1405 		mu_status = hba->mu_status;
1406 	}
1407 
1408 	if (mu_status != MU_STATE_RESETTING) {
1409 		spin_unlock_irqrestore(hba->host->host_lock, flags);
1410 		return (mu_status == MU_STATE_STARTED) ? 0 : -1;
1411 	}
1412 
1413 	hba->mu_status = MU_STATE_RESETTING;
1414 	spin_unlock_irqrestore(hba->host->host_lock, flags);
1415 
1416 	if (hba->cardtype == st_yosemite)
1417 		return stex_yos_reset(hba);
1418 
1419 	if (hba->cardtype == st_shasta)
1420 		stex_hard_reset(hba);
1421 	else if (hba->cardtype == st_yel)
1422 		stex_ss_reset(hba);
1423 	else if (hba->cardtype == st_P3)
1424 		stex_p3_reset(hba);
1425 
1426 	return_abnormal_state(hba, DID_RESET);
1427 
1428 	if (stex_handshake(hba) == 0)
1429 		return 0;
1430 
1431 	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
1432 		pci_name(hba->pdev));
1433 	return -1;
1434 }
1435 
1436 static int stex_reset(struct scsi_cmnd *cmd)
1437 {
1438 	struct st_hba *hba;
1439 
1440 	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
1441 
1442 	shost_printk(KERN_INFO, cmd->device->host,
1443 		     "resetting host\n");
1444 
1445 	return stex_do_reset(hba) ? FAILED : SUCCESS;
1446 }
1447 
1448 static void stex_reset_work(struct work_struct *work)
1449 {
1450 	struct st_hba *hba = container_of(work, struct st_hba, reset_work);
1451 
1452 	stex_do_reset(hba);
1453 }
1454 
1455 static int stex_biosparam(struct scsi_device *sdev,
1456 	struct block_device *bdev, sector_t capacity, int geom[])
1457 {
1458 	int heads = 255, sectors = 63;
1459 
1460 	if (capacity < 0x200000) {
1461 		heads = 64;
1462 		sectors = 32;
1463 	}
1464 
1465 	sector_div(capacity, heads * sectors);
1466 
1467 	geom[0] = heads;
1468 	geom[1] = sectors;
1469 	geom[2] = capacity;
1470 
1471 	return 0;
1472 }
1473 
1474 static struct scsi_host_template driver_template = {
1475 	.module				= THIS_MODULE,
1476 	.name				= DRV_NAME,
1477 	.proc_name			= DRV_NAME,
1478 	.bios_param			= stex_biosparam,
1479 	.queuecommand			= stex_queuecommand,
1480 	.slave_configure		= stex_slave_config,
1481 	.eh_abort_handler		= stex_abort,
1482 	.eh_host_reset_handler		= stex_reset,
1483 	.this_id			= -1,
1484 	.dma_boundary			= PAGE_SIZE - 1,
1485 };
1486 
1487 static struct pci_device_id stex_pci_tbl[] = {
1488 	/* st_shasta */
1489 	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1490 		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
1491 	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1492 		st_shasta }, /* SuperTrak EX12350 */
1493 	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1494 		st_shasta }, /* SuperTrak EX4350 */
1495 	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1496 		st_shasta }, /* SuperTrak EX24350 */
1497 
1498 	/* st_vsc */
1499 	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
1500 
1501 	/* st_yosemite */
1502 	{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },
1503 
1504 	/* st_seq */
1505 	{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },
1506 
1507 	/* st_yel */
1508 	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
1509 	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
1510 
1511 	/* st_P3, pluto */
1512 	{ PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
1513 		0x8870, 0, 0, st_P3 },
1514 	/* st_P3, p3 */
1515 	{ PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
1516 		0x4300, 0, 0, st_P3 },
1517 
1518 	/* st_P3, SymplyStor4E */
1519 	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
1520 		0x4311, 0, 0, st_P3 },
1521 	/* st_P3, SymplyStor8E */
1522 	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
1523 		0x4312, 0, 0, st_P3 },
1524 	/* st_P3, SymplyStor4 */
1525 	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
1526 		0x4321, 0, 0, st_P3 },
1527 	/* st_P3, SymplyStor8 */
1528 	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
1529 		0x4322, 0, 0, st_P3 },
1530 	{ }	/* terminate list */
1531 };
1532 
1533 static struct st_card_info stex_card_info[] = {
1534 	/* st_shasta */
1535 	{
1536 		.max_id		= 17,
1537 		.max_lun	= 8,
1538 		.max_channel	= 0,
1539 		.rq_count	= 32,
1540 		.rq_size	= 1048,
1541 		.sts_count	= 32,
1542 		.alloc_rq	= stex_alloc_req,
1543 		.map_sg		= stex_map_sg,
1544 		.send		= stex_send_cmd,
1545 	},
1546 
1547 	/* st_vsc */
1548 	{
1549 		.max_id		= 129,
1550 		.max_lun	= 1,
1551 		.max_channel	= 0,
1552 		.rq_count	= 32,
1553 		.rq_size	= 1048,
1554 		.sts_count	= 32,
1555 		.alloc_rq	= stex_alloc_req,
1556 		.map_sg		= stex_map_sg,
1557 		.send		= stex_send_cmd,
1558 	},
1559 
1560 	/* st_yosemite */
1561 	{
1562 		.max_id		= 2,
1563 		.max_lun	= 256,
1564 		.max_channel	= 0,
1565 		.rq_count	= 256,
1566 		.rq_size	= 1048,
1567 		.sts_count	= 256,
1568 		.alloc_rq	= stex_alloc_req,
1569 		.map_sg		= stex_map_sg,
1570 		.send		= stex_send_cmd,
1571 	},
1572 
1573 	/* st_seq */
1574 	{
1575 		.max_id		= 129,
1576 		.max_lun	= 1,
1577 		.max_channel	= 0,
1578 		.rq_count	= 32,
1579 		.rq_size	= 1048,
1580 		.sts_count	= 32,
1581 		.alloc_rq	= stex_alloc_req,
1582 		.map_sg		= stex_map_sg,
1583 		.send		= stex_send_cmd,
1584 	},
1585 
1586 	/* st_yel */
1587 	{
1588 		.max_id		= 129,
1589 		.max_lun	= 256,
1590 		.max_channel	= 3,
1591 		.rq_count	= 801,
1592 		.rq_size	= 512,
1593 		.sts_count	= 801,
1594 		.alloc_rq	= stex_ss_alloc_req,
1595 		.map_sg		= stex_ss_map_sg,
1596 		.send		= stex_ss_send_cmd,
1597 	},
1598 
1599 	/* st_P3 */
1600 	{
1601 		.max_id		= 129,
1602 		.max_lun	= 256,
1603 		.max_channel	= 0,
1604 		.rq_count	= 801,
1605 		.rq_size	= 512,
1606 		.sts_count	= 801,
1607 		.alloc_rq	= stex_ss_alloc_req,
1608 		.map_sg		= stex_ss_map_sg,
1609 		.send		= stex_ss_send_cmd,
1610 	},
1611 };
1612 
1613 static int stex_request_irq(struct st_hba *hba)
1614 {
1615 	struct pci_dev *pdev = hba->pdev;
1616 	int status;
1617 
1618 	if (msi || hba->cardtype == st_P3) {
1619 		status = pci_enable_msi(pdev);
1620 		if (status != 0)
1621 			printk(KERN_ERR DRV_NAME
1622 				"(%s): error %d setting up MSI\n",
1623 				pci_name(pdev), status);
1624 		else
1625 			hba->msi_enabled = 1;
1626 	} else
1627 		hba->msi_enabled = 0;
1628 
1629 	status = request_irq(pdev->irq,
1630 		(hba->cardtype == st_yel || hba->cardtype == st_P3) ?
1631 		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
1632 
1633 	if (status != 0) {
1634 		if (hba->msi_enabled)
1635 			pci_disable_msi(pdev);
1636 	}
1637 	return status;
1638 }
1639 
1640 static void stex_free_irq(struct st_hba *hba)
1641 {
1642 	struct pci_dev *pdev = hba->pdev;
1643 
1644 	free_irq(pdev->irq, hba);
1645 	if (hba->msi_enabled)
1646 		pci_disable_msi(pdev);
1647 }
1648 
1649 static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1650 {
1651 	struct st_hba *hba;
1652 	struct Scsi_Host *host;
1653 	const struct st_card_info *ci = NULL;
1654 	u32 sts_offset, cp_offset, scratch_offset;
1655 	int err;
1656 
1657 	err = pci_enable_device(pdev);
1658 	if (err)
1659 		return err;
1660 
1661 	pci_set_master(pdev);
1662 
1663 	S6flag = 0;
1664 	register_reboot_notifier(&stex_notifier);
1665 
1666 	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
1667 
1668 	if (!host) {
1669 		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
1670 			pci_name(pdev));
1671 		err = -ENOMEM;
1672 		goto out_disable;
1673 	}
1674 
1675 	hba = (struct st_hba *)host->hostdata;
1676 	memset(hba, 0, sizeof(struct st_hba));
1677 
1678 	err = pci_request_regions(pdev, DRV_NAME);
1679 	if (err < 0) {
1680 		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
1681 			pci_name(pdev));
1682 		goto out_scsi_host_put;
1683 	}
1684 
1685 	hba->mmio_base = pci_ioremap_bar(pdev, 0);
1686 	if (!hba->mmio_base) {
1687 		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
1688 			pci_name(pdev));
1689 		err = -ENOMEM;
1690 		goto out_release_regions;
1691 	}
1692 
1693 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1694 	if (err)
1695 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1696 	if (err) {
1697 		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
1698 			pci_name(pdev));
1699 		goto out_iounmap;
1700 	}
1701 
1702 	hba->cardtype = (unsigned int) id->driver_data;
1703 	ci = &stex_card_info[hba->cardtype];
1704 	switch (id->subdevice) {
1705 	case 0x4221:
1706 	case 0x4222:
1707 	case 0x4223:
1708 	case 0x4224:
1709 	case 0x4225:
1710 	case 0x4226:
1711 	case 0x4227:
1712 	case 0x4261:
1713 	case 0x4262:
1714 	case 0x4263:
1715 	case 0x4264:
1716 	case 0x4265:
1717 		break;
1718 	default:
1719 		if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1720 			hba->supports_pm = 1;
1721 	}
1722 
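	/*
	 * Coherent DMA buffer layout: request ring, then (on yel/P3) the scratch
	 * array, then the status ring, then the copy buffer; st_seq and some
	 * st_vsc boards get an extra firmware region appended at the end.
	 */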
1723 	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
1724 	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1725 		sts_offset += (ci->sts_count+1) * sizeof(u32);
1726 	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
1727 	hba->dma_size = cp_offset + sizeof(struct st_frame);
1728 	if (hba->cardtype == st_seq ||
1729 		(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1730 		hba->extra_offset = hba->dma_size;
1731 		hba->dma_size += ST_ADDITIONAL_MEM;
1732 	}
1733 	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1734 		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1735 	if (!hba->dma_mem) {
1736 		/* Retry minimum coherent mapping for st_seq and st_vsc */
1737 		if (hba->cardtype == st_seq ||
1738 		    (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1739 			printk(KERN_WARNING DRV_NAME
1740 				"(%s): allocating min buffer for controller\n",
1741 				pci_name(pdev));
1742 			hba->dma_size = hba->extra_offset
1743 				+ ST_ADDITIONAL_MEM_MIN;
1744 			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1745 				hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1746 		}
1747 
1748 		if (!hba->dma_mem) {
1749 			err = -ENOMEM;
1750 			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
1751 				pci_name(pdev));
1752 			goto out_iounmap;
1753 		}
1754 	}
1755 
1756 	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
1757 	if (!hba->ccb) {
1758 		err = -ENOMEM;
1759 		printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
1760 			pci_name(pdev));
1761 		goto out_pci_free;
1762 	}
1763 
1764 	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1765 		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
1766 	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
1767 	hba->copy_buffer = hba->dma_mem + cp_offset;
1768 	hba->rq_count = ci->rq_count;
1769 	hba->rq_size = ci->rq_size;
1770 	hba->sts_count = ci->sts_count;
1771 	hba->alloc_rq = ci->alloc_rq;
1772 	hba->map_sg = ci->map_sg;
1773 	hba->send = ci->send;
1774 	hba->mu_status = MU_STATE_STARTING;
1775 	hba->msi_lock = 0;
1776 
1777 	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1778 		host->sg_tablesize = 38;
1779 	else
1780 		host->sg_tablesize = 32;
1781 	host->can_queue = ci->rq_count;
1782 	host->cmd_per_lun = ci->rq_count;
1783 	host->max_id = ci->max_id;
1784 	host->max_lun = ci->max_lun;
1785 	host->max_channel = ci->max_channel;
1786 	host->unique_id = host->host_no;
1787 	host->max_cmd_len = STEX_CDB_LENGTH;
1788 
1789 	hba->host = host;
1790 	hba->pdev = pdev;
1791 	init_waitqueue_head(&hba->reset_waitq);
1792 
1793 	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
1794 		 "stex_wq_%d", host->host_no);
1795 	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
1796 	if (!hba->work_q) {
1797 		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
1798 			pci_name(pdev));
1799 		err = -ENOMEM;
1800 		goto out_ccb_free;
1801 	}
1802 	INIT_WORK(&hba->reset_work, stex_reset_work);
1803 
1804 	err = stex_request_irq(hba);
1805 	if (err) {
1806 		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
1807 			pci_name(pdev));
1808 		goto out_free_wq;
1809 	}
1810 
1811 	err = stex_handshake(hba);
1812 	if (err)
1813 		goto out_free_irq;
1814 
1815 	pci_set_drvdata(pdev, hba);
1816 
1817 	err = scsi_add_host(host, &pdev->dev);
1818 	if (err) {
1819 		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
1820 			pci_name(pdev));
1821 		goto out_free_irq;
1822 	}
1823 
1824 	scsi_scan_host(host);
1825 
1826 	return 0;
1827 
1828 out_free_irq:
1829 	stex_free_irq(hba);
1830 out_free_wq:
1831 	destroy_workqueue(hba->work_q);
1832 out_ccb_free:
1833 	kfree(hba->ccb);
1834 out_pci_free:
1835 	dma_free_coherent(&pdev->dev, hba->dma_size,
1836 			  hba->dma_mem, hba->dma_handle);
1837 out_iounmap:
1838 	iounmap(hba->mmio_base);
1839 out_release_regions:
1840 	pci_release_regions(pdev);
1841 out_scsi_host_put:
1842 	scsi_host_put(host);
1843 out_disable:
1844 	pci_disable_device(pdev);
1845 
1846 	return err;
1847 }
1848 
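/*
 * Issue an internal shutdown / power-state request on tag 0 and poll
 * ccb[0].req_type until the firmware completes it or ST_INTERNAL_TIMEOUT
 * expires.
 */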
1849 static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
1850 {
1851 	struct req_msg *req;
1852 	struct st_msg_header *msg_h;
1853 	unsigned long flags;
1854 	unsigned long before;
1855 	u16 tag = 0;
1856 
1857 	spin_lock_irqsave(hba->host->host_lock, flags);
1858 
1859 	if ((hba->cardtype == st_yel || hba->cardtype == st_P3) &&
1860 		hba->supports_pm == 1) {
1861 		if (st_sleep_mic == ST_NOTHANDLED) {
1862 			spin_unlock_irqrestore(hba->host->host_lock, flags);
1863 			return;
1864 		}
1865 	}
1866 	req = hba->alloc_rq(hba);
1867 	if (hba->cardtype == st_yel || hba->cardtype == st_P3) {
1868 		msg_h = (struct st_msg_header *)req - 1;
1869 		memset(msg_h, 0, hba->rq_size);
1870 	} else
1871 		memset(req, 0, hba->rq_size);
1872 
1873 	if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel
1874 		|| hba->cardtype == st_P3)
1875 		&& st_sleep_mic == ST_IGNORED) {
1876 		req->cdb[0] = MGT_CMD;
1877 		req->cdb[1] = MGT_CMD_SIGNATURE;
1878 		req->cdb[2] = CTLR_CONFIG_CMD;
1879 		req->cdb[3] = CTLR_SHUTDOWN;
1880 	} else if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
1881 		&& st_sleep_mic != ST_IGNORED) {
1882 		req->cdb[0] = MGT_CMD;
1883 		req->cdb[1] = MGT_CMD_SIGNATURE;
1884 		req->cdb[2] = CTLR_CONFIG_CMD;
1885 		req->cdb[3] = PMIC_SHUTDOWN;
1886 		req->cdb[4] = st_sleep_mic;
1887 	} else {
1888 		req->cdb[0] = CONTROLLER_CMD;
1889 		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
1890 		req->cdb[2] = CTLR_POWER_SAVING;
1891 	}
1892 	hba->ccb[tag].cmd = NULL;
1893 	hba->ccb[tag].sg_count = 0;
1894 	hba->ccb[tag].sense_bufflen = 0;
1895 	hba->ccb[tag].sense_buffer = NULL;
1896 	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
1897 	hba->send(hba, req, tag);
1898 	spin_unlock_irqrestore(hba->host->host_lock, flags);
1899 	before = jiffies;
1900 	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
1901 		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
1902 			hba->ccb[tag].req_type = 0;
1903 			hba->mu_status = MU_STATE_STOP;
1904 			return;
1905 		}
1906 		msleep(1);
1907 	}
1908 	hba->mu_status = MU_STATE_STOP;
1909 }
1910 
1911 static void stex_hba_free(struct st_hba *hba)
1912 {
1913 	stex_free_irq(hba);
1914 
1915 	destroy_workqueue(hba->work_q);
1916 
1917 	iounmap(hba->mmio_base);
1918 
1919 	pci_release_regions(hba->pdev);
1920 
1921 	kfree(hba->ccb);
1922 
1923 	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
1924 			  hba->dma_mem, hba->dma_handle);
1925 }
1926 
1927 static void stex_remove(struct pci_dev *pdev)
1928 {
1929 	struct st_hba *hba = pci_get_drvdata(pdev);
1930 
1931 	hba->mu_status = MU_STATE_NOCONNECT;
1932 	return_abnormal_state(hba, DID_NO_CONNECT);
1933 	scsi_remove_host(hba->host);
1934 
1935 	scsi_block_requests(hba->host);
1936 
1937 	stex_hba_free(hba);
1938 
1939 	scsi_host_put(hba->host);
1940 
1941 	pci_disable_device(pdev);
1942 
1943 	unregister_reboot_notifier(&stex_notifier);
1944 }
1945 
1946 static void stex_shutdown(struct pci_dev *pdev)
1947 {
1948 	struct st_hba *hba = pci_get_drvdata(pdev);
1949 
1950 	if (hba->supports_pm == 0) {
1951 		stex_hba_stop(hba, ST_IGNORED);
1952 	} else if (hba->supports_pm == 1 && S6flag) {
1953 		unregister_reboot_notifier(&stex_notifier);
1954 		stex_hba_stop(hba, ST_S6);
1955 	} else
1956 		stex_hba_stop(hba, ST_S5);
1957 }
1958 
1959 static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state)
1960 {
1961 	switch (state.event) {
1962 	case PM_EVENT_SUSPEND:
1963 		return ST_S3;
1964 	case PM_EVENT_HIBERNATE:
1965 		hba->msi_lock = 0;
1966 		return ST_S4;
1967 	default:
1968 		return ST_NOTHANDLED;
1969 	}
1970 }
1971 
1972 static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
1973 {
1974 	struct st_hba *hba = pci_get_drvdata(pdev);
1975 
1976 	if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
1977 		&& hba->supports_pm == 1)
1978 		stex_hba_stop(hba, stex_choice_sleep_mic(hba, state));
1979 	else
1980 		stex_hba_stop(hba, ST_IGNORED);
1981 	return 0;
1982 }
1983 
1984 static int stex_resume(struct pci_dev *pdev)
1985 {
1986 	struct st_hba *hba = pci_get_drvdata(pdev);
1987 
1988 	hba->mu_status = MU_STATE_STARTING;
1989 	stex_handshake(hba);
1990 	return 0;
1991 }
1992 
1993 static int stex_halt(struct notifier_block *nb, unsigned long event, void *buf)
1994 {
1995 	S6flag = 1;
1996 	return NOTIFY_OK;
1997 }
1998 MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
1999 
2000 static struct pci_driver stex_pci_driver = {
2001 	.name		= DRV_NAME,
2002 	.id_table	= stex_pci_tbl,
2003 	.probe		= stex_probe,
2004 	.remove		= stex_remove,
2005 	.shutdown	= stex_shutdown,
2006 	.suspend	= stex_suspend,
2007 	.resume		= stex_resume,
2008 };
2009 
2010 static int __init stex_init(void)
2011 {
2012 	printk(KERN_INFO DRV_NAME
2013 		": Promise SuperTrak EX Driver version: %s\n",
2014 		 ST_DRIVER_VERSION);
2015 
2016 	return pci_register_driver(&stex_pci_driver);
2017 }
2018 
2019 static void __exit stex_exit(void)
2020 {
2021 	pci_unregister_driver(&stex_pci_driver);
2022 }
2023 
2024 module_init(stex_init);
2025 module_exit(stex_exit);
2026