// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

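/*
 * Logical drives are presented on the highest channel of the SCSI host;
 * all lower channels address physical devices.
 */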
static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/**
 * myrb_create_mempools - allocates auxiliary data structures
 * @pdev: PCI device
 * @cb: pointer to the hba structure
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
				       sizeof(struct myrb_dcdb),
				       sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/**
 * myrb_destroy_mempools - tears down the memory pools for the controller
 * @cb: pointer to the hba structure
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/**
 * myrb_reset_cmd - reset command block
 * @cmd_blk: pointer to the command block to reset
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

/**
 * myrb_qcmd - queues command block for execution
 * @cb: pointer to the hba structure
 * @cmd_blk: pointer to the command block to queue
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
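	/*
	 * Ring the hardware doorbell only if one of the two previously
	 * submitted mailboxes has already been consumed (words[0] == 0);
	 * otherwise the controller is still scanning the mailbox array
	 * and will pick up the new command on its own.
	 */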
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

/**
 * myrb_exec_cmd - executes command block and waits for completion.
 * @cb: pointer to the hba structure
 * @cmd_blk: pointer to the command block to execute
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	WARN_ON(in_interrupt());
	wait_for_completion(&cmpl);
	return cmd_blk->status;
}

/**
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @addr: DMA address of the command data
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/**
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @sdev: SCSI device to address
 * @pdev_info: physical device state buffer
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
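	/*
	 * Older firmware returns the device state in a legacy format;
	 * translate it into the current myrb_pdev_state layout.
	 */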
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/**
 * myrb_get_errtable - retrieves the error table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

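	/*
	 * Snapshot the previous error table so that only entries whose
	 * error counters have changed are logged below.
	 */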
	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

/**
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/**
 * myrb_get_rbld_progress - get rebuild progress information
 * @cb: pointer to the hba structure
 * @rbld: buffer for the rebuild progress information (may be NULL)
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

/**
 * myrb_update_rbld_progress - updates the rebuild status
 * @cb: pointer to the hba structure
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
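	/*
	 * If a rebuild was in progress on the previous pass and the
	 * controller no longer reports one, it completed successfully.
	 */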
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

/**
 * myrb_get_cc_progress - retrieve the rebuild / consistency check progress
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and fetches the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/**
 * myrb_bgi_control - updates background initialisation status
 * @cb: pointer to the hba structure
 *
 * Executes a type 3B command and updates the background initialisation status.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				 "Background Initialization in Progress: %d%% completed\n",
				 (100 * (bgi->blocks_done >> 7))
				 / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		/* Fallthrough */
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/**
 * myrb_hba_enquiry - updates the controller status
 * @cb: pointer to the hba structure
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

/**
 * myrb_set_pdev_state - sets the device state for a physical device
 * @cb: pointer to the hba structure
 * @sdev: SCSI device to set the state for
 * @state: new device state
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}

/**
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 * @cb: pointer to the hba structure
 * @mmio_init_fn: controller-specific mailbox initialization function
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
	    sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
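	/*
	 * Opcode 0x2B with opcode2 0x14 selects the dual-mode mailbox
	 * interface; if the controller rejects it, retry below with
	 * opcode2 0x10 for the single-mode interface.
	 */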
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
				status);
			return false;
		}
	}
	return true;
}

/**
 * myrb_get_hba_config - reads the configuration information
 * @cb: pointer to the hba structure
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		    5.06 and above
	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
	 * DAC960PU/PD/PL	    3.51 and above
	 * DAC960PU/PD/PL/P	    2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
	 *         or D040349 (3-channel)
	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
	 *         or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	sprintf(cb->fw_version, "%d.%02d-%c-%02d",
		enquiry2->fw.major_version,
		enquiry2->fw.minor_version,
		enquiry2->fw.firmware_type,
		enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			"Firmware Version '%s' unsupported\n",
			cb->fw_version);
		goto out;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
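	/*
	 * blocks_per_stripe and blocks_per_cacheline are given in units
	 * of block_factor device blocks; the shift converts the result
	 * to kilobytes, as logged below.
	 */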
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;

out:
	shost_printk(KERN_INFO, cb->host,
		"Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		"  Firmware Version: %s, Memory Size: %dMB\n",
		cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			cb->irq);
	shost_printk(KERN_INFO, cb->host,
		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     "  Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     "  Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

/**
 * myrb_unmap - unmaps controller structures
 * @cb: pointer to the hba structure
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/**
 * myrb_cleanup - cleanup controller structures
 * @cb: pointer to the hba structure
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
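	/* The DCDB passthrough interface carries at most one data segment. */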
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
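	/*
	 * Offset the block-layer tag past the command IDs reserved for
	 * the driver's internal commands.
	 */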
	mbox->type3.id = scmd->request->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (scmd->request->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (scmd->request->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (scmd->request->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}

static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

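	/*
	 * Bytes 8-15 of the INQUIRY data carry the vendor ID "MYLEX";
	 * the model name and firmware version are patched in below.
	 */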
	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		/* The block length is a 24-bit field in bytes 5-7 */
		block_desc[5] = (cb->ldev_block_size >> 16) & 0xff;
		block_desc[6] = (cb->ldev_block_size >> 8) & 0xff;
		block_desc[7] = cb->ldev_block_size & 0xff;
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense_buffer(0, scmd->sense_buffer,
				NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (!ldev_info ||
	    (ldev_info->state != MYRB_DEVICE_ONLINE &&
	     ldev_info->state != MYRB_DEVICE_WO)) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scmd->scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
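		/* fall through */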
	case WRITE_6:
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
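		/* fall through */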
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
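		/* fall through */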
	case WRITE_12:
	case VERIFY_12: /* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request, invalid opcode */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
					ILLEGAL_REQUEST, 0x20, 0);
		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
		scmd->scsi_done(scmd);
		return 0;
	}

	myrb_reset_cmd(cmd_blk);
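	/*
	 * Offset the block-layer tag past the command IDs reserved for
	 * the driver's internal commands.
	 */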
	mbox->type5.id = scmd->request->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = (u32)sg_dma_address(sgl);
	} else {
		struct myrb_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl)
			return SCSI_MLQUEUE_HOST_BUSY;

		cmd_blk->sgl = hw_sgl;
		cmd_blk->sgl_addr = hw_sgl_addr;

		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ_SG;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE_SG;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = hw_sgl_addr;
		mbox->type5.sg_count = nsge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}

static int myrb_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;

	if (sdev->channel > myrb_logical_channel(shost)) {
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	if (sdev->channel == myrb_logical_channel(shost))
		return myrb_ldev_queuecommand(shost, scmd);

	return myrb_pthru_queuecommand(shost, scmd);
}

static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info;
	unsigned short ldev_num = sdev->id;
	enum raid_level level;

	if (ldev_num >= MYRB_MAX_LDEVS || !cb->ldev_info_buf)
		return -ENXIO;

	ldev_info = cb->ldev_info_buf + ldev_num;

	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
	if (!sdev->hostdata)
		return -ENOMEM;
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc ldev %d state %x\n",
		ldev_num, ldev_info->state);
	memcpy(sdev->hostdata, ldev_info,
	       sizeof(*ldev_info));
	switch (ldev_info->raid_level) {
	case MYRB_RAID_LEVEL0:
		level = RAID_LEVEL_LINEAR;
		break;
	case MYRB_RAID_LEVEL1:
		level = RAID_LEVEL_1;
		break;
	case MYRB_RAID_LEVEL3:
		level = RAID_LEVEL_3;
		break;
	case MYRB_RAID_LEVEL5:
		level = RAID_LEVEL_5;
		break;
	case MYRB_RAID_LEVEL6:
		level = RAID_LEVEL_6;
		break;
	case MYRB_RAID_JBOD:
		level = RAID_LEVEL_JBOD;
		break;
	default:
		level = RAID_LEVEL_UNKNOWN;
		break;
	}
	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
	return 0;
}

static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	unsigned short status;

	if (sdev->id >= MYRB_MAX_TARGETS)
		return -ENXIO;

	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
	if (!pdev_info)
		return -ENOMEM;

	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
				  sdev, pdev_info);
	if (status != MYRB_STATUS_SUCCESS) {
		dev_dbg(&sdev->sdev_gendev,
			"Failed to get device state, status %x\n",
			status);
		kfree(pdev_info);
		return -ENXIO;
	}
	if (!pdev_info->present) {
		dev_dbg(&sdev->sdev_gendev,
			"device not present, skip\n");
		kfree(pdev_info);
		return -ENXIO;
	}
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc pdev %d:%d state %x\n",
		sdev->channel, sdev->id, pdev_info->state);
	sdev->hostdata = pdev_info;

	return 0;
}

1728 static int myrb_slave_alloc(struct scsi_device *sdev)
1729 {
1730 	if (sdev->channel > myrb_logical_channel(sdev->host))
1731 		return -ENXIO;
1732 
1733 	if (sdev->lun > 0)
1734 		return -ENXIO;
1735 
1736 	if (sdev->channel == myrb_logical_channel(sdev->host))
1737 		return myrb_ldev_slave_alloc(sdev);
1738 
1739 	return myrb_pdev_slave_alloc(sdev);
1740 }
1741 
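/*
 * Physical devices behind the controller are exposed for management
 * only; no_uld_attach keeps the upper-level drivers (sd etc.) from
 * binding to them, as regular I/O must go via the logical drives.
 */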
1742 static int myrb_slave_configure(struct scsi_device *sdev)
1743 {
1744 	struct myrb_ldev_info *ldev_info;
1745 
1746 	if (sdev->channel > myrb_logical_channel(sdev->host))
1747 		return -ENXIO;
1748 
1749 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
1750 		sdev->no_uld_attach = 1;
1751 		return 0;
1752 	}
1753 	if (sdev->lun != 0)
1754 		return -ENXIO;
1755 
1756 	ldev_info = sdev->hostdata;
1757 	if (!ldev_info)
1758 		return -ENXIO;
1759 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1760 		sdev_printk(KERN_INFO, sdev,
1761 			    "Logical drive is %s\n",
1762 			    myrb_devstate_name(ldev_info->state));
1763 
1764 	sdev->tagged_supported = 1;
1765 	return 0;
1766 }
1767 
1768 static void myrb_slave_destroy(struct scsi_device *sdev)
1769 {
1770 	kfree(sdev->hostdata);
1771 }
1772 
1773 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1774 		sector_t capacity, int geom[])
1775 {
1776 	struct myrb_hba *cb = shost_priv(sdev->host);
1777 
1778 	geom[0] = cb->ldev_geom_heads;
1779 	geom[1] = cb->ldev_geom_sectors;
1780 	/*
	 * sector_div() divides in place and returns the remainder;
	 * the cylinder count is the quotient left in capacity.
	 */
	sector_div(capacity, geom[0] * geom[1]);
	geom[2] = (int)capacity;
1781 
1782 	return 0;
1783 }
1784 
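/*
 * sysfs device attributes: raid_state can be written with "kill"/
 * "offline", "online" or "standby" to change a physical device's
 * state; rebuild and consistency_check take 1 to start and 0 to
 * cancel the respective background operation.
 */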
1785 static ssize_t raid_state_show(struct device *dev,
1786 		struct device_attribute *attr, char *buf)
1787 {
1788 	struct scsi_device *sdev = to_scsi_device(dev);
1789 	struct myrb_hba *cb = shost_priv(sdev->host);
1790 	int ret;
1791 
1792 	if (!sdev->hostdata)
1793 		return snprintf(buf, 16, "Unknown\n");
1794 
1795 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1796 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1797 		const char *name;
1798 
1799 		name = myrb_devstate_name(ldev_info->state);
1800 		if (name)
1801 			ret = snprintf(buf, 32, "%s\n", name);
1802 		else
1803 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1804 				       ldev_info->state);
1805 	} else {
1806 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
1807 		unsigned short status;
1808 		const char *name;
1809 
1810 		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1811 					  sdev, pdev_info);
1812 		if (status != MYRB_STATUS_SUCCESS)
1813 			sdev_printk(KERN_INFO, sdev,
1814 				    "Failed to get device state, status %x\n",
1815 				    status);
1816 
1817 		if (!pdev_info->present)
1818 			name = "Removed";
1819 		else
1820 			name = myrb_devstate_name(pdev_info->state);
1821 		if (name)
1822 			ret = snprintf(buf, 32, "%s\n", name);
1823 		else
1824 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1825 				       pdev_info->state);
1826 	}
1827 	return ret;
1828 }
1829 
1830 static ssize_t raid_state_store(struct device *dev,
1831 		struct device_attribute *attr, const char *buf, size_t count)
1832 {
1833 	struct scsi_device *sdev = to_scsi_device(dev);
1834 	struct myrb_hba *cb = shost_priv(sdev->host);
1835 	struct myrb_pdev_state *pdev_info;
1836 	enum myrb_devstate new_state;
1837 	unsigned short status;
1838 
1839 	if (!strncmp(buf, "kill", 4) ||
1840 	    !strncmp(buf, "offline", 7))
1841 		new_state = MYRB_DEVICE_DEAD;
1842 	else if (!strncmp(buf, "online", 6))
1843 		new_state = MYRB_DEVICE_ONLINE;
1844 	else if (!strncmp(buf, "standby", 7))
1845 		new_state = MYRB_DEVICE_STANDBY;
1846 	else
1847 		return -EINVAL;
1848 
1849 	pdev_info = sdev->hostdata;
1850 	if (!pdev_info) {
1851 		sdev_printk(KERN_INFO, sdev,
1852 			    "Failed - no physical device information\n");
1853 		return -ENXIO;
1854 	}
1855 	if (!pdev_info->present) {
1856 		sdev_printk(KERN_INFO, sdev,
1857 			    "Failed - device not present\n");
1858 		return -ENXIO;
1859 	}
1860 
1861 	if (pdev_info->state == new_state)
1862 		return count;
1863 
1864 	status = myrb_set_pdev_state(cb, sdev, new_state);
1865 	switch (status) {
1866 	case MYRB_STATUS_SUCCESS:
1867 		break;
1868 	case MYRB_STATUS_START_DEVICE_FAILED:
1869 		sdev_printk(KERN_INFO, sdev,
1870 			     "Failed - Unable to Start Device\n");
1871 		count = -EAGAIN;
1872 		break;
1873 	case MYRB_STATUS_NO_DEVICE:
1874 		sdev_printk(KERN_INFO, sdev,
1875 			    "Failed - No Device at Address\n");
1876 		count = -ENODEV;
1877 		break;
1878 	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1879 		sdev_printk(KERN_INFO, sdev,
1880 			 "Failed - Invalid Channel or Target or Modifier\n");
1881 		count = -EINVAL;
1882 		break;
1883 	case MYRB_STATUS_CHANNEL_BUSY:
1884 		sdev_printk(KERN_INFO, sdev,
1885 			 "Failed - Channel Busy\n");
1886 		count = -EBUSY;
1887 		break;
1888 	default:
1889 		sdev_printk(KERN_INFO, sdev,
1890 			 "Failed - Unexpected Status %04X\n", status);
1891 		count = -EIO;
1892 		break;
1893 	}
1894 	return count;
1895 }
1896 static DEVICE_ATTR_RW(raid_state);
1897 
1898 static ssize_t raid_level_show(struct device *dev,
1899 		struct device_attribute *attr, char *buf)
1900 {
1901 	struct scsi_device *sdev = to_scsi_device(dev);
1902 
1903 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1904 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1905 		const char *name;
1906 
1907 		if (!ldev_info)
1908 			return -ENXIO;
1909 
1910 		name = myrb_raidlevel_name(ldev_info->raid_level);
1911 		if (!name)
1912 			return snprintf(buf, 32, "Invalid (%02X)\n",
1913 					ldev_info->raid_level);
1914 		return snprintf(buf, 32, "%s\n", name);
1915 	}
1916 	return snprintf(buf, 32, "Physical Drive\n");
1917 }
1918 static DEVICE_ATTR_RO(raid_level);
1919 
1920 static ssize_t rebuild_show(struct device *dev,
1921 		struct device_attribute *attr, char *buf)
1922 {
1923 	struct scsi_device *sdev = to_scsi_device(dev);
1924 	struct myrb_hba *cb = shost_priv(sdev->host);
1925 	struct myrb_rbld_progress rbld_buf;
1926 	unsigned char status;
1927 
1928 	if (sdev->channel < myrb_logical_channel(sdev->host))
1929 		return snprintf(buf, 32, "physical device - not rebuilding\n");
1930 
1931 	status = myrb_get_rbld_progress(cb, &rbld_buf);
1932 
1933 	if (status != MYRB_STATUS_SUCCESS ||
1934 	    rbld_buf.ldev_num != sdev->id)
1935 		return snprintf(buf, 32, "not rebuilding\n");
1936 
1937 	return snprintf(buf, 32, "rebuilding block %u of %u\n",
1938 			rbld_buf.ldev_size - rbld_buf.blocks_left,
1939 			rbld_buf.ldev_size);
1940 }
1941 
1942 static ssize_t rebuild_store(struct device *dev,
1943 		struct device_attribute *attr, const char *buf, size_t count)
1944 {
1945 	struct scsi_device *sdev = to_scsi_device(dev);
1946 	struct myrb_hba *cb = shost_priv(sdev->host);
1947 	struct myrb_cmdblk *cmd_blk;
1948 	union myrb_cmd_mbox *mbox;
1949 	unsigned short status;
1950 	int rc, start;
1951 	const char *msg;
1952 
1953 	rc = kstrtoint(buf, 0, &start);
1954 	if (rc)
1955 		return rc;
1956 
1957 	if (sdev->channel >= myrb_logical_channel(sdev->host))
1958 		return -ENXIO;
1959 
1960 	status = myrb_get_rbld_progress(cb, NULL);
1961 	if (start) {
1962 		if (status == MYRB_STATUS_SUCCESS) {
1963 			sdev_printk(KERN_INFO, sdev,
1964 				    "Rebuild Not Initiated; already in progress\n");
1965 			return -EALREADY;
1966 		}
1967 		mutex_lock(&cb->dcmd_mutex);
1968 		cmd_blk = &cb->dcmd_blk;
1969 		myrb_reset_cmd(cmd_blk);
1970 		mbox = &cmd_blk->mbox;
1971 		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1972 		mbox->type3D.id = MYRB_DCMD_TAG;
1973 		mbox->type3D.channel = sdev->channel;
1974 		mbox->type3D.target = sdev->id;
1975 		status = myrb_exec_cmd(cb, cmd_blk);
1976 		mutex_unlock(&cb->dcmd_mutex);
1977 	} else {
1978 		struct pci_dev *pdev = cb->pdev;
1979 		unsigned char *rate;
1980 		dma_addr_t rate_addr;
1981 
1982 		if (status != MYRB_STATUS_SUCCESS) {
1983 			sdev_printk(KERN_INFO, sdev,
1984 				    "Rebuild Not Cancelled; not in progress\n");
1985 			return 0;
1986 		}
1987 
1988 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1989 					  &rate_addr, GFP_KERNEL);
1990 		if (rate == NULL) {
1991 			sdev_printk(KERN_INFO, sdev,
1992 				    "Cancellation of Rebuild Failed - Out of Memory\n");
1993 			return -ENOMEM;
1994 		}
1995 		mutex_lock(&cb->dcmd_mutex);
1996 		cmd_blk = &cb->dcmd_blk;
1997 		myrb_reset_cmd(cmd_blk);
1998 		mbox = &cmd_blk->mbox;
1999 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2000 		mbox->type3R.id = MYRB_DCMD_TAG;
2001 		mbox->type3R.rbld_rate = 0xFF;
2002 		mbox->type3R.addr = rate_addr;
2003 		status = myrb_exec_cmd(cb, cmd_blk);
2004 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2005 		mutex_unlock(&cb->dcmd_mutex);
2006 	}
2007 	if (status == MYRB_STATUS_SUCCESS) {
2008 		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2009 			    start ? "Initiated" : "Cancelled");
2010 		return count;
2011 	}
2012 	if (!start) {
2013 		sdev_printk(KERN_INFO, sdev,
2014 			    "Rebuild Not Cancelled, status 0x%x\n",
2015 			    status);
2016 		return -EIO;
2017 	}
2018 
2019 	switch (status) {
2020 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2021 		msg = "Attempt to Rebuild Online or Unresponsive Drive";
2022 		break;
2023 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2024 		msg = "New Disk Failed During Rebuild";
2025 		break;
2026 	case MYRB_STATUS_INVALID_ADDRESS:
2027 		msg = "Invalid Device Address";
2028 		break;
2029 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2030 		msg = "Already in Progress";
2031 		break;
2032 	default:
2033 		msg = NULL;
2034 		break;
2035 	}
2036 	if (msg)
2037 		sdev_printk(KERN_INFO, sdev,
2038 			    "Rebuild Failed - %s\n", msg);
2039 	else
2040 		sdev_printk(KERN_INFO, sdev,
2041 			    "Rebuild Failed, status 0x%x\n", status);
2042 
2043 	return -EIO;
2044 }
2045 static DEVICE_ATTR_RW(rebuild);
2046 
2047 static ssize_t consistency_check_store(struct device *dev,
2048 		struct device_attribute *attr, const char *buf, size_t count)
2049 {
2050 	struct scsi_device *sdev = to_scsi_device(dev);
2051 	struct myrb_hba *cb = shost_priv(sdev->host);
2052 	struct myrb_rbld_progress rbld_buf;
2053 	struct myrb_cmdblk *cmd_blk;
2054 	union myrb_cmd_mbox *mbox;
2055 	unsigned short ldev_num = 0xFFFF;
2056 	unsigned short status;
2057 	int rc, start;
2058 	const char *msg;
2059 
2060 	rc = kstrtoint(buf, 0, &start);
2061 	if (rc)
2062 		return rc;
2063 
2064 	if (sdev->channel < myrb_logical_channel(sdev->host))
2065 		return -ENXIO;
2066 
2067 	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS)
		ldev_num = rbld_buf.ldev_num;
2068 	if (start) {
2069 		if (status == MYRB_STATUS_SUCCESS) {
2070 			sdev_printk(KERN_INFO, sdev,
2071 				    "Check Consistency Not Initiated; already in progress\n");
2072 			return -EALREADY;
2073 		}
2074 		mutex_lock(&cb->dcmd_mutex);
2075 		cmd_blk = &cb->dcmd_blk;
2076 		myrb_reset_cmd(cmd_blk);
2077 		mbox = &cmd_blk->mbox;
2078 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2079 		mbox->type3C.id = MYRB_DCMD_TAG;
2080 		mbox->type3C.ldev_num = sdev->id;
2081 		mbox->type3C.auto_restore = true;
2082 
2083 		status = myrb_exec_cmd(cb, cmd_blk);
2084 		mutex_unlock(&cb->dcmd_mutex);
2085 	} else {
2086 		struct pci_dev *pdev = cb->pdev;
2087 		unsigned char *rate;
2088 		dma_addr_t rate_addr;
2089 
2090 		if (ldev_num != sdev->id) {
2091 			sdev_printk(KERN_INFO, sdev,
2092 				    "Check Consistency Not Cancelled; not in progress\n");
2093 			return 0;
2094 		}
2095 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2096 					  &rate_addr, GFP_KERNEL);
2097 		if (rate == NULL) {
2098 			sdev_printk(KERN_INFO, sdev,
2099 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2100 			return -ENOMEM;
2101 		}
2102 		mutex_lock(&cb->dcmd_mutex);
2103 		cmd_blk = &cb->dcmd_blk;
2104 		myrb_reset_cmd(cmd_blk);
2105 		mbox = &cmd_blk->mbox;
2106 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2107 		mbox->type3R.id = MYRB_DCMD_TAG;
2108 		mbox->type3R.rbld_rate = 0xFF;
2109 		mbox->type3R.addr = rate_addr;
2110 		status = myrb_exec_cmd(cb, cmd_blk);
2111 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2112 		mutex_unlock(&cb->dcmd_mutex);
2113 	}
2114 	if (status == MYRB_STATUS_SUCCESS) {
2115 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2116 			    start ? "Initiated" : "Cancelled");
2117 		return count;
2118 	}
2119 	if (!start) {
2120 		sdev_printk(KERN_INFO, sdev,
2121 			    "Check Consistency Not Cancelled, status 0x%x\n",
2122 			    status);
2123 		return -EIO;
2124 	}
2125 
2126 	switch (status) {
2127 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2128 		msg = "Dependent Physical Device is DEAD";
2129 		break;
2130 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2131 		msg = "New Disk Failed During Rebuild";
2132 		break;
2133 	case MYRB_STATUS_INVALID_ADDRESS:
2134 		msg = "Invalid or Nonredundant Logical Drive";
2135 		break;
2136 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2137 		msg = "Already in Progress";
2138 		break;
2139 	default:
2140 		msg = NULL;
2141 		break;
2142 	}
2143 	if (msg)
2144 		sdev_printk(KERN_INFO, sdev,
2145 			    "Check Consistency Failed - %s\n", msg);
2146 	else
2147 		sdev_printk(KERN_INFO, sdev,
2148 			    "Check Consistency Failed, status 0x%x\n", status);
2149 
2150 	return -EIO;
2151 }
2152 
2153 static ssize_t consistency_check_show(struct device *dev,
2154 		struct device_attribute *attr, char *buf)
2155 {
2156 	return rebuild_show(dev, attr, buf);
2157 }
2158 static DEVICE_ATTR_RW(consistency_check);
2159 
2160 static ssize_t ctlr_num_show(struct device *dev,
2161 		struct device_attribute *attr, char *buf)
2162 {
2163 	struct Scsi_Host *shost = class_to_shost(dev);
2164 	struct myrb_hba *cb = shost_priv(shost);
2165 
2166 	return snprintf(buf, 20, "%d\n", cb->ctlr_num);
2167 }
2168 static DEVICE_ATTR_RO(ctlr_num);
2169 
2170 static ssize_t firmware_show(struct device *dev,
2171 		struct device_attribute *attr, char *buf)
2172 {
2173 	struct Scsi_Host *shost = class_to_shost(dev);
2174 	struct myrb_hba *cb = shost_priv(shost);
2175 
2176 	return snprintf(buf, 16, "%s\n", cb->fw_version);
2177 }
2178 static DEVICE_ATTR_RO(firmware);
2179 
2180 static ssize_t model_show(struct device *dev,
2181 		struct device_attribute *attr, char *buf)
2182 {
2183 	struct Scsi_Host *shost = class_to_shost(dev);
2184 	struct myrb_hba *cb = shost_priv(shost);
2185 
2186 	return snprintf(buf, 16, "%s\n", cb->model_name);
2187 }
2188 static DEVICE_ATTR_RO(model);
2189 
2190 static ssize_t flush_cache_store(struct device *dev,
2191 		struct device_attribute *attr, const char *buf, size_t count)
2192 {
2193 	struct Scsi_Host *shost = class_to_shost(dev);
2194 	struct myrb_hba *cb = shost_priv(shost);
2195 	unsigned short status;
2196 
2197 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2198 	if (status == MYRB_STATUS_SUCCESS) {
2199 		shost_printk(KERN_INFO, shost,
2200 			     "Cache Flush Completed\n");
2201 		return count;
2202 	}
2203 	shost_printk(KERN_INFO, shost,
2204 		     "Cache Flush Failed, status %x\n", status);
2205 	return -EIO;
2206 }
2207 static DEVICE_ATTR_WO(flush_cache);
2208 
2209 static struct device_attribute *myrb_sdev_attrs[] = {
2210 	&dev_attr_rebuild,
2211 	&dev_attr_consistency_check,
2212 	&dev_attr_raid_state,
2213 	&dev_attr_raid_level,
2214 	NULL,
2215 };
2216 
2217 static struct device_attribute *myrb_shost_attrs[] = {
2218 	&dev_attr_ctlr_num,
2219 	&dev_attr_model,
2220 	&dev_attr_firmware,
2221 	&dev_attr_flush_cache,
2222 	NULL,
2223 };
2224 
2225 struct scsi_host_template myrb_template = {
2226 	.module			= THIS_MODULE,
2227 	.name			= "DAC960",
2228 	.proc_name		= "myrb",
2229 	.queuecommand		= myrb_queuecommand,
2230 	.eh_host_reset_handler	= myrb_host_reset,
2231 	.slave_alloc		= myrb_slave_alloc,
2232 	.slave_configure	= myrb_slave_configure,
2233 	.slave_destroy		= myrb_slave_destroy,
2234 	.bios_param		= myrb_biosparam,
2235 	.cmd_size		= sizeof(struct myrb_cmdblk),
2236 	.shost_attrs		= myrb_shost_attrs,
2237 	.sdev_attrs		= myrb_sdev_attrs,
2238 	.this_id		= -1,
2239 };
2240 
2241 /**
2242  * myrb_is_raid - return boolean indicating device is raid volume
2243  * @dev: the device struct object
2244  */
2245 static int myrb_is_raid(struct device *dev)
2246 {
2247 	struct scsi_device *sdev = to_scsi_device(dev);
2248 
2249 	return sdev->channel == myrb_logical_channel(sdev->host);
2250 }
2251 
2252 /**
2253  * myrb_get_resync - get raid volume resync percent complete
2254  * @dev: the device struct object
2255  */
2256 static void myrb_get_resync(struct device *dev)
2257 {
2258 	struct scsi_device *sdev = to_scsi_device(dev);
2259 	struct myrb_hba *cb = shost_priv(sdev->host);
2260 	struct myrb_rbld_progress rbld_buf;
2261 	unsigned int percent_complete = 0;
2262 	unsigned short status;
2263 	unsigned int ldev_size = 0, remaining = 0;
2264 
2265 	if (sdev->channel < myrb_logical_channel(sdev->host))
2266 		return;
2267 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2268 	if (status == MYRB_STATUS_SUCCESS) {
2269 		if (rbld_buf.ldev_num == sdev->id) {
2270 			ldev_size = rbld_buf.ldev_size;
2271 			remaining = rbld_buf.blocks_left;
2272 		}
2273 	}
2274 	if (remaining && ldev_size)
2275 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2276 	raid_set_resync(myrb_raid_template, dev, percent_complete);
2277 }
2278 
2279 /**
2280  * myrb_get_state - get raid volume status
2281  * @dev: the device struct object
2282  */
2283 static void myrb_get_state(struct device *dev)
2284 {
2285 	struct scsi_device *sdev = to_scsi_device(dev);
2286 	struct myrb_hba *cb = shost_priv(sdev->host);
2287 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2288 	enum raid_state state = RAID_STATE_UNKNOWN;
2289 	unsigned short status;
2290 
2291 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2292 		state = RAID_STATE_UNKNOWN;
2293 	else {
2294 		status = myrb_get_rbld_progress(cb, NULL);
2295 		if (status == MYRB_STATUS_SUCCESS)
2296 			state = RAID_STATE_RESYNCING;
2297 		else {
2298 			switch (ldev_info->state) {
2299 			case MYRB_DEVICE_ONLINE:
2300 				state = RAID_STATE_ACTIVE;
2301 				break;
2302 			case MYRB_DEVICE_WO:
2303 			case MYRB_DEVICE_CRITICAL:
2304 				state = RAID_STATE_DEGRADED;
2305 				break;
2306 			default:
2307 				state = RAID_STATE_OFFLINE;
2308 			}
2309 		}
2310 	}
2311 	raid_set_state(myrb_raid_template, dev, state);
2312 }
2313 
2314 struct raid_function_template myrb_raid_functions = {
2315 	.cookie		= &myrb_template,
2316 	.is_raid	= myrb_is_raid,
2317 	.get_resync	= myrb_get_resync,
2318 	.get_state	= myrb_get_state,
2319 };
2320 
2321 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2322 		struct scsi_cmnd *scmd)
2323 {
2324 	unsigned short status;
2325 
2326 	if (!cmd_blk)
2327 		return;
2328 
2329 	scsi_dma_unmap(scmd);
2330 
2331 	if (cmd_blk->dcdb) {
2332 		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense,
		       sizeof(cmd_blk->dcdb->sense));
2333 		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2334 			      cmd_blk->dcdb_addr);
2335 		cmd_blk->dcdb = NULL;
2336 	}
2337 	if (cmd_blk->sgl) {
2338 		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2339 		cmd_blk->sgl = NULL;
2340 		cmd_blk->sgl_addr = 0;
2341 	}
2342 	status = cmd_blk->status;
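	/*
	 * Translate the controller status into a SCSI result,
	 * synthesizing sense data for media and addressing errors so
	 * the midlayer handles them like ordinary SCSI failures.
	 */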
2343 	switch (status) {
2344 	case MYRB_STATUS_SUCCESS:
2345 	case MYRB_STATUS_DEVICE_BUSY:
2346 		scmd->result = (DID_OK << 16) | status;
2347 		break;
2348 	case MYRB_STATUS_BAD_DATA:
2349 		dev_dbg(&scmd->device->sdev_gendev,
2350 			"Bad Data Encountered\n");
2351 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2352 			/* Unrecovered read error */
2353 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2354 						MEDIUM_ERROR, 0x11, 0);
2355 		else
2356 			/* Write error */
2357 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2358 						MEDIUM_ERROR, 0x0C, 0);
2359 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2360 		break;
2361 	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2362 		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2363 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2364 			/* Unrecovered read error, auto-reallocation failed */
2365 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2366 						MEDIUM_ERROR, 0x11, 0x04);
2367 		else
2368 			/* Write error, auto-reallocation failed */
2369 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2370 						MEDIUM_ERROR, 0x0C, 0x02);
2371 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2372 		break;
2373 	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2374 		dev_dbg(&scmd->device->sdev_gendev,
2375 			    "Logical Drive Nonexistent or Offline");
2376 		scmd->result = (DID_BAD_TARGET << 16);
2377 		break;
2378 	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2379 		dev_dbg(&scmd->device->sdev_gendev,
2380 			    "Attempt to Access Beyond End of Logical Drive");
2381 		/* Logical block address out of range */
2382 		scsi_build_sense_buffer(0, scmd->sense_buffer,
2383 					NOT_READY, 0x21, 0);
2384 		break;
2385 	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2386 		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2387 		scmd->result = (DID_BAD_TARGET << 16);
2388 		break;
2389 	default:
2390 		scmd_printk(KERN_ERR, scmd,
2391 			    "Unexpected Error Status %04X", status);
2392 		scmd->result = (DID_ERROR << 16);
2393 		break;
2394 	}
2395 	scmd->scsi_done(scmd);
2396 }
2397 
2398 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2399 {
2400 	if (!cmd_blk)
2401 		return;
2402 
2403 	if (cmd_blk->completion) {
2404 		complete(cmd_blk->completion);
2405 		cmd_blk->completion = NULL;
2406 	}
2407 }
2408 
2409 static void myrb_monitor(struct work_struct *work)
2410 {
2411 	struct myrb_hba *cb = container_of(work,
2412 			struct myrb_hba, monitor_work.work);
2413 	struct Scsi_Host *shost = cb->host;
2414 	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2415 
2416 	dev_dbg(&shost->shost_gendev, "monitor tick\n");
2417 
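	/*
	 * Process pending controller state in priority order: events,
	 * error table, rebuild progress, logical drive info,
	 * consistency check and background initialization status.
	 * While any of these are outstanding, poll again after 10
	 * jiffies; otherwise issue a new enquiry at the normal
	 * monitoring interval.
	 */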
2418 	if (cb->new_ev_seq > cb->old_ev_seq) {
2419 		int event = cb->old_ev_seq;
2420 
2421 		dev_dbg(&shost->shost_gendev,
2422 			"get event log no %d/%d\n",
2423 			cb->new_ev_seq, event);
2424 		myrb_get_event(cb, event);
2425 		cb->old_ev_seq = event + 1;
2426 		interval = 10;
2427 	} else if (cb->need_err_info) {
2428 		cb->need_err_info = false;
2429 		dev_dbg(&shost->shost_gendev, "get error table\n");
2430 		myrb_get_errtable(cb);
2431 		interval = 10;
2432 	} else if (cb->need_rbld && cb->rbld_first) {
2433 		cb->need_rbld = false;
2434 		dev_dbg(&shost->shost_gendev,
2435 			"get rebuild progress\n");
2436 		myrb_update_rbld_progress(cb);
2437 		interval = 10;
2438 	} else if (cb->need_ldev_info) {
2439 		cb->need_ldev_info = false;
2440 		dev_dbg(&shost->shost_gendev,
2441 			"get logical drive info\n");
2442 		myrb_get_ldev_info(cb);
2443 		interval = 10;
2444 	} else if (cb->need_rbld) {
2445 		cb->need_rbld = false;
2446 		dev_dbg(&shost->shost_gendev,
2447 			"get rebuild progress\n");
2448 		myrb_update_rbld_progress(cb);
2449 		interval = 10;
2450 	} else if (cb->need_cc_status) {
2451 		cb->need_cc_status = false;
2452 		dev_dbg(&shost->shost_gendev,
2453 			"get consistency check progress\n");
2454 		myrb_get_cc_progress(cb);
2455 		interval = 10;
2456 	} else if (cb->need_bgi_status) {
2457 		cb->need_bgi_status = false;
2458 		dev_dbg(&shost->shost_gendev, "get background init status\n");
2459 		myrb_bgi_control(cb);
2460 		interval = 10;
2461 	} else {
2462 		dev_dbg(&shost->shost_gendev, "new enquiry\n");
2463 		mutex_lock(&cb->dma_mutex);
2464 		myrb_hba_enquiry(cb);
2465 		mutex_unlock(&cb->dma_mutex);
2466 		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2467 		    cb->need_err_info || cb->need_rbld ||
2468 		    cb->need_ldev_info || cb->need_cc_status ||
2469 		    cb->need_bgi_status) {
2470 			dev_dbg(&shost->shost_gendev,
2471 				"reschedule monitor\n");
2472 			interval = 0;
2473 		}
2474 	}
2475 	if (interval > 1)
2476 		cb->primary_monitor_time = jiffies;
2477 	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2478 }
2479 
2480 /**
2481  * myrb_err_status - reports controller BIOS messages
 * @cb: myrb_hba pointer
 * @error: error status byte from the Error Status Register
 * @parm0: first error parameter byte
 * @parm1: second error parameter byte
2482  *
2483  * Controller BIOS messages are passed through the Error Status Register
2484  * when the driver performs the BIOS handshaking.
2485  *
2486  * Return: true for fatal errors and false otherwise.
2487  */
2488 bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2489 		unsigned char parm0, unsigned char parm1)
2490 {
2491 	struct pci_dev *pdev = cb->pdev;
2492 
2493 	switch (error) {
2494 	case 0x00:
2495 		dev_info(&pdev->dev,
2496 			 "Physical Device %d:%d Not Responding\n",
2497 			 parm1, parm0);
2498 		break;
2499 	case 0x08:
2500 		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2501 		break;
2502 	case 0x30:
2503 		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2504 		break;
2505 	case 0x60:
2506 		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2507 		break;
2508 	case 0x70:
2509 		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2510 		break;
2511 	case 0x90:
2512 		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2513 			   parm1, parm0);
2514 		break;
2515 	case 0xA0:
2516 		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2517 		break;
2518 	case 0xB0:
2519 		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2520 		break;
2521 	case 0xD0:
2522 		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2523 		break;
2524 	case 0xF0:
2525 		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2526 		return true;
2527 	default:
2528 		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2529 			error);
2530 		return true;
2531 	}
2532 	return false;
2533 }
2534 
2535 /*
2536  * Hardware-specific functions
2537  */
2538 
2539 /*
2540  * DAC960 LA Series Controllers
2541  */
2542 
2543 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2544 {
2545 	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2546 }
2547 
2548 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2549 {
2550 	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2551 }
2552 
2553 static inline void DAC960_LA_gen_intr(void __iomem *base)
2554 {
2555 	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
2556 }
2557 
2558 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2559 {
2560 	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2561 }
2562 
2563 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2564 {
2565 	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2566 }
2567 
2568 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2569 {
2570 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2571 
2572 	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2573 }
2574 
2575 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2576 {
2577 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2578 
2579 	return !(idb & DAC960_LA_IDB_INIT_DONE);
2580 }
2581 
2582 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2583 {
2584 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2585 }
2586 
2587 static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
2588 {
2589 	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2590 }
2591 
2592 static inline void DAC960_LA_ack_intr(void __iomem *base)
2593 {
2594 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2595 	       base + DAC960_LA_ODB_OFFSET);
2596 }
2597 
2598 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2599 {
2600 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2601 
2602 	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2603 }
2604 
2605 static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
2606 {
2607 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2608 
2609 	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
2610 }
2611 
2612 static inline void DAC960_LA_enable_intr(void __iomem *base)
2613 {
2614 	unsigned char odb = 0xFF;
2615 
2616 	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2617 	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2618 }
2619 
2620 static inline void DAC960_LA_disable_intr(void __iomem *base)
2621 {
2622 	unsigned char odb = 0xFF;
2623 
2624 	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2625 	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2626 }
2627 
2628 static inline bool DAC960_LA_intr_enabled(void __iomem *base)
2629 {
2630 	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
2631 
2632 	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
2633 }
2634 
2635 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2636 		union myrb_cmd_mbox *mbox)
2637 {
2638 	mem_mbox->words[1] = mbox->words[1];
2639 	mem_mbox->words[2] = mbox->words[2];
2640 	mem_mbox->words[3] = mbox->words[3];
2641 	/* Memory barrier to prevent reordering */
2642 	wmb();
2643 	mem_mbox->words[0] = mbox->words[0];
2644 	/* Memory barrier to force PCI access */
2645 	mb();
2646 }
2647 
2648 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2649 		union myrb_cmd_mbox *mbox)
2650 {
2651 	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2652 	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2653 	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2654 	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2655 }
2656 
2657 static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
2658 {
2659 	return readb(base + DAC960_LA_STSID_OFFSET);
2660 }
2661 
2662 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2663 {
2664 	return readw(base + DAC960_LA_STS_OFFSET);
2665 }
2666 
2667 static inline bool
2668 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2669 		unsigned char *param0, unsigned char *param1)
2670 {
2671 	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2672 
2673 	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2674 		return false;
2675 	errsts &= ~DAC960_LA_ERRSTS_PENDING;
2676 
2677 	*error = errsts;
2678 	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2679 	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2680 	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2681 	return true;
2682 }
2683 
2684 static inline unsigned short
2685 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2686 		union myrb_cmd_mbox *mbox)
2687 {
2688 	unsigned short status;
2689 	int timeout = 0;
2690 
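	/*
	 * Post the mailbox-init command through the hardware mailbox:
	 * wait for the mailbox to drain, write the command, then poll
	 * for the status and acknowledge it.
	 */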
2691 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2692 		if (!DAC960_LA_hw_mbox_is_full(base))
2693 			break;
2694 		udelay(10);
2695 		timeout++;
2696 	}
2697 	if (DAC960_LA_hw_mbox_is_full(base)) {
2698 		dev_err(&pdev->dev,
2699 			"Timeout waiting for empty mailbox\n");
2700 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2701 	}
2702 	DAC960_LA_write_hw_mbox(base, mbox);
2703 	DAC960_LA_hw_mbox_new_cmd(base);
2704 	timeout = 0;
2705 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2706 		if (DAC960_LA_hw_mbox_status_available(base))
2707 			break;
2708 		udelay(10);
2709 		timeout++;
2710 	}
2711 	if (!DAC960_LA_hw_mbox_status_available(base)) {
2712 		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2713 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2714 	}
2715 	status = DAC960_LA_read_status(base);
2716 	DAC960_LA_ack_hw_mbox_intr(base);
2717 	DAC960_LA_ack_hw_mbox_status(base);
2718 
2719 	return status;
2720 }
2721 
2722 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2723 		struct myrb_hba *cb, void __iomem *base)
2724 {
2725 	int timeout = 0;
2726 	unsigned char error, parm0, parm1;
2727 
2728 	DAC960_LA_disable_intr(base);
2729 	DAC960_LA_ack_hw_mbox_status(base);
2730 	udelay(1000);
2731 	timeout = 0;
2732 	while (DAC960_LA_init_in_progress(base) &&
2733 	       timeout < MYRB_MAILBOX_TIMEOUT) {
2734 		if (DAC960_LA_read_error_status(base, &error,
2735 					      &parm0, &parm1) &&
2736 		    myrb_err_status(cb, error, parm0, parm1))
2737 			return -ENODEV;
2738 		udelay(10);
2739 		timeout++;
2740 	}
2741 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2742 		dev_err(&pdev->dev,
2743 			"Timeout waiting for Controller Initialisation\n");
2744 		return -ETIMEDOUT;
2745 	}
2746 	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2747 		dev_err(&pdev->dev,
2748 			"Unable to Enable Memory Mailbox Interface\n");
2749 		DAC960_LA_reset_ctrl(base);
2750 		return -ENODEV;
2751 	}
2752 	DAC960_LA_enable_intr(base);
2753 	cb->qcmd = myrb_qcmd;
2754 	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2755 	if (cb->dual_mode_interface)
2756 		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2757 	else
2758 		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2759 	cb->disable_intr = DAC960_LA_disable_intr;
2760 	cb->reset = DAC960_LA_reset_ctrl;
2761 
2762 	return 0;
2763 }
2764 
2765 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2766 {
2767 	struct myrb_hba *cb = arg;
2768 	void __iomem *base = cb->io_base;
2769 	struct myrb_stat_mbox *next_stat_mbox;
2770 	unsigned long flags;
2771 
2772 	spin_lock_irqsave(&cb->queue_lock, flags);
2773 	DAC960_LA_ack_intr(base);
2774 	next_stat_mbox = cb->next_stat_mbox;
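	/*
	 * Drain the memory-mailbox status ring; the controller marks
	 * each posted entry as valid and we clear the slot once it is
	 * consumed.  Tags below 3 are the driver's internal command
	 * blocks, SCSI commands are tagged starting at 3, hence the
	 * "id - 3" lookup.
	 */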
2775 	while (next_stat_mbox->valid) {
2776 		unsigned char id = next_stat_mbox->id;
2777 		struct scsi_cmnd *scmd = NULL;
2778 		struct myrb_cmdblk *cmd_blk = NULL;
2779 
2780 		if (id == MYRB_DCMD_TAG)
2781 			cmd_blk = &cb->dcmd_blk;
2782 		else if (id == MYRB_MCMD_TAG)
2783 			cmd_blk = &cb->mcmd_blk;
2784 		else {
2785 			scmd = scsi_host_find_tag(cb->host, id - 3);
2786 			if (scmd)
2787 				cmd_blk = scsi_cmd_priv(scmd);
2788 		}
2789 		if (cmd_blk)
2790 			cmd_blk->status = next_stat_mbox->status;
2791 		else
2792 			dev_err(&cb->pdev->dev,
2793 				"Unhandled command completion %d\n", id);
2794 
2795 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2796 		if (++next_stat_mbox > cb->last_stat_mbox)
2797 			next_stat_mbox = cb->first_stat_mbox;
2798 
2799 		if (cmd_blk) {
2800 			if (id < 3)
2801 				myrb_handle_cmdblk(cb, cmd_blk);
2802 			else
2803 				myrb_handle_scsi(cb, cmd_blk, scmd);
2804 		}
2805 	}
2806 	cb->next_stat_mbox = next_stat_mbox;
2807 	spin_unlock_irqrestore(&cb->queue_lock, flags);
2808 	return IRQ_HANDLED;
2809 }
2810 
2811 struct myrb_privdata DAC960_LA_privdata = {
2812 	.hw_init =	DAC960_LA_hw_init,
2813 	.irq_handler =	DAC960_LA_intr_handler,
2814 	.mmio_size =	DAC960_LA_mmio_size,
2815 };
2816 
2817 /*
2818  * DAC960 PG Series Controllers
2819  */
2820 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2821 {
2822 	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2823 }
2824 
2825 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2826 {
2827 	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2828 }
2829 
2830 static inline void DAC960_PG_gen_intr(void __iomem *base)
2831 {
2832 	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
2833 }
2834 
2835 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2836 {
2837 	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2838 }
2839 
2840 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2841 {
2842 	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2843 }
2844 
2845 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2846 {
2847 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2848 
2849 	return idb & DAC960_PG_IDB_HWMBOX_FULL;
2850 }
2851 
2852 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2853 {
2854 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2855 
2856 	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2857 }
2858 
2859 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2860 {
2861 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2862 }
2863 
2864 static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
2865 {
2866 	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2867 }
2868 
2869 static inline void DAC960_PG_ack_intr(void __iomem *base)
2870 {
2871 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2872 	       base + DAC960_PG_ODB_OFFSET);
2873 }
2874 
2875 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2876 {
2877 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2878 
2879 	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2880 }
2881 
2882 static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
2883 {
2884 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2885 
2886 	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
2887 }
2888 
2889 static inline void DAC960_PG_enable_intr(void __iomem *base)
2890 {
2891 	unsigned int imask = (unsigned int)-1;
2892 
2893 	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2894 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2895 }
2896 
2897 static inline void DAC960_PG_disable_intr(void __iomem *base)
2898 {
2899 	unsigned int imask = (unsigned int)-1;
2900 
2901 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2902 }
2903 
2904 static inline bool DAC960_PG_intr_enabled(void __iomem *base)
2905 {
2906 	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
2907 
2908 	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
2909 }
2910 
2911 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2912 		union myrb_cmd_mbox *mbox)
2913 {
2914 	mem_mbox->words[1] = mbox->words[1];
2915 	mem_mbox->words[2] = mbox->words[2];
2916 	mem_mbox->words[3] = mbox->words[3];
2917 	/* Memory barrier to prevent reordering */
2918 	wmb();
2919 	mem_mbox->words[0] = mbox->words[0];
2920 	/* Memory barrier to force PCI access */
2921 	mb();
2922 }
2923 
2924 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2925 		union myrb_cmd_mbox *mbox)
2926 {
2927 	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2928 	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2929 	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2930 	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2931 }
2932 
2933 static inline unsigned char
2934 DAC960_PG_read_status_cmd_ident(void __iomem *base)
2935 {
2936 	return readb(base + DAC960_PG_STSID_OFFSET);
2937 }
2938 
2939 static inline unsigned short
2940 DAC960_PG_read_status(void __iomem *base)
2941 {
2942 	return readw(base + DAC960_PG_STS_OFFSET);
2943 }
2944 
2945 static inline bool
2946 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2947 		unsigned char *param0, unsigned char *param1)
2948 {
2949 	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2950 
2951 	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2952 		return false;
2953 	errsts &= ~DAC960_PG_ERRSTS_PENDING;
2954 	*error = errsts;
2955 	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2956 	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2957 	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2958 	return true;
2959 }
2960 
2961 static inline unsigned short
2962 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2963 		union myrb_cmd_mbox *mbox)
2964 {
2965 	unsigned short status;
2966 	int timeout = 0;
2967 
2968 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2969 		if (!DAC960_PG_hw_mbox_is_full(base))
2970 			break;
2971 		udelay(10);
2972 		timeout++;
2973 	}
2974 	if (DAC960_PG_hw_mbox_is_full(base)) {
2975 		dev_err(&pdev->dev,
2976 			"Timeout waiting for empty mailbox\n");
2977 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2978 	}
2979 	DAC960_PG_write_hw_mbox(base, mbox);
2980 	DAC960_PG_hw_mbox_new_cmd(base);
2981 
2982 	timeout = 0;
2983 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2984 		if (DAC960_PG_hw_mbox_status_available(base))
2985 			break;
2986 		udelay(10);
2987 		timeout++;
2988 	}
2989 	if (!DAC960_PG_hw_mbox_status_available(base)) {
2990 		dev_err(&pdev->dev,
2991 			"Timeout waiting for mailbox status\n");
2992 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2993 	}
2994 	status = DAC960_PG_read_status(base);
2995 	DAC960_PG_ack_hw_mbox_intr(base);
2996 	DAC960_PG_ack_hw_mbox_status(base);
2997 
2998 	return status;
2999 }
3000 
3001 static int DAC960_PG_hw_init(struct pci_dev *pdev,
3002 		struct myrb_hba *cb, void __iomem *base)
3003 {
3004 	int timeout = 0;
3005 	unsigned char error, parm0, parm1;
3006 
3007 	DAC960_PG_disable_intr(base);
3008 	DAC960_PG_ack_hw_mbox_status(base);
3009 	udelay(1000);
3010 	while (DAC960_PG_init_in_progress(base) &&
3011 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3012 		if (DAC960_PG_read_error_status(base, &error,
3013 						&parm0, &parm1) &&
3014 		    myrb_err_status(cb, error, parm0, parm1))
3015 			return -EIO;
3016 		udelay(10);
3017 		timeout++;
3018 	}
3019 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3020 		dev_err(&pdev->dev,
3021 			"Timeout waiting for Controller Initialisation\n");
3022 		return -ETIMEDOUT;
3023 	}
3024 	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
3025 		dev_err(&pdev->dev,
3026 			"Unable to Enable Memory Mailbox Interface\n");
3027 		DAC960_PG_reset_ctrl(base);
3028 		return -ENODEV;
3029 	}
3030 	DAC960_PG_enable_intr(base);
3031 	cb->qcmd = myrb_qcmd;
3032 	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
3033 	if (cb->dual_mode_interface)
3034 		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
3035 	else
3036 		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
3037 	cb->disable_intr = DAC960_PG_disable_intr;
3038 	cb->reset = DAC960_PG_reset_ctrl;
3039 
3040 	return 0;
3041 }
3042 
3043 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
3044 {
3045 	struct myrb_hba *cb = arg;
3046 	void __iomem *base = cb->io_base;
3047 	struct myrb_stat_mbox *next_stat_mbox;
3048 	unsigned long flags;
3049 
3050 	spin_lock_irqsave(&cb->queue_lock, flags);
3051 	DAC960_PG_ack_intr(base);
3052 	next_stat_mbox = cb->next_stat_mbox;
3053 	while (next_stat_mbox->valid) {
3054 		unsigned char id = next_stat_mbox->id;
3055 		struct scsi_cmnd *scmd = NULL;
3056 		struct myrb_cmdblk *cmd_blk = NULL;
3057 
3058 		if (id == MYRB_DCMD_TAG)
3059 			cmd_blk = &cb->dcmd_blk;
3060 		else if (id == MYRB_MCMD_TAG)
3061 			cmd_blk = &cb->mcmd_blk;
3062 		else {
3063 			scmd = scsi_host_find_tag(cb->host, id - 3);
3064 			if (scmd)
3065 				cmd_blk = scsi_cmd_priv(scmd);
3066 		}
3067 		if (cmd_blk)
3068 			cmd_blk->status = next_stat_mbox->status;
3069 		else
3070 			dev_err(&cb->pdev->dev,
3071 				"Unhandled command completion %d\n", id);
3072 
3073 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
3074 		if (++next_stat_mbox > cb->last_stat_mbox)
3075 			next_stat_mbox = cb->first_stat_mbox;
3076 
3077 		if (id < 3)
3078 			myrb_handle_cmdblk(cb, cmd_blk);
3079 		else
3080 			myrb_handle_scsi(cb, cmd_blk, scmd);
3081 	}
3082 	cb->next_stat_mbox = next_stat_mbox;
3083 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3084 	return IRQ_HANDLED;
3085 }
3086 
3087 struct myrb_privdata DAC960_PG_privdata = {
3088 	.hw_init =	DAC960_PG_hw_init,
3089 	.irq_handler =	DAC960_PG_intr_handler,
3090 	.mmio_size =	DAC960_PG_mmio_size,
3091 };
3092 
3093 
3094 /*
3095  * DAC960 PD Series Controllers
3096  */
3097 
3098 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3099 {
3100 	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3101 }
3102 
3103 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3104 {
3105 	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3106 }
3107 
3108 static inline void DAC960_PD_gen_intr(void __iomem *base)
3109 {
3110 	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
3111 }
3112 
3113 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3114 {
3115 	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3116 }
3117 
3118 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3119 {
3120 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3121 
3122 	return idb & DAC960_PD_IDB_HWMBOX_FULL;
3123 }
3124 
3125 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3126 {
3127 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3128 
3129 	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3130 }
3131 
3132 static inline void DAC960_PD_ack_intr(void __iomem *base)
3133 {
3134 	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3135 }
3136 
3137 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3138 {
3139 	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3140 
3141 	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3142 }
3143 
3144 static inline void DAC960_PD_enable_intr(void __iomem *base)
3145 {
3146 	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3147 }
3148 
3149 static inline void DAC960_PD_disable_intr(void __iomem *base)
3150 {
3151 	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3152 }
3153 
3154 static inline bool DAC960_PD_intr_enabled(void __iomem *base)
3155 {
3156 	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
3157 
3158 	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
3159 }
3160 
3161 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3162 		union myrb_cmd_mbox *mbox)
3163 {
3164 	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3165 	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3166 	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3167 	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3168 }
3169 
3170 static inline unsigned char
3171 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3172 {
3173 	return readb(base + DAC960_PD_STSID_OFFSET);
3174 }
3175 
3176 static inline unsigned short
3177 DAC960_PD_read_status(void __iomem *base)
3178 {
3179 	return readw(base + DAC960_PD_STS_OFFSET);
3180 }
3181 
3182 static inline bool
3183 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3184 		unsigned char *param0, unsigned char *param1)
3185 {
3186 	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3187 
3188 	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3189 		return false;
3190 	errsts &= ~DAC960_PD_ERRSTS_PENDING;
3191 	*error = errsts;
3192 	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3193 	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3194 	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3195 	return true;
3196 }
3197 
3198 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3199 {
3200 	void __iomem *base = cb->io_base;
3201 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3202 
3203 	while (DAC960_PD_hw_mbox_is_full(base))
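	/*
	 * Only a single hardware mailbox is available: spin until the
	 * controller has fetched the previous command before writing
	 * the next one.
	 */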
3204 		udelay(1);
3205 	DAC960_PD_write_cmd_mbox(base, mbox);
3206 	DAC960_PD_hw_mbox_new_cmd(base);
3207 }
3208 
3209 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3210 		struct myrb_hba *cb, void __iomem *base)
3211 {
3212 	int timeout = 0;
3213 	unsigned char error, parm0, parm1;
3214 
3215 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3216 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3217 			(unsigned long)cb->io_addr);
3218 		return -EBUSY;
3219 	}
3220 	DAC960_PD_disable_intr(base);
3221 	DAC960_PD_ack_hw_mbox_status(base);
3222 	udelay(1000);
3223 	while (DAC960_PD_init_in_progress(base) &&
3224 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3225 		if (DAC960_PD_read_error_status(base, &error,
3226 					      &parm0, &parm1) &&
3227 		    myrb_err_status(cb, error, parm0, parm1))
3228 			return -EIO;
3229 		udelay(10);
3230 		timeout++;
3231 	}
3232 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3233 		dev_err(&pdev->dev,
3234 			"Timeout waiting for Controller Initialisation\n");
3235 		return -ETIMEDOUT;
3236 	}
3237 	if (!myrb_enable_mmio(cb, NULL)) {
3238 		dev_err(&pdev->dev,
3239 			"Unable to Enable Memory Mailbox Interface\n");
3240 		DAC960_PD_reset_ctrl(base);
3241 		return -ENODEV;
3242 	}
3243 	DAC960_PD_enable_intr(base);
3244 	cb->qcmd = DAC960_PD_qcmd;
3245 	cb->disable_intr = DAC960_PD_disable_intr;
3246 	cb->reset = DAC960_PD_reset_ctrl;
3247 
3248 	return 0;
3249 }
3250 
3251 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3252 {
3253 	struct myrb_hba *cb = arg;
3254 	void __iomem *base = cb->io_base;
3255 	unsigned long flags;
3256 
3257 	spin_lock_irqsave(&cb->queue_lock, flags);
3258 	while (DAC960_PD_hw_mbox_status_available(base)) {
3259 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3260 		struct scsi_cmnd *scmd = NULL;
3261 		struct myrb_cmdblk *cmd_blk = NULL;
3262 
3263 		if (id == MYRB_DCMD_TAG)
3264 			cmd_blk = &cb->dcmd_blk;
3265 		else if (id == MYRB_MCMD_TAG)
3266 			cmd_blk = &cb->mcmd_blk;
3267 		else {
3268 			scmd = scsi_host_find_tag(cb->host, id - 3);
3269 			if (scmd)
3270 				cmd_blk = scsi_cmd_priv(scmd);
3271 		}
3272 		if (cmd_blk)
3273 			cmd_blk->status = DAC960_PD_read_status(base);
3274 		else
3275 			dev_err(&cb->pdev->dev,
3276 				"Unhandled command completion %d\n", id);
3277 
3278 		DAC960_PD_ack_intr(base);
3279 		DAC960_PD_ack_hw_mbox_status(base);
3280 
3281 		if (id < 3)
3282 			myrb_handle_cmdblk(cb, cmd_blk);
3283 		else
3284 			myrb_handle_scsi(cb, cmd_blk, scmd);
3285 	}
3286 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3287 	return IRQ_HANDLED;
3288 }
3289 
3290 struct myrb_privdata DAC960_PD_privdata = {
3291 	.hw_init =	DAC960_PD_hw_init,
3292 	.irq_handler =	DAC960_PD_intr_handler,
3293 	.mmio_size =	DAC960_PD_mmio_size,
3294 };
3295 
3296 
3297 /*
3298  * DAC960 P Series Controllers
3299  *
3300  * Similar to the DAC960 PD Series Controllers, but some commands have
3301  * to be translated.
3302  */
3303 
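/*
 * The old firmware places the ENQUIRY and device state fields at
 * different offsets; move them into the positions that the current
 * structure definitions expect.
 */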
3304 static inline void myrb_translate_enquiry(void *enq)
3305 {
3306 	memcpy(enq + 132, enq + 36, 64);
3307 	memset(enq + 36, 0, 96);
3308 }
3309 
3310 static inline void myrb_translate_devstate(void *state)
3311 {
3312 	memcpy(state + 2, state + 3, 1);
3313 	memmove(state + 4, state + 5, 2);
3314 	memmove(state + 6, state + 8, 4);
3315 }
3316 
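/*
 * Old-style read/write commands carry the logical drive number in
 * byte 7 and pack the bits it displaces into the top of byte 3;
 * these two helpers convert between the old and current encodings.
 */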
3317 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3318 {
3319 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3320 	int ldev_num = mbox->type5.ld.ldev_num;
3321 
3322 	mbox->bytes[3] &= 0x7;
3323 	mbox->bytes[3] |= mbox->bytes[7] << 6;
3324 	mbox->bytes[7] = ldev_num;
3325 }
3326 
3327 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3328 {
3329 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3330 	int ldev_num = mbox->bytes[7];
3331 
3332 	mbox->bytes[7] = mbox->bytes[3] >> 6;
3333 	mbox->bytes[3] &= 0x7;
3334 	mbox->bytes[3] |= ldev_num << 3;
3335 }
3336 
3337 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3338 {
3339 	void __iomem *base = cb->io_base;
3340 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3341 
3342 	switch (mbox->common.opcode) {
3343 	case MYRB_CMD_ENQUIRY:
3344 		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3345 		break;
3346 	case MYRB_CMD_GET_DEVICE_STATE:
3347 		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3348 		break;
3349 	case MYRB_CMD_READ:
3350 		mbox->common.opcode = MYRB_CMD_READ_OLD;
3351 		myrb_translate_to_rw_command(cmd_blk);
3352 		break;
3353 	case MYRB_CMD_WRITE:
3354 		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3355 		myrb_translate_to_rw_command(cmd_blk);
3356 		break;
3357 	case MYRB_CMD_READ_SG:
3358 		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3359 		myrb_translate_to_rw_command(cmd_blk);
3360 		break;
3361 	case MYRB_CMD_WRITE_SG:
3362 		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3363 		myrb_translate_to_rw_command(cmd_blk);
3364 		break;
3365 	default:
3366 		break;
3367 	}
3368 	while (DAC960_PD_hw_mbox_is_full(base))
3369 		udelay(1);
3370 	DAC960_PD_write_cmd_mbox(base, mbox);
3371 	DAC960_PD_hw_mbox_new_cmd(base);
3372 }
3373 
3374 
3375 static int DAC960_P_hw_init(struct pci_dev *pdev,
3376 		struct myrb_hba *cb, void __iomem *base)
3377 {
3378 	int timeout = 0;
3379 	unsigned char error, parm0, parm1;
3380 
3381 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3382 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3383 			(unsigned long)cb->io_addr);
3384 		return -EBUSY;
3385 	}
3386 	DAC960_PD_disable_intr(base);
3387 	DAC960_PD_ack_hw_mbox_status(base);
3388 	udelay(1000);
3389 	while (DAC960_PD_init_in_progress(base) &&
3390 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3391 		if (DAC960_PD_read_error_status(base, &error,
3392 						&parm0, &parm1) &&
3393 		    myrb_err_status(cb, error, parm0, parm1))
3394 			return -EAGAIN;
3395 		udelay(10);
3396 		timeout++;
3397 	}
3398 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3399 		dev_err(&pdev->dev,
3400 			"Timeout waiting for Controller Initialisation\n");
3401 		return -ETIMEDOUT;
3402 	}
3403 	if (!myrb_enable_mmio(cb, NULL)) {
3404 		dev_err(&pdev->dev,
3405 			"Unable to allocate DMA mapped memory\n");
3406 		DAC960_PD_reset_ctrl(base);
3407 		return -ENODEV;
3408 	}
3409 	DAC960_PD_enable_intr(base);
3410 	cb->qcmd = DAC960_P_qcmd;
3411 	cb->disable_intr = DAC960_PD_disable_intr;
3412 	cb->reset = DAC960_PD_reset_ctrl;
3413 
3414 	return 0;
3415 }
3416 
3417 static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3418 {
3419 	struct myrb_hba *cb = arg;
3420 	void __iomem *base = cb->io_base;
3421 	unsigned long flags;
3422 
3423 	spin_lock_irqsave(&cb->queue_lock, flags);
3424 	while (DAC960_PD_hw_mbox_status_available(base)) {
3425 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3426 		struct scsi_cmnd *scmd = NULL;
3427 		struct myrb_cmdblk *cmd_blk = NULL;
3428 		union myrb_cmd_mbox *mbox;
3429 		enum myrb_cmd_opcode op;
3430 
3431 
3432 		if (id == MYRB_DCMD_TAG)
3433 			cmd_blk = &cb->dcmd_blk;
3434 		else if (id == MYRB_MCMD_TAG)
3435 			cmd_blk = &cb->mcmd_blk;
3436 		else {
3437 			scmd = scsi_host_find_tag(cb->host, id - 3);
3438 			if (scmd)
3439 				cmd_blk = scsi_cmd_priv(scmd);
3440 		}
3441 		if (cmd_blk)
3442 			cmd_blk->status = DAC960_PD_read_status(base);
3443 		else
3444 			dev_err(&cb->pdev->dev,
3445 				"Unhandled command completion %d\n", id);
3446 
3447 		DAC960_PD_ack_intr(base);
3448 		DAC960_PD_ack_hw_mbox_status(base);
3449 
3450 		if (!cmd_blk)
3451 			continue;
3452 
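		/*
		 * Restore the generic opcode and data layout that
		 * DAC960_P_qcmd translated on submission, so the common
		 * completion path sees the expected format.
		 */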
3453 		mbox = &cmd_blk->mbox;
3454 		op = mbox->common.opcode;
3455 		switch (op) {
3456 		case MYRB_CMD_ENQUIRY_OLD:
3457 			mbox->common.opcode = MYRB_CMD_ENQUIRY;
3458 			myrb_translate_enquiry(cb->enquiry);
3459 			break;
3460 		case MYRB_CMD_READ_OLD:
3461 			mbox->common.opcode = MYRB_CMD_READ;
3462 			myrb_translate_from_rw_command(cmd_blk);
3463 			break;
3464 		case MYRB_CMD_WRITE_OLD:
3465 			mbox->common.opcode = MYRB_CMD_WRITE;
3466 			myrb_translate_from_rw_command(cmd_blk);
3467 			break;
3468 		case MYRB_CMD_READ_SG_OLD:
3469 			mbox->common.opcode = MYRB_CMD_READ_SG;
3470 			myrb_translate_from_rw_command(cmd_blk);
3471 			break;
3472 		case MYRB_CMD_WRITE_SG_OLD:
3473 			mbox->common.opcode = MYRB_CMD_WRITE_SG;
3474 			myrb_translate_from_rw_command(cmd_blk);
3475 			break;
3476 		default:
3477 			break;
3478 		}
3479 		if (id < 3)
3480 			myrb_handle_cmdblk(cb, cmd_blk);
3481 		else
3482 			myrb_handle_scsi(cb, cmd_blk, scmd);
3483 	}
3484 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3485 	return IRQ_HANDLED;
3486 }
3487 
3488 struct myrb_privdata DAC960_P_privdata = {
3489 	.hw_init =	DAC960_P_hw_init,
3490 	.irq_handler =	DAC960_P_intr_handler,
3491 	.mmio_size =	DAC960_PD_mmio_size,
3492 };
3493 
3494 static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3495 		const struct pci_device_id *entry)
3496 {
3497 	struct myrb_privdata *privdata =
3498 		(struct myrb_privdata *)entry->driver_data;
3499 	irq_handler_t irq_handler = privdata->irq_handler;
3500 	unsigned int mmio_size = privdata->mmio_size;
3501 	struct Scsi_Host *shost;
3502 	struct myrb_hba *cb = NULL;
3503 
3504 	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3505 	if (!shost) {
3506 		dev_err(&pdev->dev, "Unable to allocate Controller\n");
3507 		return NULL;
3508 	}
3509 	shost->max_cmd_len = 12;
3510 	shost->max_lun = 256;
3511 	cb = shost_priv(shost);
3512 	mutex_init(&cb->dcmd_mutex);
3513 	mutex_init(&cb->dma_mutex);
3514 	cb->pdev = pdev;
3515 
3516 	if (pci_enable_device(pdev))
3517 		goto failure;
3518 
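	/*
	 * PD and P series controllers use an I/O-port BAR for their
	 * mailbox registers plus a separate memory BAR; the newer
	 * series are fully memory-mapped through BAR 0.
	 */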
3519 	if (privdata->hw_init == DAC960_PD_hw_init ||
3520 	    privdata->hw_init == DAC960_P_hw_init) {
3521 		cb->io_addr = pci_resource_start(pdev, 0);
3522 		cb->pci_addr = pci_resource_start(pdev, 1);
3523 	} else
3524 		cb->pci_addr = pci_resource_start(pdev, 0);
3525 
3526 	pci_set_drvdata(pdev, cb);
3527 	spin_lock_init(&cb->queue_lock);
3528 	if (mmio_size < PAGE_SIZE)
3529 		mmio_size = PAGE_SIZE;
3530 	cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
3531 	if (cb->mmio_base == NULL) {
3532 		dev_err(&pdev->dev,
3533 			"Unable to map Controller Register Window\n");
3534 		goto failure;
3535 	}
3536 
3537 	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3538 	if (privdata->hw_init(pdev, cb, cb->io_base))
3539 		goto failure;
3540 
3541 	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3542 		dev_err(&pdev->dev,
3543 			"Unable to acquire IRQ Channel %d\n", pdev->irq);
3544 		goto failure;
3545 	}
3546 	cb->irq = pdev->irq;
3547 	return cb;
3548 
3549 failure:
3550 	dev_err(&pdev->dev,
3551 		"Failed to initialize Controller\n");
3552 	myrb_cleanup(cb);
3553 	return NULL;
3554 }
3555 
3556 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3557 {
3558 	struct myrb_hba *cb;
3559 	int ret;
3560 
3561 	cb = myrb_detect(dev, entry);
3562 	if (!cb)
3563 		return -ENODEV;
3564 
3565 	ret = myrb_get_hba_config(cb);
3566 	if (ret < 0) {
3567 		myrb_cleanup(cb);
3568 		return ret;
3569 	}
3570 
3571 	if (!myrb_create_mempools(dev, cb)) {
3572 		ret = -ENOMEM;
3573 		goto failed;
3574 	}
3575 
3576 	ret = scsi_add_host(cb->host, &dev->dev);
3577 	if (ret) {
3578 		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3579 		myrb_destroy_mempools(cb);
3580 		goto failed;
3581 	}
3582 	scsi_scan_host(cb->host);
3583 	return 0;
3584 failed:
3585 	myrb_cleanup(cb);
3586 	return ret;
3587 }
3588 
3589 
3590 static void myrb_remove(struct pci_dev *pdev)
3591 {
3592 	struct myrb_hba *cb = pci_get_drvdata(pdev);
3593 
3594 	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
3595 	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3596 	myrb_cleanup(cb);
3597 	myrb_destroy_mempools(cb);
3598 }
3599 
3600 
3601 static const struct pci_device_id myrb_id_table[] = {
3602 	{
3603 		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3604 			       PCI_DEVICE_ID_DEC_21285,
3605 			       PCI_VENDOR_ID_MYLEX,
3606 			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
3607 		.driver_data	= (unsigned long) &DAC960_LA_privdata,
3608 	},
3609 	{
3610 		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3611 	},
3612 	{
3613 		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3614 	},
3615 	{
3616 		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3617 	},
3618 	{0, },
3619 };
3620 
3621 MODULE_DEVICE_TABLE(pci, myrb_id_table);
3622 
3623 static struct pci_driver myrb_pci_driver = {
3624 	.name		= "myrb",
3625 	.id_table	= myrb_id_table,
3626 	.probe		= myrb_probe,
3627 	.remove		= myrb_remove,
3628 };
3629 
3630 static int __init myrb_init_module(void)
3631 {
3632 	int ret;
3633 
3634 	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3635 	if (!myrb_raid_template)
3636 		return -ENODEV;
3637 
3638 	ret = pci_register_driver(&myrb_pci_driver);
3639 	if (ret)
3640 		raid_class_release(myrb_raid_template);
3641 
3642 	return ret;
3643 }
3644 
3645 static void __exit myrb_cleanup_module(void)
3646 {
3647 	pci_unregister_driver(&myrb_pci_driver);
3648 	raid_class_release(myrb_raid_template);
3649 }
3650 
3651 module_init(myrb_init_module);
3652 module_exit(myrb_cleanup_module);
3653 
3654 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3655 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3656 MODULE_LICENSE("GPL");
3657