xref: /openbmc/linux/drivers/scsi/myrb.c (revision 0661cb2a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4  *
5  * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6  *
7  * Based on the original DAC960 driver,
8  * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9  * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10  *
11  */
12 
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
25 #include "myrb.h"
26 
27 static struct raid_template *myrb_raid_template;
28 
29 static void myrb_monitor(struct work_struct *work);
30 static inline void myrb_translate_devstate(void *DeviceState);
31 
/*
 * myrb_logical_channel - bus number used to present logical drives
 *
 * Logical drives live on the highest channel of the SCSI host; the
 * channels 0 .. max_channel - 2 are the physical device channels.
 */
static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}
36 
/* Lookup table mapping device states to human-readable names. */
static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};
48 
49 static const char *myrb_devstate_name(enum myrb_devstate state)
50 {
51 	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
52 	int i;
53 
54 	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 		if (entry[i].state == state)
56 			return entry[i].name;
57 	}
58 	return "Unknown";
59 }
60 
/* Lookup table mapping RAID levels to human-readable names. */
static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};
72 
73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
74 {
75 	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
76 	int i;
77 
78 	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 		if (entry[i].level == level)
80 			return entry[i].name;
81 	}
82 	return NULL;
83 }
84 
85 /*
86  * myrb_create_mempools - allocates auxiliary data structures
87  *
88  * Return: true on success, false otherwise.
89  */
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
91 {
92 	size_t elem_size, elem_align;
93 
94 	elem_align = sizeof(struct myrb_sge);
95 	elem_size = cb->host->sg_tablesize * elem_align;
96 	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 				      elem_size, elem_align, 0);
98 	if (cb->sg_pool == NULL) {
99 		shost_printk(KERN_ERR, cb->host,
100 			     "Failed to allocate SG pool\n");
101 		return false;
102 	}
103 
104 	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 				       sizeof(struct myrb_dcdb),
106 				       sizeof(unsigned int), 0);
107 	if (!cb->dcdb_pool) {
108 		dma_pool_destroy(cb->sg_pool);
109 		cb->sg_pool = NULL;
110 		shost_printk(KERN_ERR, cb->host,
111 			     "Failed to allocate DCDB pool\n");
112 		return false;
113 	}
114 
115 	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 		 "myrb_wq_%d", cb->host->host_no);
117 	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118 	if (!cb->work_q) {
119 		dma_pool_destroy(cb->dcdb_pool);
120 		cb->dcdb_pool = NULL;
121 		dma_pool_destroy(cb->sg_pool);
122 		cb->sg_pool = NULL;
123 		shost_printk(KERN_ERR, cb->host,
124 			     "Failed to create workqueue\n");
125 		return false;
126 	}
127 
128 	/*
129 	 * Initialize the Monitoring Timer.
130 	 */
131 	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
133 
134 	return true;
135 }
136 
137 /*
138  * myrb_destroy_mempools - tears down the memory pools for the controller
139  */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	/* Stop the monitor before its workqueue goes away, so no
	 * monitor run can be in flight when the pools are destroyed. */
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	/* dma_pool_destroy() tolerates NULL, so unallocated pools are fine. */
	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}
148 
149 /*
150  * myrb_reset_cmd - reset command block
151  */
152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
153 {
154 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
155 
156 	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
157 	cmd_blk->status = 0;
158 }
159 
160 /*
161  * myrb_qcmd - queues command block for execution
162  */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	/* Copy the command into the next free slot of the mailbox ring. */
	cb->write_cmd_mbox(next_mbox, mbox);
	/*
	 * If either of the two previously posted mailbox slots reads as
	 * free (words[0] == 0), poke the controller through the
	 * get_cmd_mbox hook — presumably to signal that new commands are
	 * pending; TODO confirm against the DAC960 hardware interface.
	 */
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	/* Track the two most recently posted slots for the check above. */
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	/* Advance the ring pointer, wrapping past the last slot. */
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}
179 
180 /*
181  * myrb_exec_cmd - executes command block and waits for completion.
182  *
183  * Return: command status
184  */
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 		struct myrb_cmdblk *cmd_blk)
187 {
188 	DECLARE_COMPLETION_ONSTACK(cmpl);
189 	unsigned long flags;
190 
191 	cmd_blk->completion = &cmpl;
192 
193 	spin_lock_irqsave(&cb->queue_lock, flags);
194 	cb->qcmd(cb, cmd_blk);
195 	spin_unlock_irqrestore(&cb->queue_lock, flags);
196 
197 	wait_for_completion(&cmpl);
198 	return cmd_blk->status;
199 }
200 
201 /*
202  * myrb_exec_type3 - executes a type 3 command and waits for completion.
203  *
204  * Return: command status
205  */
206 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
207 		enum myrb_cmd_opcode op, dma_addr_t addr)
208 {
209 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
210 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
211 	unsigned short status;
212 
213 	mutex_lock(&cb->dcmd_mutex);
214 	myrb_reset_cmd(cmd_blk);
215 	mbox->type3.id = MYRB_DCMD_TAG;
216 	mbox->type3.opcode = op;
217 	mbox->type3.addr = addr;
218 	status = myrb_exec_cmd(cb, cmd_blk);
219 	mutex_unlock(&cb->dcmd_mutex);
220 	return status;
221 }
222 
223 /*
224  * myrb_exec_type3D - executes a type 3D command and waits for completion.
225  *
226  * Return: command status
227  */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	/* Map the caller's buffer for the controller to write into. */
	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	/* The shared direct-command block is serialized by dcmd_mutex. */
	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	/* Unmap before touching pdev_info so the CPU sees the DMA data. */
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	/* The old GET_DEVICE_STATE opcode returns data in a legacy
	 * layout; convert it to the current representation. */
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}
260 
/* Drive-kill reason strings, indexed by the additional sense code
 * qualifier (ascq) of a vendor-specific sense with asc 0x80 (see
 * myrb_get_event()). */
static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};
276 
277 /**
278  * myrb_get_event - get event log from HBA
279  * @cb: pointer to the hba structure
280  * @event: number of the event
281  *
282  * Execute a type 3E command and logs the event message
283  */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		/* Best-effort logging only; silently skip on OOM. */
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);

	else if (ev_buf->seq_num == event) {
		/* The log entry carries SCSI sense data describing the event. */
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		/* Vendor-specific sense 80/ascq encodes a drive-kill
		 * reason; translate it via myrb_event_msg. */
		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}
334 
335 /*
336  * myrb_get_errtable - retrieves the error table from the controller
337  *
338  * Executes a type 3 command and logs the error table from the controller.
339  */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	/* Snapshot of the previous error table, used to log only deltas. */
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			/* Only physical channels have error-table entries. */
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			/* Skip devices whose counters did not change. */
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}
379 
380 /*
381  * myrb_get_ldev_info - retrieves the logical device table from the controller
382  *
383  * Executes a type 3 command and updates the logical device table.
384  *
385  * Return: command status
386  */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	/* Reconcile the SCSI midlayer view with the refreshed table. */
	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			/* Not known to the midlayer yet: register it,
			 * unless the drive is offline. */
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		/* Known device: log state / write-back cache transitions
		 * and refresh the cached per-device info. */
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		/* Drop the reference taken by scsi_device_lookup(). */
		scsi_device_put(sdev);
	}
	return status;
}
429 
430 /*
431  * myrb_get_rbld_progress - get rebuild progress information
432  *
433  * Executes a type 3 command and returns the rebuild progress
434  * information.
435  *
436  * Return: command status
437  */
438 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
439 		struct myrb_rbld_progress *rbld)
440 {
441 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
442 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
443 	struct myrb_rbld_progress *rbld_buf;
444 	dma_addr_t rbld_addr;
445 	unsigned short status;
446 
447 	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
448 				      sizeof(struct myrb_rbld_progress),
449 				      &rbld_addr, GFP_KERNEL);
450 	if (!rbld_buf)
451 		return MYRB_STATUS_RBLD_NOT_CHECKED;
452 
453 	myrb_reset_cmd(cmd_blk);
454 	mbox->type3.id = MYRB_MCMD_TAG;
455 	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
456 	mbox->type3.addr = rbld_addr;
457 	status = myrb_exec_cmd(cb, cmd_blk);
458 	if (rbld)
459 		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
460 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
461 			  rbld_buf, rbld_addr);
462 	return status;
463 }
464 
465 /*
466  * myrb_update_rbld_progress - updates the rebuild status
467  *
468  * Updates the rebuild status for the attached logical devices.
469  */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	/* A rebuild that was running last time and is gone now finished
	 * successfully; synthesize the success status. */
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			/* NOTE(review): early return skips the
			 * last_rbld_status update below. */
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			/* >> 7 scales both terms to avoid 32-bit overflow
			 * in the percentage computation. */
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				     "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	/* Remember the status so the next poll can detect completion. */
	cb->last_rbld_status = status;
}
524 
525 /*
526  * myrb_get_cc_progress - retrieve the rebuild status
527  *
528  * Execute a type 3 Command and fetch the rebuild / consistency check
529  * status.
530  */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		/* Retry on the next monitor pass instead of dropping
		 * the request. */
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			/* >> 7 scales both terms to keep the percentage
			 * arithmetic within 32 bits. */
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}
572 
573 /*
574  * myrb_bgi_control - updates background initialisation status
575  *
576  * Executes a type 3B command and updates the background initialisation status
577  */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	/* optype 0x20: query BGI status — TODO confirm against the
	 * DAC960 firmware documentation. */
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	/* May be NULL when the reported logical drive is not registered;
	 * each case below checks sdev before printing. */
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			/* Only log when progress actually advanced. */
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				 "Background Initialization in Progress: %d%% completed\n",
				 (100 * (bgi->blocks_done >> 7))
				 / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		/* Cache the snapshot for the delta check next time round. */
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}
661 
662 /*
663  * myrb_hba_enquiry - updates the controller status
664  *
665  * Executes a DAC_V1_Enquiry command and updates the controller status.
666  *
667  * Return: command status
668  */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	/* Snapshot the previous enquiry so only changes get logged. */
	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	/* Log logical drives that appeared since the last enquiry. */
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	/* Log logical drives that disappeared since the last enquiry. */
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	/* A new event sequence number means log entries are pending;
	 * flag them for retrieval by the monitor. */
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	/* Any change in critical/offline/total drive counts requires a
	 * fresh logical device scan. */
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	/* Dead physical drives or an elapsed secondary interval trigger
	 * a background-initialization status poll (if supported). */
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	/* A rebuild running now or previously needs progress tracking. */
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	/* A consistency check was running; report how it ended. */
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}
773 
774 /*
775  * myrb_set_pdev_state - sets the device state for a physical device
776  *
777  * Return: command status
778  */
779 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
780 		struct scsi_device *sdev, enum myrb_devstate state)
781 {
782 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
783 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
784 	unsigned short status;
785 
786 	mutex_lock(&cb->dcmd_mutex);
787 	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
788 	mbox->type3D.id = MYRB_DCMD_TAG;
789 	mbox->type3D.channel = sdev->channel;
790 	mbox->type3D.target = sdev->id;
791 	mbox->type3D.state = state & 0x1F;
792 	status = myrb_exec_cmd(cb, cmd_blk);
793 	mutex_unlock(&cb->dcmd_mutex);
794 
795 	return status;
796 }
797 
798 /*
799  * myrb_enable_mmio - enables the Memory Mailbox Interface
800  *
801  * PD and P controller types have no memory mailbox, but still need the
802  * other dma mapped memory.
803  *
804  * Return: true on success, false otherwise.
805  */
806 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
807 {
808 	void __iomem *base = cb->io_base;
809 	struct pci_dev *pdev = cb->pdev;
810 	size_t err_table_size;
811 	size_t ldev_info_size;
812 	union myrb_cmd_mbox *cmd_mbox_mem;
813 	struct myrb_stat_mbox *stat_mbox_mem;
814 	union myrb_cmd_mbox mbox;
815 	unsigned short status;
816 
817 	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
818 
819 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
820 		dev_err(&pdev->dev, "DMA mask out of range\n");
821 		return false;
822 	}
823 
824 	cb->enquiry = dma_alloc_coherent(&pdev->dev,
825 					 sizeof(struct myrb_enquiry),
826 					 &cb->enquiry_addr, GFP_KERNEL);
827 	if (!cb->enquiry)
828 		return false;
829 
830 	err_table_size = sizeof(struct myrb_error_entry) *
831 		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
832 	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
833 					   &cb->err_table_addr, GFP_KERNEL);
834 	if (!cb->err_table)
835 		return false;
836 
837 	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
838 	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
839 					       &cb->ldev_info_addr, GFP_KERNEL);
840 	if (!cb->ldev_info_buf)
841 		return false;
842 
843 	/*
844 	 * Skip mailbox initialisation for PD and P Controllers
845 	 */
846 	if (!mmio_init_fn)
847 		return true;
848 
849 	/* These are the base addresses for the command memory mailbox array */
850 	cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
851 	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
852 						cb->cmd_mbox_size,
853 						&cb->cmd_mbox_addr,
854 						GFP_KERNEL);
855 	if (!cb->first_cmd_mbox)
856 		return false;
857 
858 	cmd_mbox_mem = cb->first_cmd_mbox;
859 	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
860 	cb->last_cmd_mbox = cmd_mbox_mem;
861 	cb->next_cmd_mbox = cb->first_cmd_mbox;
862 	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
863 	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
864 
865 	/* These are the base addresses for the status memory mailbox array */
866 	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
867 	    sizeof(struct myrb_stat_mbox);
868 	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
869 						 cb->stat_mbox_size,
870 						 &cb->stat_mbox_addr,
871 						 GFP_KERNEL);
872 	if (!cb->first_stat_mbox)
873 		return false;
874 
875 	stat_mbox_mem = cb->first_stat_mbox;
876 	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
877 	cb->last_stat_mbox = stat_mbox_mem;
878 	cb->next_stat_mbox = cb->first_stat_mbox;
879 
880 	/* Enable the Memory Mailbox Interface. */
881 	cb->dual_mode_interface = true;
882 	mbox.typeX.opcode = 0x2B;
883 	mbox.typeX.id = 0;
884 	mbox.typeX.opcode2 = 0x14;
885 	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
886 	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
887 
888 	status = mmio_init_fn(pdev, base, &mbox);
889 	if (status != MYRB_STATUS_SUCCESS) {
890 		cb->dual_mode_interface = false;
891 		mbox.typeX.opcode2 = 0x10;
892 		status = mmio_init_fn(pdev, base, &mbox);
893 		if (status != MYRB_STATUS_SUCCESS) {
894 			dev_err(&pdev->dev,
895 				"Failed to enable mailbox, statux %02X\n",
896 				status);
897 			return false;
898 		}
899 	}
900 	return true;
901 }
902 
903 /*
904  * myrb_get_hba_config - reads the configuration information
905  *
906  * Reads the configuration information from the controller and
907  * initializes the controller structure.
908  *
909  * Return: 0 on success, errno otherwise
910  */
911 static int myrb_get_hba_config(struct myrb_hba *cb)
912 {
913 	struct myrb_enquiry2 *enquiry2;
914 	dma_addr_t enquiry2_addr;
915 	struct myrb_config2 *config2;
916 	dma_addr_t config2_addr;
917 	struct Scsi_Host *shost = cb->host;
918 	struct pci_dev *pdev = cb->pdev;
919 	int pchan_max = 0, pchan_cur = 0;
920 	unsigned short status;
921 	int ret = -ENODEV, memsize = 0;
922 
923 	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
924 				      &enquiry2_addr, GFP_KERNEL);
925 	if (!enquiry2) {
926 		shost_printk(KERN_ERR, cb->host,
927 			     "Failed to allocate V1 enquiry2 memory\n");
928 		return -ENOMEM;
929 	}
930 	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
931 				     &config2_addr, GFP_KERNEL);
932 	if (!config2) {
933 		shost_printk(KERN_ERR, cb->host,
934 			     "Failed to allocate V1 config2 memory\n");
935 		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
936 				  enquiry2, enquiry2_addr);
937 		return -ENOMEM;
938 	}
939 	mutex_lock(&cb->dma_mutex);
940 	status = myrb_hba_enquiry(cb);
941 	mutex_unlock(&cb->dma_mutex);
942 	if (status != MYRB_STATUS_SUCCESS) {
943 		shost_printk(KERN_WARNING, cb->host,
944 			     "Failed it issue V1 Enquiry\n");
945 		goto out_free;
946 	}
947 
948 	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
949 	if (status != MYRB_STATUS_SUCCESS) {
950 		shost_printk(KERN_WARNING, cb->host,
951 			     "Failed to issue V1 Enquiry2\n");
952 		goto out_free;
953 	}
954 
955 	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
956 	if (status != MYRB_STATUS_SUCCESS) {
957 		shost_printk(KERN_WARNING, cb->host,
958 			     "Failed to issue ReadConfig2\n");
959 		goto out_free;
960 	}
961 
962 	status = myrb_get_ldev_info(cb);
963 	if (status != MYRB_STATUS_SUCCESS) {
964 		shost_printk(KERN_WARNING, cb->host,
965 			     "Failed to get logical drive information\n");
966 		goto out_free;
967 	}
968 
969 	/*
970 	 * Initialize the Controller Model Name and Full Model Name fields.
971 	 */
972 	switch (enquiry2->hw.sub_model) {
973 	case DAC960_V1_P_PD_PU:
974 		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
975 			strcpy(cb->model_name, "DAC960PU");
976 		else
977 			strcpy(cb->model_name, "DAC960PD");
978 		break;
979 	case DAC960_V1_PL:
980 		strcpy(cb->model_name, "DAC960PL");
981 		break;
982 	case DAC960_V1_PG:
983 		strcpy(cb->model_name, "DAC960PG");
984 		break;
985 	case DAC960_V1_PJ:
986 		strcpy(cb->model_name, "DAC960PJ");
987 		break;
988 	case DAC960_V1_PR:
989 		strcpy(cb->model_name, "DAC960PR");
990 		break;
991 	case DAC960_V1_PT:
992 		strcpy(cb->model_name, "DAC960PT");
993 		break;
994 	case DAC960_V1_PTL0:
995 		strcpy(cb->model_name, "DAC960PTL0");
996 		break;
997 	case DAC960_V1_PRL:
998 		strcpy(cb->model_name, "DAC960PRL");
999 		break;
1000 	case DAC960_V1_PTL1:
1001 		strcpy(cb->model_name, "DAC960PTL1");
1002 		break;
1003 	case DAC960_V1_1164P:
1004 		strcpy(cb->model_name, "eXtremeRAID 1100");
1005 		break;
1006 	default:
1007 		shost_printk(KERN_WARNING, cb->host,
1008 			     "Unknown Model %X\n",
1009 			     enquiry2->hw.sub_model);
1010 		goto out;
1011 	}
1012 	/*
1013 	 * Initialize the Controller Firmware Version field and verify that it
1014 	 * is a supported firmware version.
1015 	 * The supported firmware versions are:
1016 	 *
1017 	 * DAC1164P		    5.06 and above
1018 	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
1019 	 * DAC960PU/PD/PL	    3.51 and above
1020 	 * DAC960PU/PD/PL/P	    2.73 and above
1021 	 */
1022 #if defined(CONFIG_ALPHA)
1023 	/*
1024 	 * DEC Alpha machines were often equipped with DAC960 cards that were
1025 	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1026 	 * the last custom FW revision to be released by DEC for these older
1027 	 * controllers, appears to work quite well with this driver.
1028 	 *
1029 	 * Cards tested successfully were several versions each of the PD and
1030 	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1031 	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1032 	 * back of the board, of:
1033 	 *
1034 	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
1035 	 *         or D040349 (3-channel)
1036 	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
1037 	 *         or D040397 (3-channel)
1038 	 */
1039 # define FIRMWARE_27X	"2.70"
1040 #else
1041 # define FIRMWARE_27X	"2.73"
1042 #endif
1043 
1044 	if (enquiry2->fw.major_version == 0) {
1045 		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1046 		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1047 		enquiry2->fw.firmware_type = '0';
1048 		enquiry2->fw.turn_id = 0;
1049 	}
1050 	snprintf(cb->fw_version, sizeof(cb->fw_version),
1051 		"%u.%02u-%c-%02u",
1052 		enquiry2->fw.major_version,
1053 		enquiry2->fw.minor_version,
1054 		enquiry2->fw.firmware_type,
1055 		enquiry2->fw.turn_id);
1056 	if (!((enquiry2->fw.major_version == 5 &&
1057 	       enquiry2->fw.minor_version >= 6) ||
1058 	      (enquiry2->fw.major_version == 4 &&
1059 	       enquiry2->fw.minor_version >= 6) ||
1060 	      (enquiry2->fw.major_version == 3 &&
1061 	       enquiry2->fw.minor_version >= 51) ||
1062 	      (enquiry2->fw.major_version == 2 &&
1063 	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1064 		shost_printk(KERN_WARNING, cb->host,
1065 			"Firmware Version '%s' unsupported\n",
1066 			cb->fw_version);
1067 		goto out;
1068 	}
1069 	/*
1070 	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1071 	 * Enclosure Management Enabled fields.
1072 	 */
1073 	switch (enquiry2->hw.model) {
1074 	case MYRB_5_CHANNEL_BOARD:
1075 		pchan_max = 5;
1076 		break;
1077 	case MYRB_3_CHANNEL_BOARD:
1078 	case MYRB_3_CHANNEL_ASIC_DAC:
1079 		pchan_max = 3;
1080 		break;
1081 	case MYRB_2_CHANNEL_BOARD:
1082 		pchan_max = 2;
1083 		break;
1084 	default:
1085 		pchan_max = enquiry2->cfg_chan;
1086 		break;
1087 	}
1088 	pchan_cur = enquiry2->cur_chan;
1089 	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1090 		cb->bus_width = 32;
1091 	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1092 		cb->bus_width = 16;
1093 	else
1094 		cb->bus_width = 8;
1095 	cb->ldev_block_size = enquiry2->ldev_block_size;
1096 	shost->max_channel = pchan_cur;
1097 	shost->max_id = enquiry2->max_targets;
1098 	memsize = enquiry2->mem_size >> 20;
1099 	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1100 	/*
1101 	 * Initialize the Controller Queue Depth, Driver Queue Depth,
1102 	 * Logical Drive Count, Maximum Blocks per Command, Controller
1103 	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1104 	 * The Driver Queue Depth must be at most one less than the
1105 	 * Controller Queue Depth to allow for an automatic drive
1106 	 * rebuild operation.
1107 	 */
1108 	shost->can_queue = cb->enquiry->max_tcq;
1109 	if (shost->can_queue < 3)
1110 		shost->can_queue = enquiry2->max_cmds;
1111 	if (shost->can_queue < 3)
1112 		/* Play safe and disable TCQ */
1113 		shost->can_queue = 1;
1114 
1115 	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1116 		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1117 	shost->max_sectors = enquiry2->max_sectors;
1118 	shost->sg_tablesize = enquiry2->max_sge;
1119 	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1120 		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1121 	/*
1122 	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1123 	 */
1124 	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1125 		>> (10 - MYRB_BLKSIZE_BITS);
1126 	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1127 		>> (10 - MYRB_BLKSIZE_BITS);
1128 	/* Assume 255/63 translation */
1129 	cb->ldev_geom_heads = 255;
1130 	cb->ldev_geom_sectors = 63;
1131 	if (config2->drive_geometry) {
1132 		cb->ldev_geom_heads = 128;
1133 		cb->ldev_geom_sectors = 32;
1134 	}
1135 
1136 	/*
1137 	 * Initialize the Background Initialization Status.
1138 	 */
1139 	if ((cb->fw_version[0] == '4' &&
1140 	     strcmp(cb->fw_version, "4.08") >= 0) ||
1141 	    (cb->fw_version[0] == '5' &&
1142 	     strcmp(cb->fw_version, "5.08") >= 0)) {
1143 		cb->bgi_status_supported = true;
1144 		myrb_bgi_control(cb);
1145 	}
1146 	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1147 	ret = 0;
1148 
1149 out:
1150 	shost_printk(KERN_INFO, cb->host,
1151 		"Configuring %s PCI RAID Controller\n", cb->model_name);
1152 	shost_printk(KERN_INFO, cb->host,
1153 		"  Firmware Version: %s, Memory Size: %dMB\n",
1154 		cb->fw_version, memsize);
1155 	if (cb->io_addr == 0)
1156 		shost_printk(KERN_INFO, cb->host,
1157 			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1158 			(unsigned long)cb->pci_addr, cb->irq);
1159 	else
1160 		shost_printk(KERN_INFO, cb->host,
1161 			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1162 			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1163 			cb->irq);
1164 	shost_printk(KERN_INFO, cb->host,
1165 		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1166 		cb->host->can_queue, cb->host->max_sectors);
1167 	shost_printk(KERN_INFO, cb->host,
1168 		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1169 		     cb->host->can_queue, cb->host->sg_tablesize,
1170 		     MYRB_SCATTER_GATHER_LIMIT);
1171 	shost_printk(KERN_INFO, cb->host,
1172 		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1173 		     cb->stripe_size, cb->segment_size,
1174 		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
1175 		     cb->safte_enabled ?
1176 		     "  SAF-TE Enclosure Management Enabled" : "");
1177 	shost_printk(KERN_INFO, cb->host,
1178 		     "  Physical: %d/%d channels %d/%d/%d devices\n",
1179 		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1180 		     cb->host->max_id);
1181 
1182 	shost_printk(KERN_INFO, cb->host,
1183 		     "  Logical: 1/1 channels, %d/%d disks\n",
1184 		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1185 
1186 out_free:
1187 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1188 			  enquiry2, enquiry2_addr);
1189 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1190 			  config2, config2_addr);
1191 
1192 	return ret;
1193 }
1194 
1195 /*
1196  * myrb_unmap - unmaps controller structures
1197  */
1198 static void myrb_unmap(struct myrb_hba *cb)
1199 {
1200 	if (cb->ldev_info_buf) {
1201 		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1202 			MYRB_MAX_LDEVS;
1203 		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1204 				  cb->ldev_info_buf, cb->ldev_info_addr);
1205 		cb->ldev_info_buf = NULL;
1206 	}
1207 	if (cb->err_table) {
1208 		size_t err_table_size = sizeof(struct myrb_error_entry) *
1209 			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1210 		dma_free_coherent(&cb->pdev->dev, err_table_size,
1211 				  cb->err_table, cb->err_table_addr);
1212 		cb->err_table = NULL;
1213 	}
1214 	if (cb->enquiry) {
1215 		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1216 				  cb->enquiry, cb->enquiry_addr);
1217 		cb->enquiry = NULL;
1218 	}
1219 	if (cb->first_stat_mbox) {
1220 		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1221 				  cb->first_stat_mbox, cb->stat_mbox_addr);
1222 		cb->first_stat_mbox = NULL;
1223 	}
1224 	if (cb->first_cmd_mbox) {
1225 		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1226 				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
1227 		cb->first_cmd_mbox = NULL;
1228 	}
1229 }
1230 
1231 /*
1232  * myrb_cleanup - cleanup controller structures
1233  */
1234 static void myrb_cleanup(struct myrb_hba *cb)
1235 {
1236 	struct pci_dev *pdev = cb->pdev;
1237 
1238 	/* Free the memory mailbox, status, and related structures */
1239 	myrb_unmap(cb);
1240 
1241 	if (cb->mmio_base) {
1242 		cb->disable_intr(cb->io_base);
1243 		iounmap(cb->mmio_base);
1244 	}
1245 	if (cb->irq)
1246 		free_irq(cb->irq, cb);
1247 	if (cb->io_addr)
1248 		release_region(cb->io_addr, 0x80);
1249 	pci_set_drvdata(pdev, NULL);
1250 	pci_disable_device(pdev);
1251 	scsi_host_put(cb->host);
1252 }
1253 
1254 static int myrb_host_reset(struct scsi_cmnd *scmd)
1255 {
1256 	struct Scsi_Host *shost = scmd->device->host;
1257 	struct myrb_hba *cb = shost_priv(shost);
1258 
1259 	cb->reset(cb->io_base);
1260 	return SUCCESS;
1261 }
1262 
1263 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1264 		struct scsi_cmnd *scmd)
1265 {
1266 	struct myrb_hba *cb = shost_priv(shost);
1267 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1268 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1269 	struct myrb_dcdb *dcdb;
1270 	dma_addr_t dcdb_addr;
1271 	struct scsi_device *sdev = scmd->device;
1272 	struct scatterlist *sgl;
1273 	unsigned long flags;
1274 	int nsge;
1275 
1276 	myrb_reset_cmd(cmd_blk);
1277 	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1278 	if (!dcdb)
1279 		return SCSI_MLQUEUE_HOST_BUSY;
1280 	nsge = scsi_dma_map(scmd);
1281 	if (nsge > 1) {
1282 		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1283 		scmd->result = (DID_ERROR << 16);
1284 		scmd->scsi_done(scmd);
1285 		return 0;
1286 	}
1287 
1288 	mbox->type3.opcode = MYRB_CMD_DCDB;
1289 	mbox->type3.id = scmd->request->tag + 3;
1290 	mbox->type3.addr = dcdb_addr;
1291 	dcdb->channel = sdev->channel;
1292 	dcdb->target = sdev->id;
1293 	switch (scmd->sc_data_direction) {
1294 	case DMA_NONE:
1295 		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1296 		break;
1297 	case DMA_TO_DEVICE:
1298 		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1299 		break;
1300 	case DMA_FROM_DEVICE:
1301 		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1302 		break;
1303 	default:
1304 		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1305 		break;
1306 	}
1307 	dcdb->early_status = false;
1308 	if (scmd->request->timeout <= 10)
1309 		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1310 	else if (scmd->request->timeout <= 60)
1311 		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1312 	else if (scmd->request->timeout <= 600)
1313 		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1314 	else
1315 		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1316 	dcdb->no_autosense = false;
1317 	dcdb->allow_disconnect = true;
1318 	sgl = scsi_sglist(scmd);
1319 	dcdb->dma_addr = sg_dma_address(sgl);
1320 	if (sg_dma_len(sgl) > USHRT_MAX) {
1321 		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1322 		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1323 	} else {
1324 		dcdb->xfer_len_lo = sg_dma_len(sgl);
1325 		dcdb->xfer_len_hi4 = 0;
1326 	}
1327 	dcdb->cdb_len = scmd->cmd_len;
1328 	dcdb->sense_len = sizeof(dcdb->sense);
1329 	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1330 
1331 	spin_lock_irqsave(&cb->queue_lock, flags);
1332 	cb->qcmd(cb, cmd_blk);
1333 	spin_unlock_irqrestore(&cb->queue_lock, flags);
1334 	return 0;
1335 }
1336 
1337 static void myrb_inquiry(struct myrb_hba *cb,
1338 		struct scsi_cmnd *scmd)
1339 {
1340 	unsigned char inq[36] = {
1341 		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1342 		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1343 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1344 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1345 		0x20, 0x20, 0x20, 0x20,
1346 	};
1347 
1348 	if (cb->bus_width > 16)
1349 		inq[7] |= 1 << 6;
1350 	if (cb->bus_width > 8)
1351 		inq[7] |= 1 << 5;
1352 	memcpy(&inq[16], cb->model_name, 16);
1353 	memcpy(&inq[32], cb->fw_version, 1);
1354 	memcpy(&inq[33], &cb->fw_version[2], 2);
1355 	memcpy(&inq[35], &cb->fw_version[7], 1);
1356 
1357 	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1358 }
1359 
1360 static void
1361 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1362 		struct myrb_ldev_info *ldev_info)
1363 {
1364 	unsigned char modes[32], *mode_pg;
1365 	bool dbd;
1366 	size_t mode_len;
1367 
1368 	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1369 	if (dbd) {
1370 		mode_len = 24;
1371 		mode_pg = &modes[4];
1372 	} else {
1373 		mode_len = 32;
1374 		mode_pg = &modes[12];
1375 	}
1376 	memset(modes, 0, sizeof(modes));
1377 	modes[0] = mode_len - 1;
1378 	if (!dbd) {
1379 		unsigned char *block_desc = &modes[4];
1380 
1381 		modes[3] = 8;
1382 		put_unaligned_be32(ldev_info->size, &block_desc[0]);
1383 		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1384 	}
1385 	mode_pg[0] = 0x08;
1386 	mode_pg[1] = 0x12;
1387 	if (ldev_info->wb_enabled)
1388 		mode_pg[2] |= 0x04;
1389 	if (cb->segment_size) {
1390 		mode_pg[2] |= 0x08;
1391 		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1392 	}
1393 
1394 	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1395 }
1396 
/*
 * myrb_request_sense - emulate REQUEST SENSE for a logical drive
 *
 * Logical drives carry no real sense data, so build a "no sense"
 * response in the command's sense buffer and copy it into the data-in
 * buffer.
 */
static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}
1404 
1405 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1406 		struct myrb_ldev_info *ldev_info)
1407 {
1408 	unsigned char data[8];
1409 
1410 	dev_dbg(&scmd->device->sdev_gendev,
1411 		"Capacity %u, blocksize %u\n",
1412 		ldev_info->size, cb->ldev_block_size);
1413 	put_unaligned_be32(ldev_info->size - 1, &data[0]);
1414 	put_unaligned_be32(cb->ldev_block_size, &data[4]);
1415 	scsi_sg_copy_from_buffer(scmd, data, 8);
1416 }
1417 
1418 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1419 		struct scsi_cmnd *scmd)
1420 {
1421 	struct myrb_hba *cb = shost_priv(shost);
1422 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1423 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1424 	struct myrb_ldev_info *ldev_info;
1425 	struct scsi_device *sdev = scmd->device;
1426 	struct scatterlist *sgl;
1427 	unsigned long flags;
1428 	u64 lba;
1429 	u32 block_cnt;
1430 	int nsge;
1431 
1432 	ldev_info = sdev->hostdata;
1433 	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
1434 	    ldev_info->state != MYRB_DEVICE_WO) {
1435 		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1436 			sdev->id, ldev_info ? ldev_info->state : 0xff);
1437 		scmd->result = (DID_BAD_TARGET << 16);
1438 		scmd->scsi_done(scmd);
1439 		return 0;
1440 	}
1441 	switch (scmd->cmnd[0]) {
1442 	case TEST_UNIT_READY:
1443 		scmd->result = (DID_OK << 16);
1444 		scmd->scsi_done(scmd);
1445 		return 0;
1446 	case INQUIRY:
1447 		if (scmd->cmnd[1] & 1) {
1448 			/* Illegal request, invalid field in CDB */
1449 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1450 		} else {
1451 			myrb_inquiry(cb, scmd);
1452 			scmd->result = (DID_OK << 16);
1453 		}
1454 		scmd->scsi_done(scmd);
1455 		return 0;
1456 	case SYNCHRONIZE_CACHE:
1457 		scmd->result = (DID_OK << 16);
1458 		scmd->scsi_done(scmd);
1459 		return 0;
1460 	case MODE_SENSE:
1461 		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1462 		    (scmd->cmnd[2] & 0x3F) != 0x08) {
1463 			/* Illegal request, invalid field in CDB */
1464 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1465 		} else {
1466 			myrb_mode_sense(cb, scmd, ldev_info);
1467 			scmd->result = (DID_OK << 16);
1468 		}
1469 		scmd->scsi_done(scmd);
1470 		return 0;
1471 	case READ_CAPACITY:
1472 		if ((scmd->cmnd[1] & 1) ||
1473 		    (scmd->cmnd[8] & 1)) {
1474 			/* Illegal request, invalid field in CDB */
1475 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1476 			scmd->scsi_done(scmd);
1477 			return 0;
1478 		}
1479 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1480 		if (lba) {
1481 			/* Illegal request, invalid field in CDB */
1482 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1483 			scmd->scsi_done(scmd);
1484 			return 0;
1485 		}
1486 		myrb_read_capacity(cb, scmd, ldev_info);
1487 		scmd->scsi_done(scmd);
1488 		return 0;
1489 	case REQUEST_SENSE:
1490 		myrb_request_sense(cb, scmd);
1491 		scmd->result = (DID_OK << 16);
1492 		return 0;
1493 	case SEND_DIAGNOSTIC:
1494 		if (scmd->cmnd[1] != 0x04) {
1495 			/* Illegal request, invalid field in CDB */
1496 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1497 		} else {
1498 			/* Assume good status */
1499 			scmd->result = (DID_OK << 16);
1500 		}
1501 		scmd->scsi_done(scmd);
1502 		return 0;
1503 	case READ_6:
1504 		if (ldev_info->state == MYRB_DEVICE_WO) {
1505 			/* Data protect, attempt to read invalid data */
1506 			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1507 			scmd->scsi_done(scmd);
1508 			return 0;
1509 		}
1510 		fallthrough;
1511 	case WRITE_6:
1512 		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1513 		       (scmd->cmnd[2] << 8) |
1514 		       scmd->cmnd[3]);
1515 		block_cnt = scmd->cmnd[4];
1516 		break;
1517 	case READ_10:
1518 		if (ldev_info->state == MYRB_DEVICE_WO) {
1519 			/* Data protect, attempt to read invalid data */
1520 			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1521 			scmd->scsi_done(scmd);
1522 			return 0;
1523 		}
1524 		fallthrough;
1525 	case WRITE_10:
1526 	case VERIFY:		/* 0x2F */
1527 	case WRITE_VERIFY:	/* 0x2E */
1528 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1529 		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1530 		break;
1531 	case READ_12:
1532 		if (ldev_info->state == MYRB_DEVICE_WO) {
1533 			/* Data protect, attempt to read invalid data */
1534 			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1535 			scmd->scsi_done(scmd);
1536 			return 0;
1537 		}
1538 		fallthrough;
1539 	case WRITE_12:
1540 	case VERIFY_12: /* 0xAF */
1541 	case WRITE_VERIFY_12:	/* 0xAE */
1542 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1543 		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1544 		break;
1545 	default:
1546 		/* Illegal request, invalid opcode */
1547 		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
1548 		scmd->scsi_done(scmd);
1549 		return 0;
1550 	}
1551 
1552 	myrb_reset_cmd(cmd_blk);
1553 	mbox->type5.id = scmd->request->tag + 3;
1554 	if (scmd->sc_data_direction == DMA_NONE)
1555 		goto submit;
1556 	nsge = scsi_dma_map(scmd);
1557 	if (nsge == 1) {
1558 		sgl = scsi_sglist(scmd);
1559 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1560 			mbox->type5.opcode = MYRB_CMD_READ;
1561 		else
1562 			mbox->type5.opcode = MYRB_CMD_WRITE;
1563 
1564 		mbox->type5.ld.xfer_len = block_cnt;
1565 		mbox->type5.ld.ldev_num = sdev->id;
1566 		mbox->type5.lba = lba;
1567 		mbox->type5.addr = (u32)sg_dma_address(sgl);
1568 	} else {
1569 		struct myrb_sge *hw_sgl;
1570 		dma_addr_t hw_sgl_addr;
1571 		int i;
1572 
1573 		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1574 		if (!hw_sgl)
1575 			return SCSI_MLQUEUE_HOST_BUSY;
1576 
1577 		cmd_blk->sgl = hw_sgl;
1578 		cmd_blk->sgl_addr = hw_sgl_addr;
1579 
1580 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1581 			mbox->type5.opcode = MYRB_CMD_READ_SG;
1582 		else
1583 			mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1584 
1585 		mbox->type5.ld.xfer_len = block_cnt;
1586 		mbox->type5.ld.ldev_num = sdev->id;
1587 		mbox->type5.lba = lba;
1588 		mbox->type5.addr = hw_sgl_addr;
1589 		mbox->type5.sg_count = nsge;
1590 
1591 		scsi_for_each_sg(scmd, sgl, nsge, i) {
1592 			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1593 			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1594 			hw_sgl++;
1595 		}
1596 	}
1597 submit:
1598 	spin_lock_irqsave(&cb->queue_lock, flags);
1599 	cb->qcmd(cb, cmd_blk);
1600 	spin_unlock_irqrestore(&cb->queue_lock, flags);
1601 
1602 	return 0;
1603 }
1604 
1605 static int myrb_queuecommand(struct Scsi_Host *shost,
1606 		struct scsi_cmnd *scmd)
1607 {
1608 	struct scsi_device *sdev = scmd->device;
1609 
1610 	if (sdev->channel > myrb_logical_channel(shost)) {
1611 		scmd->result = (DID_BAD_TARGET << 16);
1612 		scmd->scsi_done(scmd);
1613 		return 0;
1614 	}
1615 	if (sdev->channel == myrb_logical_channel(shost))
1616 		return myrb_ldev_queuecommand(shost, scmd);
1617 
1618 	return myrb_pthru_queuecommand(shost, scmd);
1619 }
1620 
1621 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1622 {
1623 	struct myrb_hba *cb = shost_priv(sdev->host);
1624 	struct myrb_ldev_info *ldev_info;
1625 	unsigned short ldev_num = sdev->id;
1626 	enum raid_level level;
1627 
1628 	ldev_info = cb->ldev_info_buf + ldev_num;
1629 	if (!ldev_info)
1630 		return -ENXIO;
1631 
1632 	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1633 	if (!sdev->hostdata)
1634 		return -ENOMEM;
1635 	dev_dbg(&sdev->sdev_gendev,
1636 		"slave alloc ldev %d state %x\n",
1637 		ldev_num, ldev_info->state);
1638 	memcpy(sdev->hostdata, ldev_info,
1639 	       sizeof(*ldev_info));
1640 	switch (ldev_info->raid_level) {
1641 	case MYRB_RAID_LEVEL0:
1642 		level = RAID_LEVEL_LINEAR;
1643 		break;
1644 	case MYRB_RAID_LEVEL1:
1645 		level = RAID_LEVEL_1;
1646 		break;
1647 	case MYRB_RAID_LEVEL3:
1648 		level = RAID_LEVEL_3;
1649 		break;
1650 	case MYRB_RAID_LEVEL5:
1651 		level = RAID_LEVEL_5;
1652 		break;
1653 	case MYRB_RAID_LEVEL6:
1654 		level = RAID_LEVEL_6;
1655 		break;
1656 	case MYRB_RAID_JBOD:
1657 		level = RAID_LEVEL_JBOD;
1658 		break;
1659 	default:
1660 		level = RAID_LEVEL_UNKNOWN;
1661 		break;
1662 	}
1663 	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1664 	return 0;
1665 }
1666 
1667 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1668 {
1669 	struct myrb_hba *cb = shost_priv(sdev->host);
1670 	struct myrb_pdev_state *pdev_info;
1671 	unsigned short status;
1672 
1673 	if (sdev->id > MYRB_MAX_TARGETS)
1674 		return -ENXIO;
1675 
1676 	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1677 	if (!pdev_info)
1678 		return -ENOMEM;
1679 
1680 	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1681 				  sdev, pdev_info);
1682 	if (status != MYRB_STATUS_SUCCESS) {
1683 		dev_dbg(&sdev->sdev_gendev,
1684 			"Failed to get device state, status %x\n",
1685 			status);
1686 		kfree(pdev_info);
1687 		return -ENXIO;
1688 	}
1689 	if (!pdev_info->present) {
1690 		dev_dbg(&sdev->sdev_gendev,
1691 			"device not present, skip\n");
1692 		kfree(pdev_info);
1693 		return -ENXIO;
1694 	}
1695 	dev_dbg(&sdev->sdev_gendev,
1696 		"slave alloc pdev %d:%d state %x\n",
1697 		sdev->channel, sdev->id, pdev_info->state);
1698 	sdev->hostdata = pdev_info;
1699 
1700 	return 0;
1701 }
1702 
1703 static int myrb_slave_alloc(struct scsi_device *sdev)
1704 {
1705 	if (sdev->channel > myrb_logical_channel(sdev->host))
1706 		return -ENXIO;
1707 
1708 	if (sdev->lun > 0)
1709 		return -ENXIO;
1710 
1711 	if (sdev->channel == myrb_logical_channel(sdev->host))
1712 		return myrb_ldev_slave_alloc(sdev);
1713 
1714 	return myrb_pdev_slave_alloc(sdev);
1715 }
1716 
1717 static int myrb_slave_configure(struct scsi_device *sdev)
1718 {
1719 	struct myrb_ldev_info *ldev_info;
1720 
1721 	if (sdev->channel > myrb_logical_channel(sdev->host))
1722 		return -ENXIO;
1723 
1724 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
1725 		sdev->no_uld_attach = 1;
1726 		return 0;
1727 	}
1728 	if (sdev->lun != 0)
1729 		return -ENXIO;
1730 
1731 	ldev_info = sdev->hostdata;
1732 	if (!ldev_info)
1733 		return -ENXIO;
1734 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1735 		sdev_printk(KERN_INFO, sdev,
1736 			    "Logical drive is %s\n",
1737 			    myrb_devstate_name(ldev_info->state));
1738 
1739 	sdev->tagged_supported = 1;
1740 	return 0;
1741 }
1742 
/*
 * myrb_slave_destroy - free the per-device state attached at slave_alloc
 * (ldev info copy or pdev state; kfree(NULL) is a no-op).
 */
static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}
1747 
1748 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1749 		sector_t capacity, int geom[])
1750 {
1751 	struct myrb_hba *cb = shost_priv(sdev->host);
1752 
1753 	geom[0] = cb->ldev_geom_heads;
1754 	geom[1] = cb->ldev_geom_sectors;
1755 	geom[2] = sector_div(capacity, geom[0] * geom[1]);
1756 
1757 	return 0;
1758 }
1759 
1760 static ssize_t raid_state_show(struct device *dev,
1761 		struct device_attribute *attr, char *buf)
1762 {
1763 	struct scsi_device *sdev = to_scsi_device(dev);
1764 	struct myrb_hba *cb = shost_priv(sdev->host);
1765 	int ret;
1766 
1767 	if (!sdev->hostdata)
1768 		return snprintf(buf, 16, "Unknown\n");
1769 
1770 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1771 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1772 		const char *name;
1773 
1774 		name = myrb_devstate_name(ldev_info->state);
1775 		if (name)
1776 			ret = snprintf(buf, 32, "%s\n", name);
1777 		else
1778 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1779 				       ldev_info->state);
1780 	} else {
1781 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
1782 		unsigned short status;
1783 		const char *name;
1784 
1785 		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1786 					  sdev, pdev_info);
1787 		if (status != MYRB_STATUS_SUCCESS)
1788 			sdev_printk(KERN_INFO, sdev,
1789 				    "Failed to get device state, status %x\n",
1790 				    status);
1791 
1792 		if (!pdev_info->present)
1793 			name = "Removed";
1794 		else
1795 			name = myrb_devstate_name(pdev_info->state);
1796 		if (name)
1797 			ret = snprintf(buf, 32, "%s\n", name);
1798 		else
1799 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1800 				       pdev_info->state);
1801 	}
1802 	return ret;
1803 }
1804 
1805 static ssize_t raid_state_store(struct device *dev,
1806 		struct device_attribute *attr, const char *buf, size_t count)
1807 {
1808 	struct scsi_device *sdev = to_scsi_device(dev);
1809 	struct myrb_hba *cb = shost_priv(sdev->host);
1810 	struct myrb_pdev_state *pdev_info;
1811 	enum myrb_devstate new_state;
1812 	unsigned short status;
1813 
1814 	if (!strncmp(buf, "kill", 4) ||
1815 	    !strncmp(buf, "offline", 7))
1816 		new_state = MYRB_DEVICE_DEAD;
1817 	else if (!strncmp(buf, "online", 6))
1818 		new_state = MYRB_DEVICE_ONLINE;
1819 	else if (!strncmp(buf, "standby", 7))
1820 		new_state = MYRB_DEVICE_STANDBY;
1821 	else
1822 		return -EINVAL;
1823 
1824 	pdev_info = sdev->hostdata;
1825 	if (!pdev_info) {
1826 		sdev_printk(KERN_INFO, sdev,
1827 			    "Failed - no physical device information\n");
1828 		return -ENXIO;
1829 	}
1830 	if (!pdev_info->present) {
1831 		sdev_printk(KERN_INFO, sdev,
1832 			    "Failed - device not present\n");
1833 		return -ENXIO;
1834 	}
1835 
1836 	if (pdev_info->state == new_state)
1837 		return count;
1838 
1839 	status = myrb_set_pdev_state(cb, sdev, new_state);
1840 	switch (status) {
1841 	case MYRB_STATUS_SUCCESS:
1842 		break;
1843 	case MYRB_STATUS_START_DEVICE_FAILED:
1844 		sdev_printk(KERN_INFO, sdev,
1845 			     "Failed - Unable to Start Device\n");
1846 		count = -EAGAIN;
1847 		break;
1848 	case MYRB_STATUS_NO_DEVICE:
1849 		sdev_printk(KERN_INFO, sdev,
1850 			    "Failed - No Device at Address\n");
1851 		count = -ENODEV;
1852 		break;
1853 	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1854 		sdev_printk(KERN_INFO, sdev,
1855 			 "Failed - Invalid Channel or Target or Modifier\n");
1856 		count = -EINVAL;
1857 		break;
1858 	case MYRB_STATUS_CHANNEL_BUSY:
1859 		sdev_printk(KERN_INFO, sdev,
1860 			 "Failed - Channel Busy\n");
1861 		count = -EBUSY;
1862 		break;
1863 	default:
1864 		sdev_printk(KERN_INFO, sdev,
1865 			 "Failed - Unexpected Status %04X\n", status);
1866 		count = -EIO;
1867 		break;
1868 	}
1869 	return count;
1870 }
1871 static DEVICE_ATTR_RW(raid_state);
1872 
1873 static ssize_t raid_level_show(struct device *dev,
1874 		struct device_attribute *attr, char *buf)
1875 {
1876 	struct scsi_device *sdev = to_scsi_device(dev);
1877 
1878 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1879 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1880 		const char *name;
1881 
1882 		if (!ldev_info)
1883 			return -ENXIO;
1884 
1885 		name = myrb_raidlevel_name(ldev_info->raid_level);
1886 		if (!name)
1887 			return snprintf(buf, 32, "Invalid (%02X)\n",
1888 					ldev_info->state);
1889 		return snprintf(buf, 32, "%s\n", name);
1890 	}
1891 	return snprintf(buf, 32, "Physical Drive\n");
1892 }
1893 static DEVICE_ATTR_RO(raid_level);
1894 
1895 static ssize_t rebuild_show(struct device *dev,
1896 		struct device_attribute *attr, char *buf)
1897 {
1898 	struct scsi_device *sdev = to_scsi_device(dev);
1899 	struct myrb_hba *cb = shost_priv(sdev->host);
1900 	struct myrb_rbld_progress rbld_buf;
1901 	unsigned char status;
1902 
1903 	if (sdev->channel < myrb_logical_channel(sdev->host))
1904 		return snprintf(buf, 32, "physical device - not rebuilding\n");
1905 
1906 	status = myrb_get_rbld_progress(cb, &rbld_buf);
1907 
1908 	if (rbld_buf.ldev_num != sdev->id ||
1909 	    status != MYRB_STATUS_SUCCESS)
1910 		return snprintf(buf, 32, "not rebuilding\n");
1911 
1912 	return snprintf(buf, 32, "rebuilding block %u of %u\n",
1913 			rbld_buf.ldev_size - rbld_buf.blocks_left,
1914 			rbld_buf.ldev_size);
1915 }
1916 
/*
 * rebuild_store - sysfs 'rebuild' attribute, write side
 *
 * Writing a non-zero value starts an asynchronous rebuild of the
 * physical drive behind this device; writing zero cancels a rebuild
 * that is in progress.  Returns @count on success or a negative errno.
 */
static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	/* Rebuild applies to physical devices only */
	if (sdev->channel >= myrb_logical_channel(sdev->host))
		return -ENXIO;

	/* Probe whether a rebuild is currently running */
	status = myrb_get_rbld_progress(cb, NULL);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Initiated; already in progress\n");
			return -EALREADY;
		}
		/* Kick off an asynchronous rebuild via the direct-command slot */
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
		mbox->type3D.id = MYRB_DCMD_TAG;
		mbox->type3D.channel = sdev->channel;
		mbox->type3D.target = sdev->id;
		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (status != MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Cancelled; not in progress\n");
			return 0;
		}

		/* REBUILD_CONTROL needs a DMA-visible buffer for the rate byte */
		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Rebuild Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		/* A rate of 0xFF cancels the running rebuild */
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	/* Translate well-known start-rebuild failures into a message */
	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Attempt to Rebuild Online or Unresponsive Drive";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid Device Address";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed, status 0x%x\n", status);

	return -EIO;
}
static DEVICE_ATTR_RW(rebuild);
2021 
2022 static ssize_t consistency_check_store(struct device *dev,
2023 		struct device_attribute *attr, const char *buf, size_t count)
2024 {
2025 	struct scsi_device *sdev = to_scsi_device(dev);
2026 	struct myrb_hba *cb = shost_priv(sdev->host);
2027 	struct myrb_rbld_progress rbld_buf;
2028 	struct myrb_cmdblk *cmd_blk;
2029 	union myrb_cmd_mbox *mbox;
2030 	unsigned short ldev_num = 0xFFFF;
2031 	unsigned short status;
2032 	int rc, start;
2033 	const char *msg;
2034 
2035 	rc = kstrtoint(buf, 0, &start);
2036 	if (rc)
2037 		return rc;
2038 
2039 	if (sdev->channel < myrb_logical_channel(sdev->host))
2040 		return -ENXIO;
2041 
2042 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2043 	if (start) {
2044 		if (status == MYRB_STATUS_SUCCESS) {
2045 			sdev_printk(KERN_INFO, sdev,
2046 				    "Check Consistency Not Initiated; already in progress\n");
2047 			return -EALREADY;
2048 		}
2049 		mutex_lock(&cb->dcmd_mutex);
2050 		cmd_blk = &cb->dcmd_blk;
2051 		myrb_reset_cmd(cmd_blk);
2052 		mbox = &cmd_blk->mbox;
2053 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2054 		mbox->type3C.id = MYRB_DCMD_TAG;
2055 		mbox->type3C.ldev_num = sdev->id;
2056 		mbox->type3C.auto_restore = true;
2057 
2058 		status = myrb_exec_cmd(cb, cmd_blk);
2059 		mutex_unlock(&cb->dcmd_mutex);
2060 	} else {
2061 		struct pci_dev *pdev = cb->pdev;
2062 		unsigned char *rate;
2063 		dma_addr_t rate_addr;
2064 
2065 		if (ldev_num != sdev->id) {
2066 			sdev_printk(KERN_INFO, sdev,
2067 				    "Check Consistency Not Cancelled; not in progress\n");
2068 			return 0;
2069 		}
2070 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2071 					  &rate_addr, GFP_KERNEL);
2072 		if (rate == NULL) {
2073 			sdev_printk(KERN_INFO, sdev,
2074 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2075 			return -ENOMEM;
2076 		}
2077 		mutex_lock(&cb->dcmd_mutex);
2078 		cmd_blk = &cb->dcmd_blk;
2079 		myrb_reset_cmd(cmd_blk);
2080 		mbox = &cmd_blk->mbox;
2081 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2082 		mbox->type3R.id = MYRB_DCMD_TAG;
2083 		mbox->type3R.rbld_rate = 0xFF;
2084 		mbox->type3R.addr = rate_addr;
2085 		status = myrb_exec_cmd(cb, cmd_blk);
2086 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2087 		mutex_unlock(&cb->dcmd_mutex);
2088 	}
2089 	if (status == MYRB_STATUS_SUCCESS) {
2090 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2091 			    start ? "Initiated" : "Cancelled");
2092 		return count;
2093 	}
2094 	if (!start) {
2095 		sdev_printk(KERN_INFO, sdev,
2096 			    "Check Consistency Not Cancelled, status 0x%x\n",
2097 			    status);
2098 		return -EIO;
2099 	}
2100 
2101 	switch (status) {
2102 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2103 		msg = "Dependent Physical Device is DEAD";
2104 		break;
2105 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2106 		msg = "New Disk Failed During Rebuild";
2107 		break;
2108 	case MYRB_STATUS_INVALID_ADDRESS:
2109 		msg = "Invalid or Nonredundant Logical Drive";
2110 		break;
2111 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2112 		msg = "Already in Progress";
2113 		break;
2114 	default:
2115 		msg = NULL;
2116 		break;
2117 	}
2118 	if (msg)
2119 		sdev_printk(KERN_INFO, sdev,
2120 			    "Check Consistency Failed - %s\n", msg);
2121 	else
2122 		sdev_printk(KERN_INFO, sdev,
2123 			    "Check Consistency Failed, status 0x%x\n", status);
2124 
2125 	return -EIO;
2126 }
2127 
/*
 * consistency_check_show - sysfs "consistency_check" attribute reader
 *
 * Progress is obtained through the same firmware rebuild-progress query
 * that rebuild_show() uses, so simply delegate to it.
 */
static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);
2134 
2135 static ssize_t ctlr_num_show(struct device *dev,
2136 		struct device_attribute *attr, char *buf)
2137 {
2138 	struct Scsi_Host *shost = class_to_shost(dev);
2139 	struct myrb_hba *cb = shost_priv(shost);
2140 
2141 	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2142 }
2143 static DEVICE_ATTR_RO(ctlr_num);
2144 
2145 static ssize_t firmware_show(struct device *dev,
2146 		struct device_attribute *attr, char *buf)
2147 {
2148 	struct Scsi_Host *shost = class_to_shost(dev);
2149 	struct myrb_hba *cb = shost_priv(shost);
2150 
2151 	return snprintf(buf, 16, "%s\n", cb->fw_version);
2152 }
2153 static DEVICE_ATTR_RO(firmware);
2154 
2155 static ssize_t model_show(struct device *dev,
2156 		struct device_attribute *attr, char *buf)
2157 {
2158 	struct Scsi_Host *shost = class_to_shost(dev);
2159 	struct myrb_hba *cb = shost_priv(shost);
2160 
2161 	return snprintf(buf, 16, "%s\n", cb->model_name);
2162 }
2163 static DEVICE_ATTR_RO(model);
2164 
2165 static ssize_t flush_cache_store(struct device *dev,
2166 		struct device_attribute *attr, const char *buf, size_t count)
2167 {
2168 	struct Scsi_Host *shost = class_to_shost(dev);
2169 	struct myrb_hba *cb = shost_priv(shost);
2170 	unsigned short status;
2171 
2172 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2173 	if (status == MYRB_STATUS_SUCCESS) {
2174 		shost_printk(KERN_INFO, shost,
2175 			     "Cache Flush Completed\n");
2176 		return count;
2177 	}
2178 	shost_printk(KERN_INFO, shost,
2179 		     "Cache Flush Failed, status %x\n", status);
2180 	return -EIO;
2181 }
2182 static DEVICE_ATTR_WO(flush_cache);
2183 
/* Per-SCSI-device sysfs attributes exported by this driver. */
static struct device_attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild,
	&dev_attr_consistency_check,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};
2191 
/* Per-host sysfs attributes exported by this driver. */
static struct device_attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num,
	&dev_attr_model,
	&dev_attr_firmware,
	&dev_attr_flush_cache,
	NULL,
};
2199 
/* SCSI host template shared by all DAC960 V1-firmware (myrb) controllers. */
static struct scsi_host_template myrb_template = {
	.module			= THIS_MODULE,
	.name			= "DAC960",
	.proc_name		= "myrb",
	.queuecommand		= myrb_queuecommand,
	.eh_host_reset_handler	= myrb_host_reset,
	.slave_alloc		= myrb_slave_alloc,
	.slave_configure	= myrb_slave_configure,
	.slave_destroy		= myrb_slave_destroy,
	.bios_param		= myrb_biosparam,
	.cmd_size		= sizeof(struct myrb_cmdblk),
	.shost_attrs		= myrb_shost_attrs,
	.sdev_attrs		= myrb_sdev_attrs,
	.this_id		= -1,
};
2215 
/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 *
 * Return: 1 if @dev sits on the logical-drive channel, 0 otherwise.
 */
static int myrb_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sdev->channel == myrb_logical_channel(sdev->host);
}
2226 
2227 /**
2228  * myrb_get_resync - get raid volume resync percent complete
2229  * @dev: the device struct object
2230  */
2231 static void myrb_get_resync(struct device *dev)
2232 {
2233 	struct scsi_device *sdev = to_scsi_device(dev);
2234 	struct myrb_hba *cb = shost_priv(sdev->host);
2235 	struct myrb_rbld_progress rbld_buf;
2236 	unsigned int percent_complete = 0;
2237 	unsigned short status;
2238 	unsigned int ldev_size = 0, remaining = 0;
2239 
2240 	if (sdev->channel < myrb_logical_channel(sdev->host))
2241 		return;
2242 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2243 	if (status == MYRB_STATUS_SUCCESS) {
2244 		if (rbld_buf.ldev_num == sdev->id) {
2245 			ldev_size = rbld_buf.ldev_size;
2246 			remaining = rbld_buf.blocks_left;
2247 		}
2248 	}
2249 	if (remaining && ldev_size)
2250 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2251 	raid_set_resync(myrb_raid_template, dev, percent_complete);
2252 }
2253 
2254 /**
2255  * myrb_get_state - get raid volume status
2256  * @dev: the device struct object
2257  */
2258 static void myrb_get_state(struct device *dev)
2259 {
2260 	struct scsi_device *sdev = to_scsi_device(dev);
2261 	struct myrb_hba *cb = shost_priv(sdev->host);
2262 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2263 	enum raid_state state = RAID_STATE_UNKNOWN;
2264 	unsigned short status;
2265 
2266 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2267 		state = RAID_STATE_UNKNOWN;
2268 	else {
2269 		status = myrb_get_rbld_progress(cb, NULL);
2270 		if (status == MYRB_STATUS_SUCCESS)
2271 			state = RAID_STATE_RESYNCING;
2272 		else {
2273 			switch (ldev_info->state) {
2274 			case MYRB_DEVICE_ONLINE:
2275 				state = RAID_STATE_ACTIVE;
2276 				break;
2277 			case MYRB_DEVICE_WO:
2278 			case MYRB_DEVICE_CRITICAL:
2279 				state = RAID_STATE_DEGRADED;
2280 				break;
2281 			default:
2282 				state = RAID_STATE_OFFLINE;
2283 			}
2284 		}
2285 	}
2286 	raid_set_state(myrb_raid_template, dev, state);
2287 }
2288 
/* Hooks for the generic raid_class transport attributes. */
static struct raid_function_template myrb_raid_functions = {
	.cookie		= &myrb_template,
	.is_raid	= myrb_is_raid,
	.get_resync	= myrb_get_resync,
	.get_state	= myrb_get_state,
};
2295 
/*
 * myrb_handle_scsi - complete a SCSI command
 *
 * Unmaps the command's DMA, releases any per-command DCDB and
 * scatter/gather pool allocations, translates the controller status
 * into a SCSI result (building sense data where appropriate) and
 * finishes the command.  Called from the interrupt handlers with
 * cb->queue_lock held.
 */
static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned short status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);

	if (cmd_blk->dcdb) {
		/*
		 * NOTE(review): copies 64 bytes of DCDB sense data; assumes
		 * this fits within scmd->sense_buffer -- confirm against
		 * SCSI_SENSE_BUFFERSIZE.
		 */
		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_addr);
		cmd_blk->dcdb = NULL;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	status = cmd_blk->status;
	switch (status) {
	case MYRB_STATUS_SUCCESS:
	case MYRB_STATUS_DEVICE_BUSY:
		/* Pass the controller status through in the low byte. */
		scmd->result = (DID_OK << 16) | status;
		break;
	case MYRB_STATUS_BAD_DATA:
		dev_dbg(&scmd->device->sdev_gendev,
			"Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
		break;
	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
		break;
	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			    "Logical Drive Nonexistent or Offline");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			    "Attempt to Access Beyond End of Logical Drive");
		/* Logical block address out of range */
		scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
		break;
	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X", status);
		scmd->result = (DID_ERROR << 16);
		break;
	}
	scmd->scsi_done(scmd);
}
2365 
2366 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2367 {
2368 	if (!cmd_blk)
2369 		return;
2370 
2371 	if (cmd_blk->completion) {
2372 		complete(cmd_blk->completion);
2373 		cmd_blk->completion = NULL;
2374 	}
2375 }
2376 
/*
 * myrb_monitor - periodic background monitor work
 *
 * Services at most one pending condition per invocation, in fixed
 * priority order: new event-log entries, error table, rebuild progress
 * (urgent first), logical drive info, consistency-check progress,
 * background initialization status, and finally a fresh controller
 * enquiry.  When work was done the next tick is scheduled soon
 * (10 jiffies, or immediately after an enquiry that raised new flags);
 * otherwise the normal MYRB_PRIMARY_MONITOR_INTERVAL applies.
 */
static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		/* Fetch the next unseen controller event in sequence. */
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		/* Rebuild marked urgent: poll it before device info. */
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		/* Nothing pending: issue a new enquiry to refresh state. */
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			/* Enquiry raised new work: run again immediately. */
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = 0;
		}
	}
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
2447 
/*
 * myrb_err_status - reports controller BIOS messages
 * @cb: controller being initialized
 * @error: BIOS error/message code from the error status register
 * @parm0: first parameter byte (typically the target id)
 * @parm1: second parameter byte (typically the channel)
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */
static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cb->pdev;

	switch (error) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			error);
		return true;
	}
	return false;
}
2502 
2503 /*
2504  * Hardware-specific functions
2505  */
2506 
2507 /*
2508  * DAC960 LA Series Controllers
2509  */
2510 
/* Ring the "new hardware mailbox command" doorbell. */
static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}
2515 
/* Acknowledge the hardware mailbox status, freeing the mailbox. */
static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}
2520 
/* Request a controller soft reset via the inbound doorbell. */
static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}
2525 
/* Ring the "new memory mailbox command" doorbell. */
static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}
2530 
/* True while the hardware mailbox still holds an unconsumed command. */
static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	/* LA reports "empty" as a set bit, hence the inversion. */
	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}
2537 
/* True while controller initialization has not yet completed. */
static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}
2544 
/* Acknowledge the hardware mailbox interrupt only. */
static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}
2549 
/* Acknowledge both hardware and memory mailbox interrupts. */
static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}
2555 
/* True when the controller has posted status for a hardware mailbox command. */
static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}
2562 
/* Unmask controller interrupts by clearing the disable bit in the IRQ mask. */
static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
2570 
/* Mask controller interrupts (the |= on 0xFF is a no-op, kept for symmetry). */
static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
2578 
/*
 * Copy a command into the memory mailbox.  The first word (which carries
 * the opcode the controller polls for) is written last so the controller
 * never sees a partially-filled mailbox.
 */
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
2591 
/* Copy a command mailbox into the hardware mailbox registers. */
static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}
2600 
/* Read the 16-bit command completion status register. */
static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}
2605 
/*
 * Fetch a pending BIOS error status.  Returns true (with the error code
 * and its two parameter bytes) when one was pending, after clearing the
 * register; false when nothing is pending.
 */
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}
2622 
2623 static inline unsigned short
2624 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2625 		union myrb_cmd_mbox *mbox)
2626 {
2627 	unsigned short status;
2628 	int timeout = 0;
2629 
2630 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2631 		if (!DAC960_LA_hw_mbox_is_full(base))
2632 			break;
2633 		udelay(10);
2634 		timeout++;
2635 	}
2636 	if (DAC960_LA_hw_mbox_is_full(base)) {
2637 		dev_err(&pdev->dev,
2638 			"Timeout waiting for empty mailbox\n");
2639 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2640 	}
2641 	DAC960_LA_write_hw_mbox(base, mbox);
2642 	DAC960_LA_hw_mbox_new_cmd(base);
2643 	timeout = 0;
2644 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2645 		if (DAC960_LA_hw_mbox_status_available(base))
2646 			break;
2647 		udelay(10);
2648 		timeout++;
2649 	}
2650 	if (!DAC960_LA_hw_mbox_status_available(base)) {
2651 		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2652 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2653 	}
2654 	status = DAC960_LA_read_status(base);
2655 	DAC960_LA_ack_hw_mbox_intr(base);
2656 	DAC960_LA_ack_hw_mbox_status(base);
2657 
2658 	return status;
2659 }
2660 
/*
 * DAC960_LA_hw_init - bring up an LA-series controller
 *
 * Masks interrupts, waits for the controller's own initialization to
 * complete (reporting any BIOS error-status messages seen on the way),
 * enables the memory mailbox interface, then installs the LA-specific
 * method pointers on @cb.  Returns 0 on success or a negative errno.
 */
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		/* A fatal BIOS error message aborts the bring-up. */
		if (DAC960_LA_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	/* Install the LA-series method pointers. */
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}
2702 
/*
 * DAC960_LA_intr_handler - LA-series interrupt handler
 *
 * Acknowledges the interrupt and walks the memory status mailbox ring,
 * matching each completion id to the direct/monitor command block or,
 * for other ids, to the tagged SCSI command, then dispatches the
 * completion.  Each ring slot is cleared and the cursor advanced before
 * the completion is handled; everything runs under cb->queue_lock.
 */
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		/* Ids 0-2 are driver-internal; the rest map to SCSI tags. */
		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Release the ring slot before handling the completion. */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
2748 
/* Probe-time description of LA-series controllers. */
static struct myrb_privdata DAC960_LA_privdata = {
	.hw_init =	DAC960_LA_hw_init,
	.irq_handler =	DAC960_LA_intr_handler,
	.mmio_size =	DAC960_LA_mmio_size,
};
2754 
2755 /*
2756  * DAC960 PG Series Controllers
2757  */
/* Ring the "new hardware mailbox command" doorbell. */
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}
2762 
/* Acknowledge the hardware mailbox status, freeing the mailbox. */
static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}
2767 
/* Request a controller soft reset via the inbound doorbell. */
static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}
2772 
/* Ring the "new memory mailbox command" doorbell. */
static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}
2777 
/* True while the hardware mailbox still holds an unconsumed command. */
static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	/*
	 * NOTE(review): readl() result is truncated to 8 bits here; assumes
	 * the tested flag lives in the low byte of the register.
	 */
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}
2784 
/* True while controller initialization has not yet completed. */
static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}
2791 
/* Acknowledge the hardware mailbox interrupt only. */
static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}
2796 
/* Acknowledge both hardware and memory mailbox interrupts. */
static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}
2802 
/* True when the controller has posted status for a hardware mailbox command. */
static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}
2809 
/* Unmask controller interrupts by clearing the disable bit in the IRQ mask. */
static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
2817 
/* Mask all controller interrupts by writing an all-ones IRQ mask. */
static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
2824 
/*
 * Copy a command into the memory mailbox.  The first word (which carries
 * the opcode the controller polls for) is written last so the controller
 * never sees a partially-filled mailbox.
 */
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
2837 
/* Copy a command mailbox into the hardware mailbox registers. */
static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}
2846 
/* Read the 16-bit command completion status register. */
static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}
2852 
/*
 * Fetch a pending BIOS error status.  Returns true (with the error code
 * and its two parameter bytes) when one was pending, after clearing the
 * register; false when nothing is pending.
 */
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}
2868 
2869 static inline unsigned short
2870 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2871 		union myrb_cmd_mbox *mbox)
2872 {
2873 	unsigned short status;
2874 	int timeout = 0;
2875 
2876 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2877 		if (!DAC960_PG_hw_mbox_is_full(base))
2878 			break;
2879 		udelay(10);
2880 		timeout++;
2881 	}
2882 	if (DAC960_PG_hw_mbox_is_full(base)) {
2883 		dev_err(&pdev->dev,
2884 			"Timeout waiting for empty mailbox\n");
2885 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2886 	}
2887 	DAC960_PG_write_hw_mbox(base, mbox);
2888 	DAC960_PG_hw_mbox_new_cmd(base);
2889 
2890 	timeout = 0;
2891 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2892 		if (DAC960_PG_hw_mbox_status_available(base))
2893 			break;
2894 		udelay(10);
2895 		timeout++;
2896 	}
2897 	if (!DAC960_PG_hw_mbox_status_available(base)) {
2898 		dev_err(&pdev->dev,
2899 			"Timeout waiting for mailbox status\n");
2900 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2901 	}
2902 	status = DAC960_PG_read_status(base);
2903 	DAC960_PG_ack_hw_mbox_intr(base);
2904 	DAC960_PG_ack_hw_mbox_status(base);
2905 
2906 	return status;
2907 }
2908 
/*
 * DAC960_PG_hw_init - bring up a PG-series controller
 *
 * Masks interrupts, waits for the controller's own initialization to
 * complete (reporting any BIOS error-status messages seen on the way),
 * enables the memory mailbox interface, then installs the PG-specific
 * method pointers on @cb.  Returns 0 on success or a negative errno.
 */
static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		/* A fatal BIOS error message aborts the bring-up. */
		if (DAC960_PG_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	/* Install the PG-series method pointers. */
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}
2950 
/*
 * DAC960_PG_intr_handler - PG-series interrupt handler
 *
 * Same structure as the LA handler: acknowledge, then drain the memory
 * status mailbox ring under cb->queue_lock.  Unlike the LA handler it
 * dispatches even when no cmd_blk matched; both myrb_handle_cmdblk()
 * and myrb_handle_scsi() tolerate a NULL cmd_blk.
 */
static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_PG_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		/* Ids 0-2 are driver-internal; the rest map to SCSI tags. */
		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Release the ring slot before handling the completion. */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
2994 
/* Per-family hooks for DAC960 PG controllers, used by myrb_detect(). */
static struct myrb_privdata DAC960_PG_privdata = {
	.hw_init =	DAC960_PG_hw_init,
	.irq_handler =	DAC960_PG_intr_handler,
	.mmio_size =	DAC960_PG_mmio_size,
};
3000 
3001 
3002 /*
3003  * DAC960 PD Series Controllers
3004  */
3005 
/* Ring the inbound doorbell to submit the command already written
 * into the hardware mailbox registers.
 */
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}
3010 
/* Tell the controller the hardware mailbox status has been consumed. */
static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}
3015 
/* Request a controller reset via the inbound doorbell register. */
static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}
3020 
/* Return true while the controller still owns the hardware mailbox,
 * i.e. a previously submitted command has not been fetched yet.
 */
static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}
3027 
/* Return true while controller firmware initialisation is still running. */
static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}
3034 
/* Acknowledge the completion interrupt via the outbound doorbell. */
static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}
3039 
/* Return true when a command completion status is waiting in the
 * outbound status registers.
 */
static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}
3046 
/* Unmask controller interrupts. */
static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}
3051 
/* Mask all controller interrupts (writing 0 clears the enable mask). */
static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
3056 
/* Copy the 13 significant bytes of a command mailbox into the PD
 * hardware mailbox registers (three 32-bit words plus one byte).
 */
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}
3065 
/* Read the tag (command identifier) of the command whose status is
 * currently posted in the outbound registers.
 */
static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}
3071 
/* Read the 16-bit completion status of the currently posted command. */
static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}
3077 
/*
 * Fetch a pending firmware error report, if any.
 *
 * Returns false when no error is pending.  Otherwise stores the error
 * code (with the pending flag stripped) and the two parameter bytes
 * from the mailbox registers, clears the error-status register so the
 * report is consumed, and returns true.
 */
static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
3093 
/*
 * Queue a command to a PD controller: busy-wait until the controller
 * releases the hardware mailbox, then write the command and ring the
 * doorbell.  Installed as cb->qcmd by DAC960_PD_hw_init().
 */
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
3104 
/*
 * DAC960_PD_hw_init - bring a DAC960 PD series controller online
 *
 * Claims the controller's I/O port range, waits for firmware
 * initialisation to complete, enables the (DMA) mailbox interface and
 * interrupts, and installs the PD-specific method pointers on @cb.
 *
 * Returns 0 on success or a negative errno.
 */
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	/* PD controllers are programmed through I/O ports at cb->io_addr. */
	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	/*
	 * Poll until firmware init finishes, bailing out if the firmware
	 * posts a fatal error.  NOTE(review): the claimed I/O region is
	 * not released on the error returns below; presumably the caller's
	 * myrb_cleanup() releases cb->io_addr — confirm.
	 */
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	/* NULL: no memory-mailbox init op for this controller generation. */
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
3146 
/*
 * DAC960_PD_intr_handler - interrupt handler for DAC960 PD controllers
 *
 * Unlike the PG family there is no status mailbox ring; each completed
 * command's tag and status are read directly from the outbound
 * registers and acknowledged before the command is completed.
 */
static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		/* Internal driver commands vs. SCSI host tags (offset 3). */
		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Ack the interrupt and release the mailbox before
		 * completing the command.
		 */
		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		/* NOTE(review): unlike DAC960_P_intr_handler() there is no
		 * "!cmd_blk" guard here, so an unmatched tag >= 3 reaches
		 * myrb_handle_scsi() with NULL arguments — confirm the
		 * handlers tolerate that.
		 */
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3185 
/* Per-family hooks for DAC960 PD controllers, used by myrb_detect(). */
static struct myrb_privdata DAC960_PD_privdata = {
	.hw_init =	DAC960_PD_hw_init,
	.irq_handler =	DAC960_PD_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3191 
3192 
3193 /*
3194  * DAC960 P Series Controllers
3195  *
3196  * Similar to the DAC960 PD Series Controllers, but some commands have
3197  * to be translated.
3198  */
3199 
/* Convert an old-style (P series) ENQUIRY reply into the new layout:
 * the 64 bytes at offset 36 move to offset 132 and the vacated range
 * is zeroed.  The regions do not overlap (36 + 64 <= 132), so plain
 * memcpy is safe here.
 */
static inline void myrb_translate_enquiry(void *enq)
{
	memcpy(enq + 132, enq + 36, 64);
	memset(enq + 36, 0, 96);
}
3205 
/* Repack an old-style GET_DEVICE_STATE reply into the new layout by
 * shifting fields down.  The single-byte copy cannot overlap; the
 * multi-byte moves use memmove because source and destination ranges
 * may overlap.
 */
static inline void myrb_translate_devstate(void *state)
{
	memcpy(state + 2, state + 3, 1);
	memmove(state + 4, state + 5, 2);
	memmove(state + 6, state + 8, 4);
}
3212 
/* Repack a new-format read/write mailbox into the old (P series)
 * format: the logical device number moves into byte 7 and the field
 * previously in byte 7 is folded into the high bits of byte 3.
 * NOTE(review): exact field semantics come from the old DAC960
 * firmware command layout — not verifiable from this file.
 */
static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}
3222 
/* Inverse of myrb_translate_to_rw_command(): restore a completed
 * old-format read/write mailbox to the new layout, moving the logical
 * device number from byte 7 back into bits 3+ of byte 3.
 */
static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}
3232 
/*
 * Queue a command to a P series controller.
 *
 * The P series uses the PD register interface but only understands the
 * old command opcodes, so new-format commands are rewritten to their
 * _OLD equivalents (with read/write mailboxes repacked) before being
 * submitted through the PD hardware mailbox.  The matching reverse
 * translation happens in DAC960_P_intr_handler().
 */
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		/* All other opcodes pass through unchanged. */
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
3269 
3270 
3271 static int DAC960_P_hw_init(struct pci_dev *pdev,
3272 		struct myrb_hba *cb, void __iomem *base)
3273 {
3274 	int timeout = 0;
3275 	unsigned char error, parm0, parm1;
3276 
3277 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3278 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3279 			(unsigned long)cb->io_addr);
3280 		return -EBUSY;
3281 	}
3282 	DAC960_PD_disable_intr(base);
3283 	DAC960_PD_ack_hw_mbox_status(base);
3284 	udelay(1000);
3285 	while (DAC960_PD_init_in_progress(base) &&
3286 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3287 		if (DAC960_PD_read_error_status(base, &error,
3288 						&parm0, &parm1) &&
3289 		    myrb_err_status(cb, error, parm0, parm1))
3290 			return -EAGAIN;
3291 		udelay(10);
3292 		timeout++;
3293 	}
3294 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3295 		dev_err(&pdev->dev,
3296 			"Timeout waiting for Controller Initialisation\n");
3297 		return -ETIMEDOUT;
3298 	}
3299 	if (!myrb_enable_mmio(cb, NULL)) {
3300 		dev_err(&pdev->dev,
3301 			"Unable to allocate DMA mapped memory\n");
3302 		DAC960_PD_reset_ctrl(base);
3303 		return -ETIMEDOUT;
3304 	}
3305 	DAC960_PD_enable_intr(base);
3306 	cb->qcmd = DAC960_P_qcmd;
3307 	cb->disable_intr = DAC960_PD_disable_intr;
3308 	cb->reset = DAC960_PD_reset_ctrl;
3309 
3310 	return 0;
3311 }
3312 
/*
 * DAC960_P_intr_handler - interrupt handler for DAC960 P controllers
 *
 * Same completion flow as the PD handler, plus the reverse of the
 * opcode translation done in DAC960_P_qcmd(): _OLD opcodes are
 * restored to their new-format equivalents and read/write mailboxes
 * (and ENQUIRY data) are translated back before the command completes.
 */
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;


		/* Internal driver commands vs. SCSI host tags (offset 3). */
		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Ack the interrupt and release the mailbox before
		 * completing the command.
		 */
		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		/* No command block to translate or complete for an
		 * unmatched tag.
		 */
		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3383 
/* Per-family hooks for DAC960 P controllers (PD-sized MMIO window). */
static struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3389 
/*
 * myrb_detect - allocate and initialise a controller instance
 *
 * Allocates the Scsi_Host with embedded struct myrb_hba, enables the
 * PCI device, maps the register window, runs the family-specific
 * hardware init and requests the IRQ.
 *
 * Returns the initialised HBA, or NULL on failure (after tearing down
 * via myrb_cleanup()).
 */
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;

	if (pci_enable_device(pdev))
		goto failure;

	/* PD and P controllers expose I/O ports in BAR 0 and the MMIO
	 * window in BAR 1; all other families use BAR 0 for MMIO only.
	 */
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	/* Map at least one page, page-aligned; io_base re-adds the
	 * sub-page offset below.
	 */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	/* myrb_cleanup() releases whatever was acquired above. */
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
3451 
3452 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3453 {
3454 	struct myrb_hba *cb;
3455 	int ret;
3456 
3457 	cb = myrb_detect(dev, entry);
3458 	if (!cb)
3459 		return -ENODEV;
3460 
3461 	ret = myrb_get_hba_config(cb);
3462 	if (ret < 0) {
3463 		myrb_cleanup(cb);
3464 		return ret;
3465 	}
3466 
3467 	if (!myrb_create_mempools(dev, cb)) {
3468 		ret = -ENOMEM;
3469 		goto failed;
3470 	}
3471 
3472 	ret = scsi_add_host(cb->host, &dev->dev);
3473 	if (ret) {
3474 		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3475 		myrb_destroy_mempools(cb);
3476 		goto failed;
3477 	}
3478 	scsi_scan_host(cb->host);
3479 	return 0;
3480 failed:
3481 	myrb_cleanup(cb);
3482 	return ret;
3483 }
3484 
3485 
/*
 * myrb_remove - PCI remove callback
 *
 * Flushes the controller cache before tearing down the host and
 * freeing the DMA mempools.
 */
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
3495 
3496 
/*
 * Supported controllers.  The LA is a DEC 21285 bridge identified by
 * its Mylex subsystem IDs; the PG/PD/P families use native Mylex
 * device IDs.  driver_data points at the per-family myrb_privdata.
 */
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
3518 
/* PCI driver glue: binds myrb_probe()/myrb_remove() to the ID table. */
static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};
3525 
3526 static int __init myrb_init_module(void)
3527 {
3528 	int ret;
3529 
3530 	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3531 	if (!myrb_raid_template)
3532 		return -ENODEV;
3533 
3534 	ret = pci_register_driver(&myrb_pci_driver);
3535 	if (ret)
3536 		raid_class_release(myrb_raid_template);
3537 
3538 	return ret;
3539 }
3540 
/* Module teardown: unregister the PCI driver (removing all bound
 * controllers), then detach the RAID transport class.
 */
static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}
3546 
/* Module entry/exit points and metadata. */
module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");
3553