xref: /openbmc/linux/drivers/scsi/myrb.c (revision d47a97bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4  *
5  * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6  *
7  * Based on the original DAC960 driver,
8  * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9  * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10  *
11  */
12 
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
25 #include "myrb.h"
26 
27 static struct raid_template *myrb_raid_template;
28 
29 static void myrb_monitor(struct work_struct *work);
30 static inline void myrb_translate_devstate(void *DeviceState);
31 
32 static inline int myrb_logical_channel(struct Scsi_Host *shost)
33 {
34 	return shost->max_channel - 1;
35 }
36 
37 static struct myrb_devstate_name_entry {
38 	enum myrb_devstate state;
39 	const char *name;
40 } myrb_devstate_name_list[] = {
41 	{ MYRB_DEVICE_DEAD, "Dead" },
42 	{ MYRB_DEVICE_WO, "WriteOnly" },
43 	{ MYRB_DEVICE_ONLINE, "Online" },
44 	{ MYRB_DEVICE_CRITICAL, "Critical" },
45 	{ MYRB_DEVICE_STANDBY, "Standby" },
46 	{ MYRB_DEVICE_OFFLINE, "Offline" },
47 };
48 
49 static const char *myrb_devstate_name(enum myrb_devstate state)
50 {
51 	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
52 	int i;
53 
54 	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 		if (entry[i].state == state)
56 			return entry[i].name;
57 	}
58 	return "Unknown";
59 }
60 
61 static struct myrb_raidlevel_name_entry {
62 	enum myrb_raidlevel level;
63 	const char *name;
64 } myrb_raidlevel_name_list[] = {
65 	{ MYRB_RAID_LEVEL0, "RAID0" },
66 	{ MYRB_RAID_LEVEL1, "RAID1" },
67 	{ MYRB_RAID_LEVEL3, "RAID3" },
68 	{ MYRB_RAID_LEVEL5, "RAID5" },
69 	{ MYRB_RAID_LEVEL6, "RAID6" },
70 	{ MYRB_RAID_JBOD, "JBOD" },
71 };
72 
73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
74 {
75 	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
76 	int i;
77 
78 	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 		if (entry[i].level == level)
80 			return entry[i].name;
81 	}
82 	return NULL;
83 }
84 
85 /*
86  * myrb_create_mempools - allocates auxiliary data structures
87  *
88  * Return: true on success, false otherwise.
89  */
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
91 {
92 	size_t elem_size, elem_align;
93 
94 	elem_align = sizeof(struct myrb_sge);
95 	elem_size = cb->host->sg_tablesize * elem_align;
96 	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 				      elem_size, elem_align, 0);
98 	if (cb->sg_pool == NULL) {
99 		shost_printk(KERN_ERR, cb->host,
100 			     "Failed to allocate SG pool\n");
101 		return false;
102 	}
103 
104 	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 				       sizeof(struct myrb_dcdb),
106 				       sizeof(unsigned int), 0);
107 	if (!cb->dcdb_pool) {
108 		dma_pool_destroy(cb->sg_pool);
109 		cb->sg_pool = NULL;
110 		shost_printk(KERN_ERR, cb->host,
111 			     "Failed to allocate DCDB pool\n");
112 		return false;
113 	}
114 
115 	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 		 "myrb_wq_%d", cb->host->host_no);
117 	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118 	if (!cb->work_q) {
119 		dma_pool_destroy(cb->dcdb_pool);
120 		cb->dcdb_pool = NULL;
121 		dma_pool_destroy(cb->sg_pool);
122 		cb->sg_pool = NULL;
123 		shost_printk(KERN_ERR, cb->host,
124 			     "Failed to create workqueue\n");
125 		return false;
126 	}
127 
128 	/*
129 	 * Initialize the Monitoring Timer.
130 	 */
131 	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
133 
134 	return true;
135 }
136 
137 /*
138  * myrb_destroy_mempools - tears down the memory pools for the controller
139  */
140 static void myrb_destroy_mempools(struct myrb_hba *cb)
141 {
142 	cancel_delayed_work_sync(&cb->monitor_work);
143 	destroy_workqueue(cb->work_q);
144 
145 	dma_pool_destroy(cb->sg_pool);
146 	dma_pool_destroy(cb->dcdb_pool);
147 }
148 
149 /*
150  * myrb_reset_cmd - reset command block
151  */
152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
153 {
154 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
155 
156 	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
157 	cmd_blk->status = 0;
158 }
159 
/*
 * myrb_qcmd - queues command block for execution
 *
 * Copies the command mailbox into the next free slot of the in-memory
 * command mailbox ring and advances the ring state.  Callers serialize
 * submissions with cb->queue_lock (see myrb_exec_cmd()).
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	/*
	 * If either of the two most recently submitted slots has been
	 * consumed (words[0] cleared), notify the controller via the
	 * get_cmd_mbox hook — presumably kicks mailbox processing;
	 * exact doorbell semantics live in the per-variant callback.
	 */
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	/* Remember the last two submitted slots for the check above */
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	/* Advance and wrap the ring pointer */
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}
179 
180 /*
181  * myrb_exec_cmd - executes command block and waits for completion.
182  *
183  * Return: command status
184  */
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 		struct myrb_cmdblk *cmd_blk)
187 {
188 	DECLARE_COMPLETION_ONSTACK(cmpl);
189 	unsigned long flags;
190 
191 	cmd_blk->completion = &cmpl;
192 
193 	spin_lock_irqsave(&cb->queue_lock, flags);
194 	cb->qcmd(cb, cmd_blk);
195 	spin_unlock_irqrestore(&cb->queue_lock, flags);
196 
197 	wait_for_completion(&cmpl);
198 	return cmd_blk->status;
199 }
200 
201 /*
202  * myrb_exec_type3 - executes a type 3 command and waits for completion.
203  *
204  * Return: command status
205  */
206 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
207 		enum myrb_cmd_opcode op, dma_addr_t addr)
208 {
209 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
210 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
211 	unsigned short status;
212 
213 	mutex_lock(&cb->dcmd_mutex);
214 	myrb_reset_cmd(cmd_blk);
215 	mbox->type3.id = MYRB_DCMD_TAG;
216 	mbox->type3.opcode = op;
217 	mbox->type3.addr = addr;
218 	status = myrb_exec_cmd(cb, cmd_blk);
219 	mutex_unlock(&cb->dcmd_mutex);
220 	return status;
221 }
222 
/*
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Maps @pdev_info for DMA, issues @op against the physical device at
 * @sdev's channel/target, and converts the returned state when the old
 * GET_DEVICE_STATE opcode variant was used.
 *
 * Return: command status, or MYRB_STATUS_SUBSYS_FAILED if the DMA
 * mapping failed.
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	/* The controller DMAs the device state into @pdev_info */
	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	/* dcmd_blk is shared between callers; dcmd_mutex serializes it */
	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	/*
	 * Old-style opcode presumably returns a different state layout
	 * that myrb_translate_devstate() converts in place.
	 * NOTE(review): mbox is read after dcmd_mutex is dropped, so a
	 * concurrent dcmd user could have rewritten it; confirm this
	 * window is benign (comparing the local @op would be safer).
	 */
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}
260 
/*
 * Drive-kill reason strings for vendor-specific event log sense data
 * (sense key VENDOR_SPECIFIC, ASC 0x80); indexed by the ASCQ byte —
 * see myrb_get_event().  Declared fully const: the strings are only
 * ever printed, never modified.
 */
static const char *const myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};
276 
/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Execute a type 3E command, fetch the requested event log entry into a
 * temporary DMA-coherent buffer and log its decoded message.  Best
 * effort: allocation or command failure only logs (or skips) silently.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;	/* best effort: skip the event on OOM */

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);

	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		/*
		 * Vendor-specific sense with ASC 0x80 encodes a
		 * drive-kill reason in the ASCQ byte (myrb_event_msg).
		 */
		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}
334 
/*
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs, per attached physical device,
 * any error counters that changed since the previous snapshot.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	/*
	 * Snapshot of the previous table so only deltas are reported.
	 * NOTE(review): this lives on the kernel stack; confirm
	 * MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS entries stay within
	 * acceptable stack usage.
	 */
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			/* Logical-drive channel has no physical error data */
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			/* Table is laid out channel-major, one entry per target */
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			/* Skip devices whose counters did not change */
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}
379 
/*
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table:
 * registers newly appeared (non-offline) drives with the SCSI midlayer,
 * logs state and write-cache changes for known drives, and caches the
 * fresh info in sdev->hostdata.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		/* Logical drives live on the reserved last channel */
		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			/* Unknown to the midlayer: register it unless offline */
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		/* Cache the new info for the next comparison */
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}
429 
430 /*
431  * myrb_get_rbld_progress - get rebuild progress information
432  *
433  * Executes a type 3 command and returns the rebuild progress
434  * information.
435  *
436  * Return: command status
437  */
438 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
439 		struct myrb_rbld_progress *rbld)
440 {
441 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
442 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
443 	struct myrb_rbld_progress *rbld_buf;
444 	dma_addr_t rbld_addr;
445 	unsigned short status;
446 
447 	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
448 				      sizeof(struct myrb_rbld_progress),
449 				      &rbld_addr, GFP_KERNEL);
450 	if (!rbld_buf)
451 		return MYRB_STATUS_RBLD_NOT_CHECKED;
452 
453 	myrb_reset_cmd(cmd_blk);
454 	mbox->type3.id = MYRB_MCMD_TAG;
455 	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
456 	mbox->type3.addr = rbld_addr;
457 	status = myrb_exec_cmd(cb, cmd_blk);
458 	if (rbld)
459 		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
460 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
461 			  rbld_buf, rbld_addr);
462 	return status;
463 }
464 
/*
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Fetches the current rebuild progress and logs per-drive rebuild
 * state transitions for the attached logical devices.  The last seen
 * status is cached in cb->last_rbld_status so a finished rebuild is
 * reported exactly once.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	/* A rebuild that just vanished after running successfully is done */
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		/*
		 * NOTE(review): this early return also skips the
		 * cb->last_rbld_status update below — confirm intended.
		 */
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			/* Both terms scaled by 128, presumably to avoid
			 * 32-bit overflow in the multiply by 100. */
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				     "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}
524 
/*
 * myrb_get_cc_progress - retrieve the rebuild status
 *
 * Execute a type 3 Command and fetch the rebuild / consistency check
 * status, logging the completion percentage for the affected logical
 * drive.  On allocation failure the check is rescheduled by setting
 * cb->need_cc_status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		/* Try again on the next monitor pass */
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			/* Both terms scaled by 128, presumably to avoid
			 * 32-bit overflow in the multiply by 100. */
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}
572 
/*
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation
 * status cached in cb->bgi_status, logging start / progress / suspend /
 * cancel / completion transitions for the affected logical drive.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;	/* presumably "query status"; confirm against firmware docs */
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	/*
	 * NOTE(review): bgi->ldev_num is read before @status is checked;
	 * confirm the buffer contents are sane when the command failed.
	 */
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			/* Only log when progress actually advanced */
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				 "Background Initialization in Progress: %d%% completed\n",
				 (100 * (bgi->blocks_done >> 7))
				 / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		/* Cache the whole status block for the next comparison */
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}
661 
/*
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 * The fresh enquiry data is compared against the previous snapshot and
 * any change (logical drive count, deferred write errors, event log
 * sequence, rebuild / consistency check state) is logged and/or flagged
 * for follow-up by the monitor work.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	/* Snapshot the previous state so changes can be detected below */
	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	/* Report logical drives that appeared since the last enquiry */
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	/* ...and logical drives that disappeared */
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	/* New events arrived: remember the sequence and request details */
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	/* Any change in critical/offline/total counts needs a ldev rescan */
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	/* On pdev failure changes, or periodically, refresh BGI status */
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	/* A rebuild is (or just was) running: request a progress update */
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	/* A consistency check was running; log how it ended */
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}
773 
774 /*
775  * myrb_set_pdev_state - sets the device state for a physical device
776  *
777  * Return: command status
778  */
779 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
780 		struct scsi_device *sdev, enum myrb_devstate state)
781 {
782 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
783 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
784 	unsigned short status;
785 
786 	mutex_lock(&cb->dcmd_mutex);
787 	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
788 	mbox->type3D.id = MYRB_DCMD_TAG;
789 	mbox->type3D.channel = sdev->channel;
790 	mbox->type3D.target = sdev->id;
791 	mbox->type3D.state = state & 0x1F;
792 	status = myrb_exec_cmd(cb, cmd_blk);
793 	mutex_unlock(&cb->dcmd_mutex);
794 
795 	return status;
796 }
797 
798 /*
799  * myrb_enable_mmio - enables the Memory Mailbox Interface
800  *
801  * PD and P controller types have no memory mailbox, but still need the
802  * other dma mapped memory.
803  *
804  * Return: true on success, false otherwise.
805  */
806 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
807 {
808 	void __iomem *base = cb->io_base;
809 	struct pci_dev *pdev = cb->pdev;
810 	size_t err_table_size;
811 	size_t ldev_info_size;
812 	union myrb_cmd_mbox *cmd_mbox_mem;
813 	struct myrb_stat_mbox *stat_mbox_mem;
814 	union myrb_cmd_mbox mbox;
815 	unsigned short status;
816 
817 	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
818 
819 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
820 		dev_err(&pdev->dev, "DMA mask out of range\n");
821 		return false;
822 	}
823 
824 	cb->enquiry = dma_alloc_coherent(&pdev->dev,
825 					 sizeof(struct myrb_enquiry),
826 					 &cb->enquiry_addr, GFP_KERNEL);
827 	if (!cb->enquiry)
828 		return false;
829 
830 	err_table_size = sizeof(struct myrb_error_entry) *
831 		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
832 	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
833 					   &cb->err_table_addr, GFP_KERNEL);
834 	if (!cb->err_table)
835 		return false;
836 
837 	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
838 	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
839 					       &cb->ldev_info_addr, GFP_KERNEL);
840 	if (!cb->ldev_info_buf)
841 		return false;
842 
843 	/*
844 	 * Skip mailbox initialisation for PD and P Controllers
845 	 */
846 	if (!mmio_init_fn)
847 		return true;
848 
849 	/* These are the base addresses for the command memory mailbox array */
850 	cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
851 	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
852 						cb->cmd_mbox_size,
853 						&cb->cmd_mbox_addr,
854 						GFP_KERNEL);
855 	if (!cb->first_cmd_mbox)
856 		return false;
857 
858 	cmd_mbox_mem = cb->first_cmd_mbox;
859 	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
860 	cb->last_cmd_mbox = cmd_mbox_mem;
861 	cb->next_cmd_mbox = cb->first_cmd_mbox;
862 	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
863 	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
864 
865 	/* These are the base addresses for the status memory mailbox array */
866 	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
867 	    sizeof(struct myrb_stat_mbox);
868 	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
869 						 cb->stat_mbox_size,
870 						 &cb->stat_mbox_addr,
871 						 GFP_KERNEL);
872 	if (!cb->first_stat_mbox)
873 		return false;
874 
875 	stat_mbox_mem = cb->first_stat_mbox;
876 	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
877 	cb->last_stat_mbox = stat_mbox_mem;
878 	cb->next_stat_mbox = cb->first_stat_mbox;
879 
880 	/* Enable the Memory Mailbox Interface. */
881 	cb->dual_mode_interface = true;
882 	mbox.typeX.opcode = 0x2B;
883 	mbox.typeX.id = 0;
884 	mbox.typeX.opcode2 = 0x14;
885 	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
886 	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
887 
888 	status = mmio_init_fn(pdev, base, &mbox);
889 	if (status != MYRB_STATUS_SUCCESS) {
890 		cb->dual_mode_interface = false;
891 		mbox.typeX.opcode2 = 0x10;
892 		status = mmio_init_fn(pdev, base, &mbox);
893 		if (status != MYRB_STATUS_SUCCESS) {
894 			dev_err(&pdev->dev,
895 				"Failed to enable mailbox, statux %02X\n",
896 				status);
897 			return false;
898 		}
899 	}
900 	return true;
901 }
902 
903 /*
904  * myrb_get_hba_config - reads the configuration information
905  *
906  * Reads the configuration information from the controller and
907  * initializes the controller structure.
908  *
909  * Return: 0 on success, errno otherwise
910  */
911 static int myrb_get_hba_config(struct myrb_hba *cb)
912 {
913 	struct myrb_enquiry2 *enquiry2;
914 	dma_addr_t enquiry2_addr;
915 	struct myrb_config2 *config2;
916 	dma_addr_t config2_addr;
917 	struct Scsi_Host *shost = cb->host;
918 	struct pci_dev *pdev = cb->pdev;
919 	int pchan_max = 0, pchan_cur = 0;
920 	unsigned short status;
921 	int ret = -ENODEV, memsize = 0;
922 
923 	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
924 				      &enquiry2_addr, GFP_KERNEL);
925 	if (!enquiry2) {
926 		shost_printk(KERN_ERR, cb->host,
927 			     "Failed to allocate V1 enquiry2 memory\n");
928 		return -ENOMEM;
929 	}
930 	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
931 				     &config2_addr, GFP_KERNEL);
932 	if (!config2) {
933 		shost_printk(KERN_ERR, cb->host,
934 			     "Failed to allocate V1 config2 memory\n");
935 		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
936 				  enquiry2, enquiry2_addr);
937 		return -ENOMEM;
938 	}
939 	mutex_lock(&cb->dma_mutex);
940 	status = myrb_hba_enquiry(cb);
941 	mutex_unlock(&cb->dma_mutex);
942 	if (status != MYRB_STATUS_SUCCESS) {
943 		shost_printk(KERN_WARNING, cb->host,
944 			     "Failed it issue V1 Enquiry\n");
945 		goto out_free;
946 	}
947 
948 	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
949 	if (status != MYRB_STATUS_SUCCESS) {
950 		shost_printk(KERN_WARNING, cb->host,
951 			     "Failed to issue V1 Enquiry2\n");
952 		goto out_free;
953 	}
954 
955 	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
956 	if (status != MYRB_STATUS_SUCCESS) {
957 		shost_printk(KERN_WARNING, cb->host,
958 			     "Failed to issue ReadConfig2\n");
959 		goto out_free;
960 	}
961 
962 	status = myrb_get_ldev_info(cb);
963 	if (status != MYRB_STATUS_SUCCESS) {
964 		shost_printk(KERN_WARNING, cb->host,
965 			     "Failed to get logical drive information\n");
966 		goto out_free;
967 	}
968 
969 	/*
970 	 * Initialize the Controller Model Name and Full Model Name fields.
971 	 */
972 	switch (enquiry2->hw.sub_model) {
973 	case DAC960_V1_P_PD_PU:
974 		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
975 			strcpy(cb->model_name, "DAC960PU");
976 		else
977 			strcpy(cb->model_name, "DAC960PD");
978 		break;
979 	case DAC960_V1_PL:
980 		strcpy(cb->model_name, "DAC960PL");
981 		break;
982 	case DAC960_V1_PG:
983 		strcpy(cb->model_name, "DAC960PG");
984 		break;
985 	case DAC960_V1_PJ:
986 		strcpy(cb->model_name, "DAC960PJ");
987 		break;
988 	case DAC960_V1_PR:
989 		strcpy(cb->model_name, "DAC960PR");
990 		break;
991 	case DAC960_V1_PT:
992 		strcpy(cb->model_name, "DAC960PT");
993 		break;
994 	case DAC960_V1_PTL0:
995 		strcpy(cb->model_name, "DAC960PTL0");
996 		break;
997 	case DAC960_V1_PRL:
998 		strcpy(cb->model_name, "DAC960PRL");
999 		break;
1000 	case DAC960_V1_PTL1:
1001 		strcpy(cb->model_name, "DAC960PTL1");
1002 		break;
1003 	case DAC960_V1_1164P:
1004 		strcpy(cb->model_name, "eXtremeRAID 1100");
1005 		break;
1006 	default:
1007 		shost_printk(KERN_WARNING, cb->host,
1008 			     "Unknown Model %X\n",
1009 			     enquiry2->hw.sub_model);
1010 		goto out;
1011 	}
1012 	/*
1013 	 * Initialize the Controller Firmware Version field and verify that it
1014 	 * is a supported firmware version.
1015 	 * The supported firmware versions are:
1016 	 *
1017 	 * DAC1164P		    5.06 and above
1018 	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
1019 	 * DAC960PU/PD/PL	    3.51 and above
1020 	 * DAC960PU/PD/PL/P	    2.73 and above
1021 	 */
1022 #if defined(CONFIG_ALPHA)
1023 	/*
1024 	 * DEC Alpha machines were often equipped with DAC960 cards that were
1025 	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1026 	 * the last custom FW revision to be released by DEC for these older
1027 	 * controllers, appears to work quite well with this driver.
1028 	 *
1029 	 * Cards tested successfully were several versions each of the PD and
1030 	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1031 	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1032 	 * back of the board, of:
1033 	 *
1034 	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
1035 	 *         or D040349 (3-channel)
1036 	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
1037 	 *         or D040397 (3-channel)
1038 	 */
1039 # define FIRMWARE_27X	"2.70"
1040 #else
1041 # define FIRMWARE_27X	"2.73"
1042 #endif
1043 
1044 	if (enquiry2->fw.major_version == 0) {
1045 		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1046 		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1047 		enquiry2->fw.firmware_type = '0';
1048 		enquiry2->fw.turn_id = 0;
1049 	}
1050 	snprintf(cb->fw_version, sizeof(cb->fw_version),
1051 		"%u.%02u-%c-%02u",
1052 		enquiry2->fw.major_version,
1053 		enquiry2->fw.minor_version,
1054 		enquiry2->fw.firmware_type,
1055 		enquiry2->fw.turn_id);
1056 	if (!((enquiry2->fw.major_version == 5 &&
1057 	       enquiry2->fw.minor_version >= 6) ||
1058 	      (enquiry2->fw.major_version == 4 &&
1059 	       enquiry2->fw.minor_version >= 6) ||
1060 	      (enquiry2->fw.major_version == 3 &&
1061 	       enquiry2->fw.minor_version >= 51) ||
1062 	      (enquiry2->fw.major_version == 2 &&
1063 	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1064 		shost_printk(KERN_WARNING, cb->host,
1065 			"Firmware Version '%s' unsupported\n",
1066 			cb->fw_version);
1067 		goto out;
1068 	}
1069 	/*
1070 	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1071 	 * Enclosure Management Enabled fields.
1072 	 */
1073 	switch (enquiry2->hw.model) {
1074 	case MYRB_5_CHANNEL_BOARD:
1075 		pchan_max = 5;
1076 		break;
1077 	case MYRB_3_CHANNEL_BOARD:
1078 	case MYRB_3_CHANNEL_ASIC_DAC:
1079 		pchan_max = 3;
1080 		break;
1081 	case MYRB_2_CHANNEL_BOARD:
1082 		pchan_max = 2;
1083 		break;
1084 	default:
1085 		pchan_max = enquiry2->cfg_chan;
1086 		break;
1087 	}
1088 	pchan_cur = enquiry2->cur_chan;
1089 	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1090 		cb->bus_width = 32;
1091 	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1092 		cb->bus_width = 16;
1093 	else
1094 		cb->bus_width = 8;
1095 	cb->ldev_block_size = enquiry2->ldev_block_size;
1096 	shost->max_channel = pchan_cur;
1097 	shost->max_id = enquiry2->max_targets;
1098 	memsize = enquiry2->mem_size >> 20;
1099 	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1100 	/*
1101 	 * Initialize the Controller Queue Depth, Driver Queue Depth,
1102 	 * Logical Drive Count, Maximum Blocks per Command, Controller
1103 	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1104 	 * The Driver Queue Depth must be at most one less than the
1105 	 * Controller Queue Depth to allow for an automatic drive
1106 	 * rebuild operation.
1107 	 */
1108 	shost->can_queue = cb->enquiry->max_tcq;
1109 	if (shost->can_queue < 3)
1110 		shost->can_queue = enquiry2->max_cmds;
1111 	if (shost->can_queue < 3)
1112 		/* Play safe and disable TCQ */
1113 		shost->can_queue = 1;
1114 
1115 	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1116 		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1117 	shost->max_sectors = enquiry2->max_sectors;
1118 	shost->sg_tablesize = enquiry2->max_sge;
1119 	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1120 		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1121 	/*
1122 	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1123 	 */
1124 	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1125 		>> (10 - MYRB_BLKSIZE_BITS);
1126 	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1127 		>> (10 - MYRB_BLKSIZE_BITS);
1128 	/* Assume 255/63 translation */
1129 	cb->ldev_geom_heads = 255;
1130 	cb->ldev_geom_sectors = 63;
1131 	if (config2->drive_geometry) {
1132 		cb->ldev_geom_heads = 128;
1133 		cb->ldev_geom_sectors = 32;
1134 	}
1135 
1136 	/*
1137 	 * Initialize the Background Initialization Status.
1138 	 */
1139 	if ((cb->fw_version[0] == '4' &&
1140 	     strcmp(cb->fw_version, "4.08") >= 0) ||
1141 	    (cb->fw_version[0] == '5' &&
1142 	     strcmp(cb->fw_version, "5.08") >= 0)) {
1143 		cb->bgi_status_supported = true;
1144 		myrb_bgi_control(cb);
1145 	}
1146 	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1147 	ret = 0;
1148 
1149 out:
1150 	shost_printk(KERN_INFO, cb->host,
1151 		"Configuring %s PCI RAID Controller\n", cb->model_name);
1152 	shost_printk(KERN_INFO, cb->host,
1153 		"  Firmware Version: %s, Memory Size: %dMB\n",
1154 		cb->fw_version, memsize);
1155 	if (cb->io_addr == 0)
1156 		shost_printk(KERN_INFO, cb->host,
1157 			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1158 			(unsigned long)cb->pci_addr, cb->irq);
1159 	else
1160 		shost_printk(KERN_INFO, cb->host,
1161 			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1162 			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1163 			cb->irq);
1164 	shost_printk(KERN_INFO, cb->host,
1165 		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1166 		cb->host->can_queue, cb->host->max_sectors);
1167 	shost_printk(KERN_INFO, cb->host,
1168 		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1169 		     cb->host->can_queue, cb->host->sg_tablesize,
1170 		     MYRB_SCATTER_GATHER_LIMIT);
1171 	shost_printk(KERN_INFO, cb->host,
1172 		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1173 		     cb->stripe_size, cb->segment_size,
1174 		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
1175 		     cb->safte_enabled ?
1176 		     "  SAF-TE Enclosure Management Enabled" : "");
1177 	shost_printk(KERN_INFO, cb->host,
1178 		     "  Physical: %d/%d channels %d/%d/%d devices\n",
1179 		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1180 		     cb->host->max_id);
1181 
1182 	shost_printk(KERN_INFO, cb->host,
1183 		     "  Logical: 1/1 channels, %d/%d disks\n",
1184 		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1185 
1186 out_free:
1187 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1188 			  enquiry2, enquiry2_addr);
1189 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1190 			  config2, config2_addr);
1191 
1192 	return ret;
1193 }
1194 
1195 /*
1196  * myrb_unmap - unmaps controller structures
1197  */
1198 static void myrb_unmap(struct myrb_hba *cb)
1199 {
1200 	if (cb->ldev_info_buf) {
1201 		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1202 			MYRB_MAX_LDEVS;
1203 		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1204 				  cb->ldev_info_buf, cb->ldev_info_addr);
1205 		cb->ldev_info_buf = NULL;
1206 	}
1207 	if (cb->err_table) {
1208 		size_t err_table_size = sizeof(struct myrb_error_entry) *
1209 			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1210 		dma_free_coherent(&cb->pdev->dev, err_table_size,
1211 				  cb->err_table, cb->err_table_addr);
1212 		cb->err_table = NULL;
1213 	}
1214 	if (cb->enquiry) {
1215 		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1216 				  cb->enquiry, cb->enquiry_addr);
1217 		cb->enquiry = NULL;
1218 	}
1219 	if (cb->first_stat_mbox) {
1220 		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1221 				  cb->first_stat_mbox, cb->stat_mbox_addr);
1222 		cb->first_stat_mbox = NULL;
1223 	}
1224 	if (cb->first_cmd_mbox) {
1225 		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1226 				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
1227 		cb->first_cmd_mbox = NULL;
1228 	}
1229 }
1230 
1231 /*
1232  * myrb_cleanup - cleanup controller structures
1233  */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	/* Silence the controller before tearing down the MMIO mapping. */
	if (cb->mmio_base) {
		if (cb->disable_intr)
			cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	/* 0x80 matches the size requested at probe time for port I/O. */
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	/* Drop our Scsi_Host reference; cb lives in its private data. */
	scsi_host_put(cb->host);
}
1254 
1255 static int myrb_host_reset(struct scsi_cmnd *scmd)
1256 {
1257 	struct Scsi_Host *shost = scmd->device->host;
1258 	struct myrb_hba *cb = shost_priv(shost);
1259 
1260 	cb->reset(cb->io_base);
1261 	return SUCCESS;
1262 }
1263 
/*
 * myrb_pthru_queuecommand - queue a passthrough (DCDB) command for a
 * physical device.  The CDB is wrapped in a Direct CDB structure and
 * handed to the controller via a type3 mailbox command.
 */
static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	/*
	 * A DCDB carries exactly one data segment, so multi-segment
	 * commands are failed outright.
	 * NOTE(review): a negative return from scsi_dma_map() (mapping
	 * failure) is not caught here and would fall through to the
	 * single-segment setup below -- verify against the mid-layer
	 * contract.
	 */
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	/* +3 presumably skips tags reserved for driver-internal commands
	 * (cf. MYRB_DCMD_TAG usage elsewhere) -- TODO confirm. */
	mbox->type3.id = rq->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	/* Translate the mid-layer data direction into DCDB encoding. */
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	/* Map the request timeout onto the coarse DCDB timeout classes. */
	if (rq->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (rq->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (rq->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	/* Transfer length is split into a 16-bit low part and 4 high bits. */
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	/* Hand the command block to the controller under the queue lock. */
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}
1338 
1339 static void myrb_inquiry(struct myrb_hba *cb,
1340 		struct scsi_cmnd *scmd)
1341 {
1342 	unsigned char inq[36] = {
1343 		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1344 		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1345 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1347 		0x20, 0x20, 0x20, 0x20,
1348 	};
1349 
1350 	if (cb->bus_width > 16)
1351 		inq[7] |= 1 << 6;
1352 	if (cb->bus_width > 8)
1353 		inq[7] |= 1 << 5;
1354 	memcpy(&inq[16], cb->model_name, 16);
1355 	memcpy(&inq[32], cb->fw_version, 1);
1356 	memcpy(&inq[33], &cb->fw_version[2], 2);
1357 	memcpy(&inq[35], &cb->fw_version[7], 1);
1358 
1359 	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1360 }
1361 
1362 static void
1363 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1364 		struct myrb_ldev_info *ldev_info)
1365 {
1366 	unsigned char modes[32], *mode_pg;
1367 	bool dbd;
1368 	size_t mode_len;
1369 
1370 	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1371 	if (dbd) {
1372 		mode_len = 24;
1373 		mode_pg = &modes[4];
1374 	} else {
1375 		mode_len = 32;
1376 		mode_pg = &modes[12];
1377 	}
1378 	memset(modes, 0, sizeof(modes));
1379 	modes[0] = mode_len - 1;
1380 	if (!dbd) {
1381 		unsigned char *block_desc = &modes[4];
1382 
1383 		modes[3] = 8;
1384 		put_unaligned_be32(ldev_info->size, &block_desc[0]);
1385 		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1386 	}
1387 	mode_pg[0] = 0x08;
1388 	mode_pg[1] = 0x12;
1389 	if (ldev_info->wb_enabled)
1390 		mode_pg[2] |= 0x04;
1391 	if (cb->segment_size) {
1392 		mode_pg[2] |= 0x08;
1393 		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1394 	}
1395 
1396 	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1397 }
1398 
/*
 * myrb_request_sense - emulate REQUEST SENSE for a logical drive by
 * returning a fixed-format "no sense" buffer in the data-in buffer.
 */
static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}
1406 
1407 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1408 		struct myrb_ldev_info *ldev_info)
1409 {
1410 	unsigned char data[8];
1411 
1412 	dev_dbg(&scmd->device->sdev_gendev,
1413 		"Capacity %u, blocksize %u\n",
1414 		ldev_info->size, cb->ldev_block_size);
1415 	put_unaligned_be32(ldev_info->size - 1, &data[0]);
1416 	put_unaligned_be32(cb->ldev_block_size, &data[4]);
1417 	scsi_sg_copy_from_buffer(scmd, data, 8);
1418 }
1419 
1420 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1421 		struct scsi_cmnd *scmd)
1422 {
1423 	struct myrb_hba *cb = shost_priv(shost);
1424 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1425 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1426 	struct myrb_ldev_info *ldev_info;
1427 	struct scsi_device *sdev = scmd->device;
1428 	struct scatterlist *sgl;
1429 	unsigned long flags;
1430 	u64 lba;
1431 	u32 block_cnt;
1432 	int nsge;
1433 
1434 	ldev_info = sdev->hostdata;
1435 	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
1436 	    ldev_info->state != MYRB_DEVICE_WO) {
1437 		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1438 			sdev->id, ldev_info ? ldev_info->state : 0xff);
1439 		scmd->result = (DID_BAD_TARGET << 16);
1440 		scsi_done(scmd);
1441 		return 0;
1442 	}
1443 	switch (scmd->cmnd[0]) {
1444 	case TEST_UNIT_READY:
1445 		scmd->result = (DID_OK << 16);
1446 		scsi_done(scmd);
1447 		return 0;
1448 	case INQUIRY:
1449 		if (scmd->cmnd[1] & 1) {
1450 			/* Illegal request, invalid field in CDB */
1451 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1452 		} else {
1453 			myrb_inquiry(cb, scmd);
1454 			scmd->result = (DID_OK << 16);
1455 		}
1456 		scsi_done(scmd);
1457 		return 0;
1458 	case SYNCHRONIZE_CACHE:
1459 		scmd->result = (DID_OK << 16);
1460 		scsi_done(scmd);
1461 		return 0;
1462 	case MODE_SENSE:
1463 		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1464 		    (scmd->cmnd[2] & 0x3F) != 0x08) {
1465 			/* Illegal request, invalid field in CDB */
1466 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1467 		} else {
1468 			myrb_mode_sense(cb, scmd, ldev_info);
1469 			scmd->result = (DID_OK << 16);
1470 		}
1471 		scsi_done(scmd);
1472 		return 0;
1473 	case READ_CAPACITY:
1474 		if ((scmd->cmnd[1] & 1) ||
1475 		    (scmd->cmnd[8] & 1)) {
1476 			/* Illegal request, invalid field in CDB */
1477 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1478 			scsi_done(scmd);
1479 			return 0;
1480 		}
1481 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1482 		if (lba) {
1483 			/* Illegal request, invalid field in CDB */
1484 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1485 			scsi_done(scmd);
1486 			return 0;
1487 		}
1488 		myrb_read_capacity(cb, scmd, ldev_info);
1489 		scsi_done(scmd);
1490 		return 0;
1491 	case REQUEST_SENSE:
1492 		myrb_request_sense(cb, scmd);
1493 		scmd->result = (DID_OK << 16);
1494 		return 0;
1495 	case SEND_DIAGNOSTIC:
1496 		if (scmd->cmnd[1] != 0x04) {
1497 			/* Illegal request, invalid field in CDB */
1498 			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1499 		} else {
1500 			/* Assume good status */
1501 			scmd->result = (DID_OK << 16);
1502 		}
1503 		scsi_done(scmd);
1504 		return 0;
1505 	case READ_6:
1506 		if (ldev_info->state == MYRB_DEVICE_WO) {
1507 			/* Data protect, attempt to read invalid data */
1508 			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1509 			scsi_done(scmd);
1510 			return 0;
1511 		}
1512 		fallthrough;
1513 	case WRITE_6:
1514 		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1515 		       (scmd->cmnd[2] << 8) |
1516 		       scmd->cmnd[3]);
1517 		block_cnt = scmd->cmnd[4];
1518 		break;
1519 	case READ_10:
1520 		if (ldev_info->state == MYRB_DEVICE_WO) {
1521 			/* Data protect, attempt to read invalid data */
1522 			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1523 			scsi_done(scmd);
1524 			return 0;
1525 		}
1526 		fallthrough;
1527 	case WRITE_10:
1528 	case VERIFY:		/* 0x2F */
1529 	case WRITE_VERIFY:	/* 0x2E */
1530 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1531 		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1532 		break;
1533 	case READ_12:
1534 		if (ldev_info->state == MYRB_DEVICE_WO) {
1535 			/* Data protect, attempt to read invalid data */
1536 			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1537 			scsi_done(scmd);
1538 			return 0;
1539 		}
1540 		fallthrough;
1541 	case WRITE_12:
1542 	case VERIFY_12: /* 0xAF */
1543 	case WRITE_VERIFY_12:	/* 0xAE */
1544 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1545 		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1546 		break;
1547 	default:
1548 		/* Illegal request, invalid opcode */
1549 		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
1550 		scsi_done(scmd);
1551 		return 0;
1552 	}
1553 
1554 	myrb_reset_cmd(cmd_blk);
1555 	mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
1556 	if (scmd->sc_data_direction == DMA_NONE)
1557 		goto submit;
1558 	nsge = scsi_dma_map(scmd);
1559 	if (nsge == 1) {
1560 		sgl = scsi_sglist(scmd);
1561 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1562 			mbox->type5.opcode = MYRB_CMD_READ;
1563 		else
1564 			mbox->type5.opcode = MYRB_CMD_WRITE;
1565 
1566 		mbox->type5.ld.xfer_len = block_cnt;
1567 		mbox->type5.ld.ldev_num = sdev->id;
1568 		mbox->type5.lba = lba;
1569 		mbox->type5.addr = (u32)sg_dma_address(sgl);
1570 	} else {
1571 		struct myrb_sge *hw_sgl;
1572 		dma_addr_t hw_sgl_addr;
1573 		int i;
1574 
1575 		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1576 		if (!hw_sgl)
1577 			return SCSI_MLQUEUE_HOST_BUSY;
1578 
1579 		cmd_blk->sgl = hw_sgl;
1580 		cmd_blk->sgl_addr = hw_sgl_addr;
1581 
1582 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1583 			mbox->type5.opcode = MYRB_CMD_READ_SG;
1584 		else
1585 			mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1586 
1587 		mbox->type5.ld.xfer_len = block_cnt;
1588 		mbox->type5.ld.ldev_num = sdev->id;
1589 		mbox->type5.lba = lba;
1590 		mbox->type5.addr = hw_sgl_addr;
1591 		mbox->type5.sg_count = nsge;
1592 
1593 		scsi_for_each_sg(scmd, sgl, nsge, i) {
1594 			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1595 			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1596 			hw_sgl++;
1597 		}
1598 	}
1599 submit:
1600 	spin_lock_irqsave(&cb->queue_lock, flags);
1601 	cb->qcmd(cb, cmd_blk);
1602 	spin_unlock_irqrestore(&cb->queue_lock, flags);
1603 
1604 	return 0;
1605 }
1606 
1607 static int myrb_queuecommand(struct Scsi_Host *shost,
1608 		struct scsi_cmnd *scmd)
1609 {
1610 	struct scsi_device *sdev = scmd->device;
1611 
1612 	if (sdev->channel > myrb_logical_channel(shost)) {
1613 		scmd->result = (DID_BAD_TARGET << 16);
1614 		scsi_done(scmd);
1615 		return 0;
1616 	}
1617 	if (sdev->channel == myrb_logical_channel(shost))
1618 		return myrb_ldev_queuecommand(shost, scmd);
1619 
1620 	return myrb_pthru_queuecommand(shost, scmd);
1621 }
1622 
1623 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1624 {
1625 	struct myrb_hba *cb = shost_priv(sdev->host);
1626 	struct myrb_ldev_info *ldev_info;
1627 	unsigned short ldev_num = sdev->id;
1628 	enum raid_level level;
1629 
1630 	ldev_info = cb->ldev_info_buf + ldev_num;
1631 	if (!ldev_info)
1632 		return -ENXIO;
1633 
1634 	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1635 	if (!sdev->hostdata)
1636 		return -ENOMEM;
1637 	dev_dbg(&sdev->sdev_gendev,
1638 		"slave alloc ldev %d state %x\n",
1639 		ldev_num, ldev_info->state);
1640 	memcpy(sdev->hostdata, ldev_info,
1641 	       sizeof(*ldev_info));
1642 	switch (ldev_info->raid_level) {
1643 	case MYRB_RAID_LEVEL0:
1644 		level = RAID_LEVEL_LINEAR;
1645 		break;
1646 	case MYRB_RAID_LEVEL1:
1647 		level = RAID_LEVEL_1;
1648 		break;
1649 	case MYRB_RAID_LEVEL3:
1650 		level = RAID_LEVEL_3;
1651 		break;
1652 	case MYRB_RAID_LEVEL5:
1653 		level = RAID_LEVEL_5;
1654 		break;
1655 	case MYRB_RAID_LEVEL6:
1656 		level = RAID_LEVEL_6;
1657 		break;
1658 	case MYRB_RAID_JBOD:
1659 		level = RAID_LEVEL_JBOD;
1660 		break;
1661 	default:
1662 		level = RAID_LEVEL_UNKNOWN;
1663 		break;
1664 	}
1665 	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1666 	return 0;
1667 }
1668 
1669 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1670 {
1671 	struct myrb_hba *cb = shost_priv(sdev->host);
1672 	struct myrb_pdev_state *pdev_info;
1673 	unsigned short status;
1674 
1675 	if (sdev->id > MYRB_MAX_TARGETS)
1676 		return -ENXIO;
1677 
1678 	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
1679 	if (!pdev_info)
1680 		return -ENOMEM;
1681 
1682 	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1683 				  sdev, pdev_info);
1684 	if (status != MYRB_STATUS_SUCCESS) {
1685 		dev_dbg(&sdev->sdev_gendev,
1686 			"Failed to get device state, status %x\n",
1687 			status);
1688 		kfree(pdev_info);
1689 		return -ENXIO;
1690 	}
1691 	if (!pdev_info->present) {
1692 		dev_dbg(&sdev->sdev_gendev,
1693 			"device not present, skip\n");
1694 		kfree(pdev_info);
1695 		return -ENXIO;
1696 	}
1697 	dev_dbg(&sdev->sdev_gendev,
1698 		"slave alloc pdev %d:%d state %x\n",
1699 		sdev->channel, sdev->id, pdev_info->state);
1700 	sdev->hostdata = pdev_info;
1701 
1702 	return 0;
1703 }
1704 
1705 static int myrb_slave_alloc(struct scsi_device *sdev)
1706 {
1707 	if (sdev->channel > myrb_logical_channel(sdev->host))
1708 		return -ENXIO;
1709 
1710 	if (sdev->lun > 0)
1711 		return -ENXIO;
1712 
1713 	if (sdev->channel == myrb_logical_channel(sdev->host))
1714 		return myrb_ldev_slave_alloc(sdev);
1715 
1716 	return myrb_pdev_slave_alloc(sdev);
1717 }
1718 
1719 static int myrb_slave_configure(struct scsi_device *sdev)
1720 {
1721 	struct myrb_ldev_info *ldev_info;
1722 
1723 	if (sdev->channel > myrb_logical_channel(sdev->host))
1724 		return -ENXIO;
1725 
1726 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
1727 		sdev->no_uld_attach = 1;
1728 		return 0;
1729 	}
1730 	if (sdev->lun != 0)
1731 		return -ENXIO;
1732 
1733 	ldev_info = sdev->hostdata;
1734 	if (!ldev_info)
1735 		return -ENXIO;
1736 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1737 		sdev_printk(KERN_INFO, sdev,
1738 			    "Logical drive is %s\n",
1739 			    myrb_devstate_name(ldev_info->state));
1740 
1741 	sdev->tagged_supported = 1;
1742 	return 0;
1743 }
1744 
/* Free the per-device state allocated in myrb_slave_alloc(). */
static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}
1749 
1750 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1751 		sector_t capacity, int geom[])
1752 {
1753 	struct myrb_hba *cb = shost_priv(sdev->host);
1754 
1755 	geom[0] = cb->ldev_geom_heads;
1756 	geom[1] = cb->ldev_geom_sectors;
1757 	geom[2] = sector_div(capacity, geom[0] * geom[1]);
1758 
1759 	return 0;
1760 }
1761 
/*
 * raid_state_show - sysfs: report the device state
 *
 * For logical drives the cached state is reported; for physical
 * devices the state is re-read from the controller first.
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		/* myrb_devstate_name() returns "Unknown" rather than NULL
		 * for unlisted states; the else branch is defensive. */
		name = myrb_devstate_name(ldev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->state);
	} else {
		struct myrb_pdev_state *pdev_info = sdev->hostdata;
		unsigned short status;
		const char *name;

		/* Refresh the cached physical device state in place. */
		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
					  sdev, pdev_info);
		if (status != MYRB_STATUS_SUCCESS)
			sdev_printk(KERN_INFO, sdev,
				    "Failed to get device state, status %x\n",
				    status);

		if (!pdev_info->present)
			name = "Removed";
		else
			name = myrb_devstate_name(pdev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->state);
	}
	return ret;
}
1806 
/*
 * raid_state_store - sysfs: change the state of a physical device
 *
 * Accepted values: "kill"/"offline" (dead), "online", "standby".
 * Returns @count on success or a negative errno.
 */
static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	enum myrb_devstate new_state;
	unsigned short status;

	if (!strncmp(buf, "kill", 4) ||
	    !strncmp(buf, "offline", 7))
		new_state = MYRB_DEVICE_DEAD;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRB_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRB_DEVICE_STANDBY;
	else
		return -EINVAL;

	pdev_info = sdev->hostdata;
	if (!pdev_info) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - no physical device information\n");
		return -ENXIO;
	}
	if (!pdev_info->present) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - device not present\n");
		return -ENXIO;
	}

	/* Nothing to do if the device is already in the requested state. */
	if (pdev_info->state == new_state)
		return count;

	status = myrb_set_pdev_state(cb, sdev, new_state);
	/* Map the controller status onto an errno (via count). */
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		break;
	case MYRB_STATUS_START_DEVICE_FAILED:
		sdev_printk(KERN_INFO, sdev,
			     "Failed - Unable to Start Device\n");
		count = -EAGAIN;
		break;
	case MYRB_STATUS_NO_DEVICE:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - No Device at Address\n");
		count = -ENODEV;
		break;
	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Invalid Channel or Target or Modifier\n");
		count = -EINVAL;
		break;
	case MYRB_STATUS_CHANNEL_BUSY:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Channel Busy\n");
		count = -EBUSY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Unexpected Status %04X\n", status);
		count = -EIO;
		break;
	}
	return count;
}
1873 static DEVICE_ATTR_RW(raid_state);
1874 
1875 static ssize_t raid_level_show(struct device *dev,
1876 		struct device_attribute *attr, char *buf)
1877 {
1878 	struct scsi_device *sdev = to_scsi_device(dev);
1879 
1880 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1881 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1882 		const char *name;
1883 
1884 		if (!ldev_info)
1885 			return -ENXIO;
1886 
1887 		name = myrb_raidlevel_name(ldev_info->raid_level);
1888 		if (!name)
1889 			return snprintf(buf, 32, "Invalid (%02X)\n",
1890 					ldev_info->state);
1891 		return snprintf(buf, 32, "%s\n", name);
1892 	}
1893 	return snprintf(buf, 32, "Physical Drive\n");
1894 }
1895 static DEVICE_ATTR_RO(raid_level);
1896 
1897 static ssize_t rebuild_show(struct device *dev,
1898 		struct device_attribute *attr, char *buf)
1899 {
1900 	struct scsi_device *sdev = to_scsi_device(dev);
1901 	struct myrb_hba *cb = shost_priv(sdev->host);
1902 	struct myrb_rbld_progress rbld_buf;
1903 	unsigned char status;
1904 
1905 	if (sdev->channel < myrb_logical_channel(sdev->host))
1906 		return snprintf(buf, 32, "physical device - not rebuilding\n");
1907 
1908 	status = myrb_get_rbld_progress(cb, &rbld_buf);
1909 
1910 	if (rbld_buf.ldev_num != sdev->id ||
1911 	    status != MYRB_STATUS_SUCCESS)
1912 		return snprintf(buf, 32, "not rebuilding\n");
1913 
1914 	return snprintf(buf, 32, "rebuilding block %u of %u\n",
1915 			rbld_buf.ldev_size - rbld_buf.blocks_left,
1916 			rbld_buf.ldev_size);
1917 }
1918 
/*
 * rebuild_store - sysfs: start (non-zero value) or cancel (zero) a
 * rebuild of a physical device.  Logical drives are rejected.
 */
static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	if (sdev->channel >= myrb_logical_channel(sdev->host))
		return -ENXIO;

	/* SUCCESS here means a rebuild/check is already in progress. */
	status = myrb_get_rbld_progress(cb, NULL);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Initiated; already in progress\n");
			return -EALREADY;
		}
		/* Issue an asynchronous rebuild via the direct command slot. */
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
		mbox->type3D.id = MYRB_DCMD_TAG;
		mbox->type3D.channel = sdev->channel;
		mbox->type3D.target = sdev->id;
		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (status != MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Cancelled; not in progress\n");
			return 0;
		}

		/* REBUILD_CONTROL needs a DMA buffer for the rate byte. */
		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Rebuild Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		/* Rate 0xFF cancels the rebuild -- TODO confirm in fw docs. */
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	/* Translate known start-failure statuses into a message. */
	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Attempt to Rebuild Online or Unresponsive Drive";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid Device Address";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed, status 0x%x\n", status);

	return -EIO;
}
2022 static DEVICE_ATTR_RW(rebuild);
2023 
2024 static ssize_t consistency_check_store(struct device *dev,
2025 		struct device_attribute *attr, const char *buf, size_t count)
2026 {
2027 	struct scsi_device *sdev = to_scsi_device(dev);
2028 	struct myrb_hba *cb = shost_priv(sdev->host);
2029 	struct myrb_rbld_progress rbld_buf;
2030 	struct myrb_cmdblk *cmd_blk;
2031 	union myrb_cmd_mbox *mbox;
2032 	unsigned short ldev_num = 0xFFFF;
2033 	unsigned short status;
2034 	int rc, start;
2035 	const char *msg;
2036 
2037 	rc = kstrtoint(buf, 0, &start);
2038 	if (rc)
2039 		return rc;
2040 
2041 	if (sdev->channel < myrb_logical_channel(sdev->host))
2042 		return -ENXIO;
2043 
2044 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2045 	if (start) {
2046 		if (status == MYRB_STATUS_SUCCESS) {
2047 			sdev_printk(KERN_INFO, sdev,
2048 				    "Check Consistency Not Initiated; already in progress\n");
2049 			return -EALREADY;
2050 		}
2051 		mutex_lock(&cb->dcmd_mutex);
2052 		cmd_blk = &cb->dcmd_blk;
2053 		myrb_reset_cmd(cmd_blk);
2054 		mbox = &cmd_blk->mbox;
2055 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2056 		mbox->type3C.id = MYRB_DCMD_TAG;
2057 		mbox->type3C.ldev_num = sdev->id;
2058 		mbox->type3C.auto_restore = true;
2059 
2060 		status = myrb_exec_cmd(cb, cmd_blk);
2061 		mutex_unlock(&cb->dcmd_mutex);
2062 	} else {
2063 		struct pci_dev *pdev = cb->pdev;
2064 		unsigned char *rate;
2065 		dma_addr_t rate_addr;
2066 
2067 		if (ldev_num != sdev->id) {
2068 			sdev_printk(KERN_INFO, sdev,
2069 				    "Check Consistency Not Cancelled; not in progress\n");
2070 			return 0;
2071 		}
2072 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2073 					  &rate_addr, GFP_KERNEL);
2074 		if (rate == NULL) {
2075 			sdev_printk(KERN_INFO, sdev,
2076 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2077 			return -ENOMEM;
2078 		}
2079 		mutex_lock(&cb->dcmd_mutex);
2080 		cmd_blk = &cb->dcmd_blk;
2081 		myrb_reset_cmd(cmd_blk);
2082 		mbox = &cmd_blk->mbox;
2083 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2084 		mbox->type3R.id = MYRB_DCMD_TAG;
2085 		mbox->type3R.rbld_rate = 0xFF;
2086 		mbox->type3R.addr = rate_addr;
2087 		status = myrb_exec_cmd(cb, cmd_blk);
2088 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2089 		mutex_unlock(&cb->dcmd_mutex);
2090 	}
2091 	if (status == MYRB_STATUS_SUCCESS) {
2092 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2093 			    start ? "Initiated" : "Cancelled");
2094 		return count;
2095 	}
2096 	if (!start) {
2097 		sdev_printk(KERN_INFO, sdev,
2098 			    "Check Consistency Not Cancelled, status 0x%x\n",
2099 			    status);
2100 		return -EIO;
2101 	}
2102 
2103 	switch (status) {
2104 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2105 		msg = "Dependent Physical Device is DEAD";
2106 		break;
2107 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2108 		msg = "New Disk Failed During Rebuild";
2109 		break;
2110 	case MYRB_STATUS_INVALID_ADDRESS:
2111 		msg = "Invalid or Nonredundant Logical Drive";
2112 		break;
2113 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2114 		msg = "Already in Progress";
2115 		break;
2116 	default:
2117 		msg = NULL;
2118 		break;
2119 	}
2120 	if (msg)
2121 		sdev_printk(KERN_INFO, sdev,
2122 			    "Check Consistency Failed - %s\n", msg);
2123 	else
2124 		sdev_printk(KERN_INFO, sdev,
2125 			    "Check Consistency Failed, status 0x%x\n", status);
2126 
2127 	return -EIO;
2128 }
2129 
/* The read side of 'consistency_check' reuses the rebuild progress report. */
static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);
2136 
2137 static ssize_t ctlr_num_show(struct device *dev,
2138 		struct device_attribute *attr, char *buf)
2139 {
2140 	struct Scsi_Host *shost = class_to_shost(dev);
2141 	struct myrb_hba *cb = shost_priv(shost);
2142 
2143 	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2144 }
2145 static DEVICE_ATTR_RO(ctlr_num);
2146 
2147 static ssize_t firmware_show(struct device *dev,
2148 		struct device_attribute *attr, char *buf)
2149 {
2150 	struct Scsi_Host *shost = class_to_shost(dev);
2151 	struct myrb_hba *cb = shost_priv(shost);
2152 
2153 	return snprintf(buf, 16, "%s\n", cb->fw_version);
2154 }
2155 static DEVICE_ATTR_RO(firmware);
2156 
2157 static ssize_t model_show(struct device *dev,
2158 		struct device_attribute *attr, char *buf)
2159 {
2160 	struct Scsi_Host *shost = class_to_shost(dev);
2161 	struct myrb_hba *cb = shost_priv(shost);
2162 
2163 	return snprintf(buf, 16, "%s\n", cb->model_name);
2164 }
2165 static DEVICE_ATTR_RO(model);
2166 
2167 static ssize_t flush_cache_store(struct device *dev,
2168 		struct device_attribute *attr, const char *buf, size_t count)
2169 {
2170 	struct Scsi_Host *shost = class_to_shost(dev);
2171 	struct myrb_hba *cb = shost_priv(shost);
2172 	unsigned short status;
2173 
2174 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2175 	if (status == MYRB_STATUS_SUCCESS) {
2176 		shost_printk(KERN_INFO, shost,
2177 			     "Cache Flush Completed\n");
2178 		return count;
2179 	}
2180 	shost_printk(KERN_INFO, shost,
2181 		     "Cache Flush Failed, status %x\n", status);
2182 	return -EIO;
2183 }
2184 static DEVICE_ATTR_WO(flush_cache);
2185 
/* Sysfs attributes exposed on every SCSI device (per-LUN). */
static struct attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild.attr,
	&dev_attr_consistency_check.attr,
	&dev_attr_raid_state.attr,
	&dev_attr_raid_level.attr,
	NULL,
};

ATTRIBUTE_GROUPS(myrb_sdev);
2195 
/* Sysfs attributes exposed on the SCSI host (per-controller). */
static struct attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num.attr,
	&dev_attr_model.attr,
	&dev_attr_firmware.attr,
	&dev_attr_flush_cache.attr,
	NULL,
};

ATTRIBUTE_GROUPS(myrb_shost);
2205 
/* SCSI host template: midlayer entry points for the DAC960 driver. */
static struct scsi_host_template myrb_template = {
	.module			= THIS_MODULE,
	.name			= "DAC960",
	.proc_name		= "myrb",
	.queuecommand		= myrb_queuecommand,
	.eh_host_reset_handler	= myrb_host_reset,
	.slave_alloc		= myrb_slave_alloc,
	.slave_configure	= myrb_slave_configure,
	.slave_destroy		= myrb_slave_destroy,
	.bios_param		= myrb_biosparam,
	.cmd_size		= sizeof(struct myrb_cmdblk),
	.shost_groups		= myrb_shost_groups,
	.sdev_groups		= myrb_sdev_groups,
	.this_id		= -1,
};
2221 
2222 /**
2223  * myrb_is_raid - return boolean indicating device is raid volume
2224  * @dev: the device struct object
2225  */
2226 static int myrb_is_raid(struct device *dev)
2227 {
2228 	struct scsi_device *sdev = to_scsi_device(dev);
2229 
2230 	return sdev->channel == myrb_logical_channel(sdev->host);
2231 }
2232 
2233 /**
2234  * myrb_get_resync - get raid volume resync percent complete
2235  * @dev: the device struct object
2236  */
2237 static void myrb_get_resync(struct device *dev)
2238 {
2239 	struct scsi_device *sdev = to_scsi_device(dev);
2240 	struct myrb_hba *cb = shost_priv(sdev->host);
2241 	struct myrb_rbld_progress rbld_buf;
2242 	unsigned int percent_complete = 0;
2243 	unsigned short status;
2244 	unsigned int ldev_size = 0, remaining = 0;
2245 
2246 	if (sdev->channel < myrb_logical_channel(sdev->host))
2247 		return;
2248 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2249 	if (status == MYRB_STATUS_SUCCESS) {
2250 		if (rbld_buf.ldev_num == sdev->id) {
2251 			ldev_size = rbld_buf.ldev_size;
2252 			remaining = rbld_buf.blocks_left;
2253 		}
2254 	}
2255 	if (remaining && ldev_size)
2256 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2257 	raid_set_resync(myrb_raid_template, dev, percent_complete);
2258 }
2259 
2260 /**
2261  * myrb_get_state - get raid volume status
2262  * @dev: the device struct object
2263  */
2264 static void myrb_get_state(struct device *dev)
2265 {
2266 	struct scsi_device *sdev = to_scsi_device(dev);
2267 	struct myrb_hba *cb = shost_priv(sdev->host);
2268 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2269 	enum raid_state state = RAID_STATE_UNKNOWN;
2270 	unsigned short status;
2271 
2272 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2273 		state = RAID_STATE_UNKNOWN;
2274 	else {
2275 		status = myrb_get_rbld_progress(cb, NULL);
2276 		if (status == MYRB_STATUS_SUCCESS)
2277 			state = RAID_STATE_RESYNCING;
2278 		else {
2279 			switch (ldev_info->state) {
2280 			case MYRB_DEVICE_ONLINE:
2281 				state = RAID_STATE_ACTIVE;
2282 				break;
2283 			case MYRB_DEVICE_WO:
2284 			case MYRB_DEVICE_CRITICAL:
2285 				state = RAID_STATE_DEGRADED;
2286 				break;
2287 			default:
2288 				state = RAID_STATE_OFFLINE;
2289 			}
2290 		}
2291 	}
2292 	raid_set_state(myrb_raid_template, dev, state);
2293 }
2294 
/* raid_class callbacks; .cookie ties them to this driver's host template. */
static struct raid_function_template myrb_raid_functions = {
	.cookie		= &myrb_template,
	.is_raid	= myrb_is_raid,
	.get_resync	= myrb_get_resync,
	.get_state	= myrb_get_state,
};
2301 
/*
 * Complete a SCSI command: release its DMA resources, translate the
 * controller status into a SCSI result (building sense data where
 * appropriate), and hand the command back to the midlayer.
 */
static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned short status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);

	if (cmd_blk->dcdb) {
		/*
		 * Copy 64 bytes of DCDB sense data to the midlayer buffer
		 * (assumed >= 64 bytes — SCSI_SENSE_BUFFERSIZE; confirm).
		 */
		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_addr);
		cmd_blk->dcdb = NULL;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	status = cmd_blk->status;
	switch (status) {
	case MYRB_STATUS_SUCCESS:
	case MYRB_STATUS_DEVICE_BUSY:
		scmd->result = (DID_OK << 16) | status;
		break;
	case MYRB_STATUS_BAD_DATA:
		dev_dbg(&scmd->device->sdev_gendev,
			"Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
		break;
	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
		break;
	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			    "Logical Drive Nonexistent or Offline");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			    "Attempt to Access Beyond End of Logical Drive");
		/* Logical block address out of range */
		scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
		break;
	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X", status);
		scmd->result = (DID_ERROR << 16);
		break;
	}
	scsi_done(scmd);
}
2371 
2372 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2373 {
2374 	if (!cmd_blk)
2375 		return;
2376 
2377 	if (cmd_blk->completion) {
2378 		complete(cmd_blk->completion);
2379 		cmd_blk->completion = NULL;
2380 	}
2381 }
2382 
/*
 * Periodic monitor work: services at most one pending housekeeping task
 * per invocation, in priority order (event log, error table, rebuild
 * progress, logical drive info, consistency check, background init),
 * then re-arms itself.  A short interval is used while work is pending;
 * otherwise a fresh enquiry is issued at the primary interval.
 */
static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		/* Fetch one outstanding event log entry per tick. */
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		/* rbld_first prioritizes rebuild progress over ldev info. */
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		/* If the enquiry surfaced new work, run again immediately. */
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = 0;
		}
	}
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
2453 
2454 /*
2455  * myrb_err_status - reports controller BIOS messages
2456  *
2457  * Controller BIOS messages are passed through the Error Status Register
2458  * when the driver performs the BIOS handshaking.
2459  *
2460  * Return: true for fatal errors and false otherwise.
2461  */
2462 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2463 		unsigned char parm0, unsigned char parm1)
2464 {
2465 	struct pci_dev *pdev = cb->pdev;
2466 
2467 	switch (error) {
2468 	case 0x00:
2469 		dev_info(&pdev->dev,
2470 			 "Physical Device %d:%d Not Responding\n",
2471 			 parm1, parm0);
2472 		break;
2473 	case 0x08:
2474 		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2475 		break;
2476 	case 0x30:
2477 		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2478 		break;
2479 	case 0x60:
2480 		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2481 		break;
2482 	case 0x70:
2483 		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2484 		break;
2485 	case 0x90:
2486 		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2487 			   parm1, parm0);
2488 		break;
2489 	case 0xA0:
2490 		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2491 		break;
2492 	case 0xB0:
2493 		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2494 		break;
2495 	case 0xD0:
2496 		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2497 		break;
2498 	case 0xF0:
2499 		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2500 		return true;
2501 	default:
2502 		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2503 			error);
2504 		return true;
2505 	}
2506 	return false;
2507 }
2508 
2509 /*
2510  * Hardware-specific functions
2511  */
2512 
2513 /*
2514  * DAC960 LA Series Controllers
2515  */
2516 
/* Ring the inbound doorbell (IDB): new hardware-mailbox command posted. */
static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}
2521 
/* Acknowledge the hardware-mailbox status via the inbound doorbell. */
static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}
2526 
/* Request a controller reset via the inbound doorbell. */
static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}
2531 
/* Ring the inbound doorbell: new memory-mailbox command posted. */
static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}
2536 
/* True while the hardware mailbox still holds a command (EMPTY bit clear). */
static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}
2543 
/* True while controller initialization has not yet completed. */
static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}
2550 
/* Acknowledge a hardware-mailbox interrupt via the outbound doorbell. */
static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}
2555 
/* Acknowledge both hardware- and memory-mailbox interrupts at once. */
static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}
2561 
/* True when the controller has posted a hardware-mailbox status. */
static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}
2568 
/* Enable interrupts by clearing the disable bit in the IRQ mask register. */
static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
2576 
/* Disable interrupts by setting the disable bit in the IRQ mask register. */
static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
2584 
/*
 * Copy a command into the in-memory mailbox.  Word 0 (containing the
 * opcode) is written last, after a write barrier, so the controller never
 * sees a half-written command.
 */
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
2597 
/* Write the 13 command bytes into the hardware mailbox registers. */
static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}
2606 
/* Read the 16-bit command status register. */
static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}
2611 
/*
 * Fetch a pending BIOS error status and its two parameter bytes.
 * Writing 0xFF back clears the pending indication.
 *
 * Return: true if an error was pending (outputs valid), false otherwise.
 */
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}
2628 
/*
 * Synchronously execute one command through the hardware mailbox:
 * wait for the mailbox to drain, post the command, poll for status
 * (10us steps, up to MYRB_MAILBOX_TIMEOUT iterations), then ack.
 *
 * Return: controller status, or MYRB_STATUS_SUBSYS_TIMEOUT on timeout.
 */
static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}
2666 
/*
 * LA-series bring-up: wait out BIOS initialization (surfacing any fatal
 * BIOS errors), enable the memory mailbox interface, then wire up the
 * controller method pointers and enable interrupts.
 */
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	/* Dual-mode controllers use the memory mailbox doorbell. */
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}
2708 
/*
 * LA-series interrupt handler: drain the ring of status mailboxes.
 * Tag ids MYRB_DCMD_TAG/MYRB_MCMD_TAG (< 3) are internal commands;
 * all other ids map to SCSI commands via id - 3.
 */
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Consume the slot and advance, wrapping at the end. */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
2754 
/* Hardware-specific operations for DAC960 LA-series controllers. */
static struct myrb_privdata DAC960_LA_privdata = {
	.hw_init =	DAC960_LA_hw_init,
	.irq_handler =	DAC960_LA_intr_handler,
	.mmio_size =	DAC960_LA_mmio_size,
};
2760 
2761 /*
2762  * DAC960 PG Series Controllers
2763  */
/* Ring the inbound doorbell (IDB): new hardware-mailbox command posted. */
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}
2768 
/* Acknowledge the hardware-mailbox status via the inbound doorbell. */
static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}
2773 
/* Request a controller reset via the inbound doorbell. */
static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}
2778 
/* Ring the inbound doorbell: new memory-mailbox command posted. */
static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}
2783 
/*
 * True while the hardware mailbox still holds a command.  The 32-bit IDB
 * read is deliberately truncated; the flag bits live in the low byte.
 */
static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}
2790 
/* True while controller initialization has not yet completed. */
static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}
2797 
/* Acknowledge a hardware-mailbox interrupt via the outbound doorbell. */
static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}
2802 
/* Acknowledge both hardware- and memory-mailbox interrupts at once. */
static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}
2808 
/* True when the controller has posted a hardware-mailbox status. */
static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}
2815 
/* Enable interrupts by clearing the disable bit in the IRQ mask register. */
static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
2823 
/* Disable interrupts: write all-ones, which includes the disable bit. */
static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
2830 
/*
 * Copy a command into the in-memory mailbox.  Word 0 (containing the
 * opcode) is written last, after a write barrier, so the controller never
 * sees a half-written command.
 */
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
2843 
/* Write the 13 command bytes into the hardware mailbox registers. */
static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}
2852 
/* Read the 16-bit command status register. */
static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}
2858 
/*
 * Fetch a pending BIOS error status and its two parameter bytes.
 * Writing 0 back clears the pending indication.
 *
 * Return: true if an error was pending (outputs valid), false otherwise.
 */
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}
2874 
/*
 * Synchronously execute one command through the hardware mailbox:
 * wait for the mailbox to drain, post the command, poll for status
 * (10us steps, up to MYRB_MAILBOX_TIMEOUT iterations), then ack.
 *
 * Return: controller status, or MYRB_STATUS_SUBSYS_TIMEOUT on timeout.
 */
static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
	DAC960_PG_hw_mbox_new_cmd(base);

	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_PG_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_PG_read_status(base);
	DAC960_PG_ack_hw_mbox_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);

	return status;
}
2914 
/*
 * PG-series bring-up: wait out BIOS initialization (surfacing any fatal
 * BIOS errors), enable the memory mailbox interface, then wire up the
 * controller method pointers and enable interrupts.
 */
static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	/* Dual-mode controllers use the memory mailbox doorbell. */
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}
2956 
2957 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
2958 {
2959 	struct myrb_hba *cb = arg;
2960 	void __iomem *base = cb->io_base;
2961 	struct myrb_stat_mbox *next_stat_mbox;
2962 	unsigned long flags;
2963 
2964 	spin_lock_irqsave(&cb->queue_lock, flags);
2965 	DAC960_PG_ack_intr(base);
2966 	next_stat_mbox = cb->next_stat_mbox;
2967 	while (next_stat_mbox->valid) {
2968 		unsigned char id = next_stat_mbox->id;
2969 		struct scsi_cmnd *scmd = NULL;
2970 		struct myrb_cmdblk *cmd_blk = NULL;
2971 
2972 		if (id == MYRB_DCMD_TAG)
2973 			cmd_blk = &cb->dcmd_blk;
2974 		else if (id == MYRB_MCMD_TAG)
2975 			cmd_blk = &cb->mcmd_blk;
2976 		else {
2977 			scmd = scsi_host_find_tag(cb->host, id - 3);
2978 			if (scmd)
2979 				cmd_blk = scsi_cmd_priv(scmd);
2980 		}
2981 		if (cmd_blk)
2982 			cmd_blk->status = next_stat_mbox->status;
2983 		else
2984 			dev_err(&cb->pdev->dev,
2985 				"Unhandled command completion %d\n", id);
2986 
2987 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2988 		if (++next_stat_mbox > cb->last_stat_mbox)
2989 			next_stat_mbox = cb->first_stat_mbox;
2990 
2991 		if (id < 3)
2992 			myrb_handle_cmdblk(cb, cmd_blk);
2993 		else
2994 			myrb_handle_scsi(cb, cmd_blk, scmd);
2995 	}
2996 	cb->next_stat_mbox = next_stat_mbox;
2997 	spin_unlock_irqrestore(&cb->queue_lock, flags);
2998 	return IRQ_HANDLED;
2999 }
3000 
/* Per-variant hooks for the DAC960 PG series. */
static struct myrb_privdata DAC960_PG_privdata = {
	.hw_init =	DAC960_PG_hw_init,
	.irq_handler =	DAC960_PG_intr_handler,
	.mmio_size =	DAC960_PG_mmio_size,
};
3006 
3007 
3008 /*
3009  * DAC960 PD Series Controllers
3010  */
3011 
/* Ring the inbound doorbell: a new command is in the hardware mailbox. */
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}
3016 
/* Acknowledge (clear) the hardware mailbox status via the inbound doorbell. */
static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}
3021 
/* Request a controller (soft) reset via the inbound doorbell. */
static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}
3026 
3027 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3028 {
3029 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3030 
3031 	return idb & DAC960_PD_IDB_HWMBOX_FULL;
3032 }
3033 
3034 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3035 {
3036 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3037 
3038 	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3039 }
3040 
/* Acknowledge the hardware mailbox interrupt via the outbound doorbell. */
static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}
3045 
3046 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3047 {
3048 	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3049 
3050 	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3051 }
3052 
/* Unmask controller interrupts. */
static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}
3057 
/* Mask all controller interrupts (write 0 to the enable register). */
static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
3062 
/*
 * Copy a command mailbox into the controller's mailbox registers:
 * the first 12 bytes as three 32-bit words, then byte 12 on its own.
 */
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}
3071 
/* Read the command identifier (tag) of the completed command. */
static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}
3077 
/* Read the 16-bit completion status of the completed command. */
static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}
3083 
/*
 * Fetch a pending initialization error status, if any.
 *
 * Returns false when no error is pending. Otherwise stores the error
 * code (with the pending bit stripped) and the two parameter bytes,
 * clears the error status register, and returns true.
 */
static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	/* Writing 0 acknowledges/clears the pending error. */
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
3099 
/*
 * Submit a command to a PD series controller: busy-wait until the
 * hardware mailbox is free, copy the mailbox in, then ring the
 * doorbell.
 */
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
3110 
/*
 * DAC960_PD_hw_init - bring a DAC960 PD series controller online.
 *
 * Claims the controller's I/O port range, waits for firmware
 * initialization to complete, sets up DMA structures (no memory
 * mailbox on this variant) and wires the PD-specific callbacks
 * into @cb.
 *
 * NOTE(review): the error returns after request_region() do not
 * release the region locally; this appears to rely on the caller's
 * myrb_cleanup() path releasing cb->io_addr — verify.
 *
 * Return: 0 on success, -EBUSY if the I/O region is unavailable,
 * -EIO on a fatal controller error status, -ETIMEDOUT on init
 * timeout, or -ENODEV if the mailbox/DMA setup fails.
 */
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	/* Poll init-in-progress, bailing out on a fatal error status. */
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
3152 
3153 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3154 {
3155 	struct myrb_hba *cb = arg;
3156 	void __iomem *base = cb->io_base;
3157 	unsigned long flags;
3158 
3159 	spin_lock_irqsave(&cb->queue_lock, flags);
3160 	while (DAC960_PD_hw_mbox_status_available(base)) {
3161 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3162 		struct scsi_cmnd *scmd = NULL;
3163 		struct myrb_cmdblk *cmd_blk = NULL;
3164 
3165 		if (id == MYRB_DCMD_TAG)
3166 			cmd_blk = &cb->dcmd_blk;
3167 		else if (id == MYRB_MCMD_TAG)
3168 			cmd_blk = &cb->mcmd_blk;
3169 		else {
3170 			scmd = scsi_host_find_tag(cb->host, id - 3);
3171 			if (scmd)
3172 				cmd_blk = scsi_cmd_priv(scmd);
3173 		}
3174 		if (cmd_blk)
3175 			cmd_blk->status = DAC960_PD_read_status(base);
3176 		else
3177 			dev_err(&cb->pdev->dev,
3178 				"Unhandled command completion %d\n", id);
3179 
3180 		DAC960_PD_ack_intr(base);
3181 		DAC960_PD_ack_hw_mbox_status(base);
3182 
3183 		if (id < 3)
3184 			myrb_handle_cmdblk(cb, cmd_blk);
3185 		else
3186 			myrb_handle_scsi(cb, cmd_blk, scmd);
3187 	}
3188 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3189 	return IRQ_HANDLED;
3190 }
3191 
/* Per-variant hooks for the DAC960 PD series. */
static struct myrb_privdata DAC960_PD_privdata = {
	.hw_init =	DAC960_PD_hw_init,
	.irq_handler =	DAC960_PD_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3197 
3198 
3199 /*
3200  * DAC960 P Series Controllers
3201  *
3202  * Similar to the DAC960 PD Series Controllers, but some commands have
3203  * to be translated.
3204  */
3205 
/*
 * Convert old-style (P series) ENQUIRY data in place: relocate the
 * 64 bytes at offset 36 up to offset 132, then zero the vacated
 * region (offsets 36-131).
 */
static inline void myrb_translate_enquiry(void *enq)
{
	unsigned char *p = enq;

	memmove(p + 132, p + 36, 64);
	memset(p + 36, 0, 96);
}
3211 
/*
 * Convert old-style (P series) device-state data in place by
 * compacting fields: byte 3 -> byte 2, bytes 5-6 -> bytes 4-5,
 * bytes 8-11 -> bytes 6-9.
 */
static inline void myrb_translate_devstate(void *state)
{
	unsigned char *p = state;

	p[2] = p[3];
	memmove(p + 4, p + 5, 2);
	memmove(p + 6, p + 8, 4);
}
3218 
/*
 * Repack a read/write command mailbox into the old-style (P series)
 * layout: keep the low 3 bits of byte 3, fold the old byte 7 into
 * byte 3's high bits, and store the logical device number in byte 7.
 */
static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}
3228 
/*
 * Undo myrb_translate_to_rw_command(): recover the original byte 7
 * from byte 3's high bits and fold the logical device number back
 * into byte 3 (bits 3 and up).
 */
static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}
3238 
/*
 * Submit a command to a P series controller.
 *
 * The P series firmware only understands the old-style opcodes, so
 * translate the opcode (and, for read/write commands, the mailbox
 * layout) before submitting through the PD-style hardware mailbox.
 * The interrupt handler reverses the translation on completion.
 */
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		/* All other opcodes pass through unchanged. */
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
3275 
3276 
3277 static int DAC960_P_hw_init(struct pci_dev *pdev,
3278 		struct myrb_hba *cb, void __iomem *base)
3279 {
3280 	int timeout = 0;
3281 	unsigned char error, parm0, parm1;
3282 
3283 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3284 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3285 			(unsigned long)cb->io_addr);
3286 		return -EBUSY;
3287 	}
3288 	DAC960_PD_disable_intr(base);
3289 	DAC960_PD_ack_hw_mbox_status(base);
3290 	udelay(1000);
3291 	while (DAC960_PD_init_in_progress(base) &&
3292 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3293 		if (DAC960_PD_read_error_status(base, &error,
3294 						&parm0, &parm1) &&
3295 		    myrb_err_status(cb, error, parm0, parm1))
3296 			return -EAGAIN;
3297 		udelay(10);
3298 		timeout++;
3299 	}
3300 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3301 		dev_err(&pdev->dev,
3302 			"Timeout waiting for Controller Initialisation\n");
3303 		return -ETIMEDOUT;
3304 	}
3305 	if (!myrb_enable_mmio(cb, NULL)) {
3306 		dev_err(&pdev->dev,
3307 			"Unable to allocate DMA mapped memory\n");
3308 		DAC960_PD_reset_ctrl(base);
3309 		return -ETIMEDOUT;
3310 	}
3311 	DAC960_PD_enable_intr(base);
3312 	cb->qcmd = DAC960_P_qcmd;
3313 	cb->disable_intr = DAC960_PD_disable_intr;
3314 	cb->reset = DAC960_PD_reset_ctrl;
3315 
3316 	return 0;
3317 }
3318 
/*
 * DAC960_P_intr_handler - interrupt handler for P series controllers.
 *
 * Drains completions from the PD-style hardware mailbox and, before
 * completing each command, undoes the old-style opcode/data
 * translation that DAC960_P_qcmd() applied on submission.
 */
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;


		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			/* SCSI command tags start at 3 */
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		/* Nothing to complete for an unrecognized tag. */
		if (!cmd_blk)
			continue;

		/* Restore the new-style opcode and mailbox layout. */
		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3389 
/* Per-variant hooks for the DAC960 P series (shares the PD MMIO size). */
static struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3395 
/*
 * myrb_detect - allocate and initialize an HBA for a matched PCI device.
 *
 * Allocates the Scsi_Host, enables the PCI device, maps the register
 * window, runs the variant-specific hw_init hook and installs the IRQ
 * handler.
 *
 * Returns the initialized myrb_hba, or NULL on any failure (the
 * failure path hands partially-initialized state to myrb_cleanup()).
 */
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;
	cb->host = shost;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);
		return NULL;
	}

	/*
	 * PD and P series controllers use I/O ports (BAR 0) plus a
	 * memory window (BAR 1); the others are memory-mapped only.
	 */
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	/* Map from a page-aligned base; re-add the offset below. */
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
3461 
/*
 * myrb_probe - PCI probe entry point.
 *
 * Detects and initializes the controller, reads its configuration,
 * sets up the DMA mempools and registers the SCSI host.
 *
 * Returns 0 on success or a negative errno; all failure paths tear
 * down through myrb_cleanup().
 */
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;
failed:
	myrb_cleanup(cb);
	return ret;
}
3494 
3495 
/*
 * myrb_remove - PCI remove entry point.
 *
 * Flushes the controller cache before tearing down the HBA and its
 * mempools.
 */
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
3505 
3506 
/*
 * Supported controllers. The LA series sits behind a DEC 21285
 * bridge, so it is matched by subsystem IDs rather than device ID.
 */
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};
3526 
/* Export the ID table so module autoloading can match these devices. */
MODULE_DEVICE_TABLE(pci, myrb_id_table);

static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};
3535 
3536 static int __init myrb_init_module(void)
3537 {
3538 	int ret;
3539 
3540 	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3541 	if (!myrb_raid_template)
3542 		return -ENODEV;
3543 
3544 	ret = pci_register_driver(&myrb_pci_driver);
3545 	if (ret)
3546 		raid_class_release(myrb_raid_template);
3547 
3548 	return ret;
3549 }
3550 
/* Module exit: unregister the PCI driver and release the RAID template. */
static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}
3556 
/* Standard module entry points and metadata. */
module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");
3563