// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

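/*
 * Logical drives are presented on the highest channel the host exposes;
 * all lower channels address physical (pass-through) devices.
 */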
static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/**
 * myrb_create_mempools - allocates auxiliary data structures
 * @pdev: PCI device
 * @cb: pointer to the hba structure
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
				       sizeof(struct myrb_dcdb),
				       sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

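/*
 * Teardown order matters below: the monitor work is cancelled first, since
 * it may re-arm itself on cb->work_q, and only then are the workqueue and
 * the DMA pools released.
 */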
/**
 * myrb_destroy_mempools - tears down the memory pools for the controller
 * @cb: pointer to the hba structure
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/**
 * myrb_reset_cmd - reset command block
 * @cmd_blk: command block to reset
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

/**
 * myrb_qcmd - queues command block for execution
 * @cb: pointer to the hba structure
 * @cmd_blk: command block to queue
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

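	/*
	 * Write the command into the next free mailbox slot. If one of the
	 * two most recently used mailboxes has already been consumed
	 * (words[0] cleared), the controller may have stopped scanning the
	 * ring, so nudge it via get_cmd_mbox(); otherwise it is still busy
	 * and will pick up the new mailbox on its own.
	 */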
	cb->write_cmd_mbox(next_mbox, mbox);
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

/**
 * myrb_exec_cmd - executes command block and waits for completion.
 * @cb: pointer to the hba structure
 * @cmd_blk: command block to execute
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

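	/*
	 * The completion is signalled from the interrupt path once the
	 * controller posts a status mailbox for this command.
	 */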
	wait_for_completion(&cmpl);
	return cmd_blk->status;
}

/**
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @addr: bus address of the command payload
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

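	/*
	 * The driver keeps a single pre-allocated block for directly issued
	 * commands (dcmd_blk); dcmd_mutex serializes all users of it.
	 */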
	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/**
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @sdev: SCSI device the command is addressed to
 * @pdev_info: physical device state buffer to fill in
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/**
 * myrb_get_errtable - retrieves the error table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

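	/* Snapshot the previous table so only changed counters are logged. */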
	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

/**
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/**
 * myrb_get_rbld_progress - get rebuild progress information
 * @cb: pointer to the hba structure
 * @rbld: rebuild progress information buffer to fill in
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

/**
 * myrb_update_rbld_progress - updates the rebuild status
 * @cb: pointer to the hba structure
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

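		/*
		 * Shifting both operands down by 7 bits in the percentage
		 * calculation below keeps 100 * blocks_done from
		 * overflowing 32 bits on large logical drives.
		 */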
		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

/**
 * myrb_get_cc_progress - retrieve the rebuild status
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and fetches the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/**
 * myrb_bgi_control - updates background initialisation status
 * @cb: pointer to the hba structure
 *
 * Executes a type 3B command and updates the background initialisation
 * status.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				 "Background Initialization in Progress: %d%% completed\n",
				 (100 * (bgi->blocks_done >> 7))
				 / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/**
 * myrb_hba_enquiry - updates the controller status
 * @cb: pointer to the hba structure
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

/**
 * myrb_set_pdev_state - sets the device state for a physical device
 * @cb: pointer to the hba structure
 * @sdev: SCSI device to set the state for
 * @state: new device state
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}

/**
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 * @cb: pointer to the hba structure
 * @mmio_init_fn: controller-specific mailbox initialization function
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
	    sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
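	/*
	 * Opcode 0x2B with opcode2 0x14 requests the dual-mode mailbox
	 * interface (values carried over from the original DAC960 driver);
	 * if the firmware rejects it, the code below falls back to
	 * opcode2 0x10, the single-mode interface.
	 */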
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
				status);
			return false;
		}
	}
	return true;
}

/**
 * myrb_get_hba_config - reads the configuration information
 * @cb: pointer to the hba structure
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		    5.06 and above
	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
	 * DAC960PU/PD/PL	    3.51 and above
	 * DAC960PU/PD/PL/P	    2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
	 *         or D040349 (3-channel)
	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
	 *         or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	snprintf(cb->fw_version, sizeof(cb->fw_version),
		"%u.%02u-%c-%02u",
		enquiry2->fw.major_version,
		enquiry2->fw.minor_version,
		enquiry2->fw.firmware_type,
		enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			"Firmware Version '%s' unsupported\n",
			cb->fw_version);
		goto out;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;

out:
	shost_printk(KERN_INFO, cb->host,
		"Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		"  Firmware Version: %s, Memory Size: %dMB\n",
		cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			cb->irq);
	shost_printk(KERN_INFO, cb->host,
		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     "  Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     "  Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

/**
 * myrb_unmap - unmaps controller structures
 * @cb: pointer to the hba structure
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/**
 * myrb_cleanup - cleanup controller structures
 * @cb: pointer to the hba structure
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
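	/*
	 * A DCDB carries only a single data pointer, so pass-through
	 * commands mapping to more than one SG element cannot be issued.
	 */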
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

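	/*
	 * Command identifiers 1 and 2 appear to be reserved for the
	 * driver-internal MYRB_DCMD_TAG and MYRB_MCMD_TAG commands,
	 * hence the offset of 3 on the block layer tag.
	 */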
	mbox->type3.opcode = MYRB_CMD_DCDB;
	mbox->type3.id = scmd->request->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (scmd->request->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (scmd->request->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (scmd->request->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}

static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

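	/*
	 * INQUIRY byte 7: bit 6 advertises WBus32, bit 5 WBus16
	 * (bytes 8-15 above already carry the "MYLEX" vendor id).
	 */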
	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

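	/*
	 * Hand-build a MODE SENSE(6) response: a 4-byte parameter header,
	 * an 8-byte block descriptor unless the DBD bit is set, then the
	 * caching mode page (0x08).
	 */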
	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense_buffer(0, scmd->sense_buffer,
				NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
	    ldev_info->state != MYRB_DEVICE_WO) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scmd->scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_6:
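		/* READ(6)/WRITE(6): 21-bit LBA in bytes 1-3, 8-bit transfer
		 * length in byte 4. */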
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_12:
	case VERIFY_12: /* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request, invalid opcode */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
					ILLEGAL_REQUEST, 0x20, 0);
		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
		scmd->scsi_done(scmd);
		return 0;
	}

	myrb_reset_cmd(cmd_blk);
	mbox->type5.id = scmd->request->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = (u32)sg_dma_address(sgl);
	} else {
		struct myrb_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl)
			return SCSI_MLQUEUE_HOST_BUSY;

		cmd_blk->sgl = hw_sgl;
		cmd_blk->sgl_addr = hw_sgl_addr;

		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ_SG;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE_SG;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = hw_sgl_addr;
		mbox->type5.sg_count = nsge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}

static int myrb_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;

	if (sdev->channel > myrb_logical_channel(shost)) {
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	if (sdev->channel == myrb_logical_channel(shost))
		return myrb_ldev_queuecommand(shost, scmd);

	return myrb_pthru_queuecommand(shost, scmd);
}

static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info;
	unsigned short ldev_num = sdev->id;
	enum raid_level level;

	ldev_info = cb->ldev_info_buf + ldev_num;
	if (!ldev_info)
		return -ENXIO;

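	/*
	 * Give the sdev its own copy of the ldev info; myrb_get_ldev_info()
	 * keeps that copy up to date when the controller state changes.
	 */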
	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
	if (!sdev->hostdata)
		return -ENOMEM;
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc ldev %d state %x\n",
		ldev_num, ldev_info->state);
	memcpy(sdev->hostdata, ldev_info,
	       sizeof(*ldev_info));
	switch (ldev_info->raid_level) {
	case MYRB_RAID_LEVEL0:
		level = RAID_LEVEL_LINEAR;
		break;
	case MYRB_RAID_LEVEL1:
		level = RAID_LEVEL_1;
		break;
	case MYRB_RAID_LEVEL3:
		level = RAID_LEVEL_3;
		break;
	case MYRB_RAID_LEVEL5:
		level = RAID_LEVEL_5;
		break;
	case MYRB_RAID_LEVEL6:
		level = RAID_LEVEL_6;
		break;
	case MYRB_RAID_JBOD:
		level = RAID_LEVEL_JBOD;
		break;
	default:
		level = RAID_LEVEL_UNKNOWN;
		break;
	}
	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
	return 0;
}

static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	unsigned short status;

	if (sdev->id > MYRB_MAX_TARGETS)
		return -ENXIO;

	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
	if (!pdev_info)
		return -ENOMEM;

	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
				  sdev, pdev_info);
	if (status != MYRB_STATUS_SUCCESS) {
		dev_dbg(&sdev->sdev_gendev,
			"Failed to get device state, status %x\n",
			status);
		kfree(pdev_info);
		return -ENXIO;
	}
	if (!pdev_info->present) {
		dev_dbg(&sdev->sdev_gendev,
			"device not present, skip\n");
		kfree(pdev_info);
		return -ENXIO;
	}
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc pdev %d:%d state %x\n",
		sdev->channel, sdev->id, pdev_info->state);
	sdev->hostdata = pdev_info;

	return 0;
}

1731 static int myrb_slave_alloc(struct scsi_device *sdev)
1732 {
1733 	if (sdev->channel > myrb_logical_channel(sdev->host))
1734 		return -ENXIO;
1735 
1736 	if (sdev->lun > 0)
1737 		return -ENXIO;
1738 
1739 	if (sdev->channel == myrb_logical_channel(sdev->host))
1740 		return myrb_ldev_slave_alloc(sdev);
1741 
1742 	return myrb_pdev_slave_alloc(sdev);
1743 }
1744 
1745 static int myrb_slave_configure(struct scsi_device *sdev)
1746 {
1747 	struct myrb_ldev_info *ldev_info;
1748 
1749 	if (sdev->channel > myrb_logical_channel(sdev->host))
1750 		return -ENXIO;
1751 
1752 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
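		/*
		 * Physical devices behind the RAID are managed by the
		 * firmware; don't let upper-layer drivers bind to them.
		 */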
1753 		sdev->no_uld_attach = 1;
1754 		return 0;
1755 	}
1756 	if (sdev->lun != 0)
1757 		return -ENXIO;
1758 
1759 	ldev_info = sdev->hostdata;
1760 	if (!ldev_info)
1761 		return -ENXIO;
1762 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1763 		sdev_printk(KERN_INFO, sdev,
1764 			    "Logical drive is %s\n",
1765 			    myrb_devstate_name(ldev_info->state));
1766 
1767 	sdev->tagged_supported = 1;
1768 	return 0;
1769 }
1770 
1771 static void myrb_slave_destroy(struct scsi_device *sdev)
1772 {
1773 	kfree(sdev->hostdata);
1774 }
1775 
1776 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1777 		sector_t capacity, int geom[])
1778 {
1779 	struct myrb_hba *cb = shost_priv(sdev->host);
1780 
1781 	geom[0] = cb->ldev_geom_heads;
1782 	geom[1] = cb->ldev_geom_sectors;
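	/*
	 * sector_div() divides in place and returns the remainder, so the
	 * cylinder count is read back from capacity afterwards.
	 */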
1783 	sector_div(capacity, geom[0] * geom[1]);
	geom[2] = (int)capacity;
1784 
1785 	return 0;
1786 }
1787 
1788 static ssize_t raid_state_show(struct device *dev,
1789 		struct device_attribute *attr, char *buf)
1790 {
1791 	struct scsi_device *sdev = to_scsi_device(dev);
1792 	struct myrb_hba *cb = shost_priv(sdev->host);
1793 	int ret;
1794 
1795 	if (!sdev->hostdata)
1796 		return snprintf(buf, 16, "Unknown\n");
1797 
1798 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1799 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1800 		const char *name;
1801 
1802 		name = myrb_devstate_name(ldev_info->state);
1803 		if (name)
1804 			ret = snprintf(buf, 32, "%s\n", name);
1805 		else
1806 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1807 				       ldev_info->state);
1808 	} else {
1809 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
1810 		unsigned short status;
1811 		const char *name;
1812 
1813 		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1814 					  sdev, pdev_info);
1815 		if (status != MYRB_STATUS_SUCCESS)
1816 			sdev_printk(KERN_INFO, sdev,
1817 				    "Failed to get device state, status %x\n",
1818 				    status);
1819 
1820 		if (!pdev_info->present)
1821 			name = "Removed";
1822 		else
1823 			name = myrb_devstate_name(pdev_info->state);
1824 		if (name)
1825 			ret = snprintf(buf, 32, "%s\n", name);
1826 		else
1827 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1828 				       pdev_info->state);
1829 	}
1830 	return ret;
1831 }
1832 
1833 static ssize_t raid_state_store(struct device *dev,
1834 		struct device_attribute *attr, const char *buf, size_t count)
1835 {
1836 	struct scsi_device *sdev = to_scsi_device(dev);
1837 	struct myrb_hba *cb = shost_priv(sdev->host);
1838 	struct myrb_pdev_state *pdev_info;
1839 	enum myrb_devstate new_state;
1840 	unsigned short status;
1841 
1842 	if (!strncmp(buf, "kill", 4) ||
1843 	    !strncmp(buf, "offline", 7))
1844 		new_state = MYRB_DEVICE_DEAD;
1845 	else if (!strncmp(buf, "online", 6))
1846 		new_state = MYRB_DEVICE_ONLINE;
1847 	else if (!strncmp(buf, "standby", 7))
1848 		new_state = MYRB_DEVICE_STANDBY;
1849 	else
1850 		return -EINVAL;
1851 
1852 	pdev_info = sdev->hostdata;
1853 	if (!pdev_info) {
1854 		sdev_printk(KERN_INFO, sdev,
1855 			    "Failed - no physical device information\n");
1856 		return -ENXIO;
1857 	}
1858 	if (!pdev_info->present) {
1859 		sdev_printk(KERN_INFO, sdev,
1860 			    "Failed - device not present\n");
1861 		return -ENXIO;
1862 	}
1863 
1864 	if (pdev_info->state == new_state)
1865 		return count;
1866 
1867 	status = myrb_set_pdev_state(cb, sdev, new_state);
1868 	switch (status) {
1869 	case MYRB_STATUS_SUCCESS:
1870 		break;
1871 	case MYRB_STATUS_START_DEVICE_FAILED:
1872 		sdev_printk(KERN_INFO, sdev,
1873 			     "Failed - Unable to Start Device\n");
1874 		count = -EAGAIN;
1875 		break;
1876 	case MYRB_STATUS_NO_DEVICE:
1877 		sdev_printk(KERN_INFO, sdev,
1878 			    "Failed - No Device at Address\n");
1879 		count = -ENODEV;
1880 		break;
1881 	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1882 		sdev_printk(KERN_INFO, sdev,
1883 			 "Failed - Invalid Channel or Target or Modifier\n");
1884 		count = -EINVAL;
1885 		break;
1886 	case MYRB_STATUS_CHANNEL_BUSY:
1887 		sdev_printk(KERN_INFO, sdev,
1888 			 "Failed - Channel Busy\n");
1889 		count = -EBUSY;
1890 		break;
1891 	default:
1892 		sdev_printk(KERN_INFO, sdev,
1893 			 "Failed - Unexpected Status %04X\n", status);
1894 		count = -EIO;
1895 		break;
1896 	}
1897 	return count;
1898 }
1899 static DEVICE_ATTR_RW(raid_state);
1900 
1901 static ssize_t raid_level_show(struct device *dev,
1902 		struct device_attribute *attr, char *buf)
1903 {
1904 	struct scsi_device *sdev = to_scsi_device(dev);
1905 
1906 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1907 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1908 		const char *name;
1909 
1910 		if (!ldev_info)
1911 			return -ENXIO;
1912 
1913 		name = myrb_raidlevel_name(ldev_info->raid_level);
1914 		if (!name)
1915 			return snprintf(buf, 32, "Invalid (%02X)\n",
1916 					ldev_info->raid_level);
1917 		return snprintf(buf, 32, "%s\n", name);
1918 	}
1919 	return snprintf(buf, 32, "Physical Drive\n");
1920 }
1921 static DEVICE_ATTR_RO(raid_level);
1922 
1923 static ssize_t rebuild_show(struct device *dev,
1924 		struct device_attribute *attr, char *buf)
1925 {
1926 	struct scsi_device *sdev = to_scsi_device(dev);
1927 	struct myrb_hba *cb = shost_priv(sdev->host);
1928 	struct myrb_rbld_progress rbld_buf;
1929 	unsigned short status;
1930 
1931 	if (sdev->channel < myrb_logical_channel(sdev->host))
1932 		return snprintf(buf, 32, "physical device - not rebuilding\n");
1933 
1934 	status = myrb_get_rbld_progress(cb, &rbld_buf);
1935 
1936 	if (status != MYRB_STATUS_SUCCESS ||
1937 	    rbld_buf.ldev_num != sdev->id)
1938 		return snprintf(buf, 32, "not rebuilding\n");
1939 
1940 	return snprintf(buf, 32, "rebuilding block %u of %u\n",
1941 			rbld_buf.ldev_size - rbld_buf.blocks_left,
1942 			rbld_buf.ldev_size);
1943 }
1944 
1945 static ssize_t rebuild_store(struct device *dev,
1946 		struct device_attribute *attr, const char *buf, size_t count)
1947 {
1948 	struct scsi_device *sdev = to_scsi_device(dev);
1949 	struct myrb_hba *cb = shost_priv(sdev->host);
1950 	struct myrb_cmdblk *cmd_blk;
1951 	union myrb_cmd_mbox *mbox;
1952 	unsigned short status;
1953 	int rc, start;
1954 	const char *msg;
1955 
1956 	rc = kstrtoint(buf, 0, &start);
1957 	if (rc)
1958 		return rc;
1959 
1960 	if (sdev->channel >= myrb_logical_channel(sdev->host))
1961 		return -ENXIO;
1962 
1963 	status = myrb_get_rbld_progress(cb, NULL);
1964 	if (start) {
1965 		if (status == MYRB_STATUS_SUCCESS) {
1966 			sdev_printk(KERN_INFO, sdev,
1967 				    "Rebuild Not Initiated; already in progress\n");
1968 			return -EALREADY;
1969 		}
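		/*
		 * Direct commands share the single pre-allocated dcmd_blk,
		 * serialized by dcmd_mutex.
		 */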
1970 		mutex_lock(&cb->dcmd_mutex);
1971 		cmd_blk = &cb->dcmd_blk;
1972 		myrb_reset_cmd(cmd_blk);
1973 		mbox = &cmd_blk->mbox;
1974 		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1975 		mbox->type3D.id = MYRB_DCMD_TAG;
1976 		mbox->type3D.channel = sdev->channel;
1977 		mbox->type3D.target = sdev->id;
1978 		status = myrb_exec_cmd(cb, cmd_blk);
1979 		mutex_unlock(&cb->dcmd_mutex);
1980 	} else {
1981 		struct pci_dev *pdev = cb->pdev;
1982 		unsigned char *rate;
1983 		dma_addr_t rate_addr;
1984 
1985 		if (status != MYRB_STATUS_SUCCESS) {
1986 			sdev_printk(KERN_INFO, sdev,
1987 				    "Rebuild Not Cancelled; not in progress\n");
1988 			return 0;
1989 		}
1990 
1991 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1992 					  &rate_addr, GFP_KERNEL);
1993 		if (rate == NULL) {
1994 			sdev_printk(KERN_INFO, sdev,
1995 				    "Cancellation of Rebuild Failed - Out of Memory\n");
1996 			return -ENOMEM;
1997 		}
1998 		mutex_lock(&cb->dcmd_mutex);
1999 		cmd_blk = &cb->dcmd_blk;
2000 		myrb_reset_cmd(cmd_blk);
2001 		mbox = &cmd_blk->mbox;
2002 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2003 		mbox->type3R.id = MYRB_DCMD_TAG;
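		/* Cancel by issuing REBUILD_CONTROL with a rate of 0xFF. */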
2004 		mbox->type3R.rbld_rate = 0xFF;
2005 		mbox->type3R.addr = rate_addr;
2006 		status = myrb_exec_cmd(cb, cmd_blk);
2007 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2008 		mutex_unlock(&cb->dcmd_mutex);
2009 	}
2010 	if (status == MYRB_STATUS_SUCCESS) {
2011 		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2012 			    start ? "Initiated" : "Cancelled");
2013 		return count;
2014 	}
2015 	if (!start) {
2016 		sdev_printk(KERN_INFO, sdev,
2017 			    "Rebuild Not Cancelled, status 0x%x\n",
2018 			    status);
2019 		return -EIO;
2020 	}
2021 
2022 	switch (status) {
2023 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2024 		msg = "Attempt to Rebuild Online or Unresponsive Drive";
2025 		break;
2026 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2027 		msg = "New Disk Failed During Rebuild";
2028 		break;
2029 	case MYRB_STATUS_INVALID_ADDRESS:
2030 		msg = "Invalid Device Address";
2031 		break;
2032 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2033 		msg = "Already in Progress";
2034 		break;
2035 	default:
2036 		msg = NULL;
2037 		break;
2038 	}
2039 	if (msg)
2040 		sdev_printk(KERN_INFO, sdev,
2041 			    "Rebuild Failed - %s\n", msg);
2042 	else
2043 		sdev_printk(KERN_INFO, sdev,
2044 			    "Rebuild Failed, status 0x%x\n", status);
2045 
2046 	return -EIO;
2047 }
2048 static DEVICE_ATTR_RW(rebuild);
2049 
2050 static ssize_t consistency_check_store(struct device *dev,
2051 		struct device_attribute *attr, const char *buf, size_t count)
2052 {
2053 	struct scsi_device *sdev = to_scsi_device(dev);
2054 	struct myrb_hba *cb = shost_priv(sdev->host);
2055 	struct myrb_rbld_progress rbld_buf;
2056 	struct myrb_cmdblk *cmd_blk;
2057 	union myrb_cmd_mbox *mbox;
2058 	unsigned short ldev_num = 0xFFFF;
2059 	unsigned short status;
2060 	int rc, start;
2061 	const char *msg;
2062 
2063 	rc = kstrtoint(buf, 0, &start);
2064 	if (rc)
2065 		return rc;
2066 
2067 	if (sdev->channel < myrb_logical_channel(sdev->host))
2068 		return -ENXIO;
2069 
2070 	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS)
		ldev_num = rbld_buf.ldev_num;
2071 	if (start) {
2072 		if (status == MYRB_STATUS_SUCCESS) {
2073 			sdev_printk(KERN_INFO, sdev,
2074 				    "Check Consistency Not Initiated; already in progress\n");
2075 			return -EALREADY;
2076 		}
2077 		mutex_lock(&cb->dcmd_mutex);
2078 		cmd_blk = &cb->dcmd_blk;
2079 		myrb_reset_cmd(cmd_blk);
2080 		mbox = &cmd_blk->mbox;
2081 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2082 		mbox->type3C.id = MYRB_DCMD_TAG;
2083 		mbox->type3C.ldev_num = sdev->id;
2084 		mbox->type3C.auto_restore = true;
2085 
2086 		status = myrb_exec_cmd(cb, cmd_blk);
2087 		mutex_unlock(&cb->dcmd_mutex);
2088 	} else {
2089 		struct pci_dev *pdev = cb->pdev;
2090 		unsigned char *rate;
2091 		dma_addr_t rate_addr;
2092 
2093 		if (ldev_num != sdev->id) {
2094 			sdev_printk(KERN_INFO, sdev,
2095 				    "Check Consistency Not Cancelled; not in progress\n");
2096 			return 0;
2097 		}
2098 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2099 					  &rate_addr, GFP_KERNEL);
2100 		if (rate == NULL) {
2101 			sdev_printk(KERN_INFO, sdev,
2102 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2103 			return -ENOMEM;
2104 		}
2105 		mutex_lock(&cb->dcmd_mutex);
2106 		cmd_blk = &cb->dcmd_blk;
2107 		myrb_reset_cmd(cmd_blk);
2108 		mbox = &cmd_blk->mbox;
2109 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2110 		mbox->type3R.id = MYRB_DCMD_TAG;
2111 		mbox->type3R.rbld_rate = 0xFF;
2112 		mbox->type3R.addr = rate_addr;
2113 		status = myrb_exec_cmd(cb, cmd_blk);
2114 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2115 		mutex_unlock(&cb->dcmd_mutex);
2116 	}
2117 	if (status == MYRB_STATUS_SUCCESS) {
2118 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2119 			    start ? "Initiated" : "Cancelled");
2120 		return count;
2121 	}
2122 	if (!start) {
2123 		sdev_printk(KERN_INFO, sdev,
2124 			    "Check Consistency Not Cancelled, status 0x%x\n",
2125 			    status);
2126 		return -EIO;
2127 	}
2128 
2129 	switch (status) {
2130 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2131 		msg = "Dependent Physical Device is DEAD";
2132 		break;
2133 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2134 		msg = "New Disk Failed During Rebuild";
2135 		break;
2136 	case MYRB_STATUS_INVALID_ADDRESS:
2137 		msg = "Invalid or Nonredundant Logical Drive";
2138 		break;
2139 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2140 		msg = "Already in Progress";
2141 		break;
2142 	default:
2143 		msg = NULL;
2144 		break;
2145 	}
2146 	if (msg)
2147 		sdev_printk(KERN_INFO, sdev,
2148 			    "Check Consistency Failed - %s\n", msg);
2149 	else
2150 		sdev_printk(KERN_INFO, sdev,
2151 			    "Check Consistency Failed, status 0x%x\n", status);
2152 
2153 	return -EIO;
2154 }
2155 
2156 static ssize_t consistency_check_show(struct device *dev,
2157 		struct device_attribute *attr, char *buf)
2158 {
2159 	return rebuild_show(dev, attr, buf);
2160 }
2161 static DEVICE_ATTR_RW(consistency_check);
2162 
2163 static ssize_t ctlr_num_show(struct device *dev,
2164 		struct device_attribute *attr, char *buf)
2165 {
2166 	struct Scsi_Host *shost = class_to_shost(dev);
2167 	struct myrb_hba *cb = shost_priv(shost);
2168 
2169 	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2170 }
2171 static DEVICE_ATTR_RO(ctlr_num);
2172 
2173 static ssize_t firmware_show(struct device *dev,
2174 		struct device_attribute *attr, char *buf)
2175 {
2176 	struct Scsi_Host *shost = class_to_shost(dev);
2177 	struct myrb_hba *cb = shost_priv(shost);
2178 
2179 	return snprintf(buf, 16, "%s\n", cb->fw_version);
2180 }
2181 static DEVICE_ATTR_RO(firmware);
2182 
2183 static ssize_t model_show(struct device *dev,
2184 		struct device_attribute *attr, char *buf)
2185 {
2186 	struct Scsi_Host *shost = class_to_shost(dev);
2187 	struct myrb_hba *cb = shost_priv(shost);
2188 
2189 	return snprintf(buf, 16, "%s\n", cb->model_name);
2190 }
2191 static DEVICE_ATTR_RO(model);
2192 
2193 static ssize_t flush_cache_store(struct device *dev,
2194 		struct device_attribute *attr, const char *buf, size_t count)
2195 {
2196 	struct Scsi_Host *shost = class_to_shost(dev);
2197 	struct myrb_hba *cb = shost_priv(shost);
2198 	unsigned short status;
2199 
2200 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2201 	if (status == MYRB_STATUS_SUCCESS) {
2202 		shost_printk(KERN_INFO, shost,
2203 			     "Cache Flush Completed\n");
2204 		return count;
2205 	}
2206 	shost_printk(KERN_INFO, shost,
2207 		     "Cache Flush Failed, status %x\n", status);
2208 	return -EIO;
2209 }
2210 static DEVICE_ATTR_WO(flush_cache);
2211 
2212 static struct device_attribute *myrb_sdev_attrs[] = {
2213 	&dev_attr_rebuild,
2214 	&dev_attr_consistency_check,
2215 	&dev_attr_raid_state,
2216 	&dev_attr_raid_level,
2217 	NULL,
2218 };
2219 
2220 static struct device_attribute *myrb_shost_attrs[] = {
2221 	&dev_attr_ctlr_num,
2222 	&dev_attr_model,
2223 	&dev_attr_firmware,
2224 	&dev_attr_flush_cache,
2225 	NULL,
2226 };
2227 
2228 static struct scsi_host_template myrb_template = {
2229 	.module			= THIS_MODULE,
2230 	.name			= "DAC960",
2231 	.proc_name		= "myrb",
2232 	.queuecommand		= myrb_queuecommand,
2233 	.eh_host_reset_handler	= myrb_host_reset,
2234 	.slave_alloc		= myrb_slave_alloc,
2235 	.slave_configure	= myrb_slave_configure,
2236 	.slave_destroy		= myrb_slave_destroy,
2237 	.bios_param		= myrb_biosparam,
2238 	.cmd_size		= sizeof(struct myrb_cmdblk),
2239 	.shost_attrs		= myrb_shost_attrs,
2240 	.sdev_attrs		= myrb_sdev_attrs,
2241 	.this_id		= -1,
2242 };
2243 
2244 /**
2245  * myrb_is_raid - return boolean indicating device is raid volume
2246  * @dev: the device struct object
2247  */
2248 static int myrb_is_raid(struct device *dev)
2249 {
2250 	struct scsi_device *sdev = to_scsi_device(dev);
2251 
2252 	return sdev->channel == myrb_logical_channel(sdev->host);
2253 }
2254 
2255 /**
2256  * myrb_get_resync - get raid volume resync percent complete
2257  * @dev: the device struct object
2258  */
2259 static void myrb_get_resync(struct device *dev)
2260 {
2261 	struct scsi_device *sdev = to_scsi_device(dev);
2262 	struct myrb_hba *cb = shost_priv(sdev->host);
2263 	struct myrb_rbld_progress rbld_buf;
2264 	unsigned int percent_complete = 0;
2265 	unsigned short status;
2266 	unsigned int ldev_size = 0, remaining = 0;
2267 
2268 	if (sdev->channel < myrb_logical_channel(sdev->host))
2269 		return;
2270 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2271 	if (status == MYRB_STATUS_SUCCESS) {
2272 		if (rbld_buf.ldev_num == sdev->id) {
2273 			ldev_size = rbld_buf.ldev_size;
2274 			remaining = rbld_buf.blocks_left;
2275 		}
2276 	}
2277 	if (remaining && ldev_size)
2278 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2279 	raid_set_resync(myrb_raid_template, dev, percent_complete);
2280 }
2281 
2282 /**
2283  * myrb_get_state - get raid volume status
2284  * @dev: the device struct object
2285  */
2286 static void myrb_get_state(struct device *dev)
2287 {
2288 	struct scsi_device *sdev = to_scsi_device(dev);
2289 	struct myrb_hba *cb = shost_priv(sdev->host);
2290 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2291 	enum raid_state state = RAID_STATE_UNKNOWN;
2292 	unsigned short status;
2293 
2294 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2295 		state = RAID_STATE_UNKNOWN;
2296 	else {
2297 		status = myrb_get_rbld_progress(cb, NULL);
2298 		if (status == MYRB_STATUS_SUCCESS)
2299 			state = RAID_STATE_RESYNCING;
2300 		else {
2301 			switch (ldev_info->state) {
2302 			case MYRB_DEVICE_ONLINE:
2303 				state = RAID_STATE_ACTIVE;
2304 				break;
2305 			case MYRB_DEVICE_WO:
2306 			case MYRB_DEVICE_CRITICAL:
2307 				state = RAID_STATE_DEGRADED;
2308 				break;
2309 			default:
2310 				state = RAID_STATE_OFFLINE;
2311 			}
2312 		}
2313 	}
2314 	raid_set_state(myrb_raid_template, dev, state);
2315 }
2316 
2317 static struct raid_function_template myrb_raid_functions = {
2318 	.cookie		= &myrb_template,
2319 	.is_raid	= myrb_is_raid,
2320 	.get_resync	= myrb_get_resync,
2321 	.get_state	= myrb_get_state,
2322 };
2323 
2324 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2325 		struct scsi_cmnd *scmd)
2326 {
2327 	unsigned short status;
2328 
2329 	if (!cmd_blk)
2330 		return;
2331 
2332 	scsi_dma_unmap(scmd);
2333 
2334 	if (cmd_blk->dcdb) {
2335 		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2336 		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2337 			      cmd_blk->dcdb_addr);
2338 		cmd_blk->dcdb = NULL;
2339 	}
2340 	if (cmd_blk->sgl) {
2341 		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2342 		cmd_blk->sgl = NULL;
2343 		cmd_blk->sgl_addr = 0;
2344 	}
2345 	status = cmd_blk->status;
2346 	switch (status) {
2347 	case MYRB_STATUS_SUCCESS:
2348 	case MYRB_STATUS_DEVICE_BUSY:
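		/*
		 * Pass the controller status through as the SCSI status
		 * byte; a device-busy completion surfaces as SAM_STAT_BUSY.
		 */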
2349 		scmd->result = (DID_OK << 16) | status;
2350 		break;
2351 	case MYRB_STATUS_BAD_DATA:
2352 		dev_dbg(&scmd->device->sdev_gendev,
2353 			"Bad Data Encountered\n");
2354 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2355 			/* Unrecovered read error */
2356 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2357 						MEDIUM_ERROR, 0x11, 0);
2358 		else
2359 			/* Write error */
2360 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2361 						MEDIUM_ERROR, 0x0C, 0);
2362 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2363 		break;
2364 	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2365 		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2366 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2367 			/* Unrecovered read error, auto-reallocation failed */
2368 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2369 						MEDIUM_ERROR, 0x11, 0x04);
2370 		else
2371 			/* Write error, auto-reallocation failed */
2372 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2373 						MEDIUM_ERROR, 0x0C, 0x02);
2374 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2375 		break;
2376 	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2377 		dev_dbg(&scmd->device->sdev_gendev,
2378 			    "Logical Drive Nonexistent or Offline\n");
2379 		scmd->result = (DID_BAD_TARGET << 16);
2380 		break;
2381 	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2382 		dev_dbg(&scmd->device->sdev_gendev,
2383 			    "Attempt to Access Beyond End of Logical Drive\n");
2384 		/* Logical block address out of range */
2385 		scsi_build_sense_buffer(0, scmd->sense_buffer,
2386 					NOT_READY, 0x21, 0);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2387 		break;
2388 	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2389 		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2390 		scmd->result = (DID_BAD_TARGET << 16);
2391 		break;
2392 	default:
2393 		scmd_printk(KERN_ERR, scmd,
2394 			    "Unexpected Error Status %04X\n", status);
2395 		scmd->result = (DID_ERROR << 16);
2396 		break;
2397 	}
2398 	scmd->scsi_done(scmd);
2399 }
2400 
2401 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2402 {
2403 	if (!cmd_blk)
2404 		return;
2405 
2406 	if (cmd_blk->completion) {
2407 		complete(cmd_blk->completion);
2408 		cmd_blk->completion = NULL;
2409 	}
2410 }
2411 
2412 static void myrb_monitor(struct work_struct *work)
2413 {
2414 	struct myrb_hba *cb = container_of(work,
2415 			struct myrb_hba, monitor_work.work);
2416 	struct Scsi_Host *shost = cb->host;
2417 	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
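	/*
	 * interval is in jiffies; it drops to 10 jiffies whenever follow-up
	 * status needs to be fetched, and to 0 to re-run immediately.
	 */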
2418 
2419 	dev_dbg(&shost->shost_gendev, "monitor tick\n");
2420 
2421 	if (cb->new_ev_seq > cb->old_ev_seq) {
2422 		int event = cb->old_ev_seq;
2423 
2424 		dev_dbg(&shost->shost_gendev,
2425 			"get event log no %d/%d\n",
2426 			cb->new_ev_seq, event);
2427 		myrb_get_event(cb, event);
2428 		cb->old_ev_seq = event + 1;
2429 		interval = 10;
2430 	} else if (cb->need_err_info) {
2431 		cb->need_err_info = false;
2432 		dev_dbg(&shost->shost_gendev, "get error table\n");
2433 		myrb_get_errtable(cb);
2434 		interval = 10;
2435 	} else if (cb->need_rbld && cb->rbld_first) {
2436 		cb->need_rbld = false;
2437 		dev_dbg(&shost->shost_gendev,
2438 			"get rebuild progress\n");
2439 		myrb_update_rbld_progress(cb);
2440 		interval = 10;
2441 	} else if (cb->need_ldev_info) {
2442 		cb->need_ldev_info = false;
2443 		dev_dbg(&shost->shost_gendev,
2444 			"get logical drive info\n");
2445 		myrb_get_ldev_info(cb);
2446 		interval = 10;
2447 	} else if (cb->need_rbld) {
2448 		cb->need_rbld = false;
2449 		dev_dbg(&shost->shost_gendev,
2450 			"get rebuild progress\n");
2451 		myrb_update_rbld_progress(cb);
2452 		interval = 10;
2453 	} else if (cb->need_cc_status) {
2454 		cb->need_cc_status = false;
2455 		dev_dbg(&shost->shost_gendev,
2456 			"get consistency check progress\n");
2457 		myrb_get_cc_progress(cb);
2458 		interval = 10;
2459 	} else if (cb->need_bgi_status) {
2460 		cb->need_bgi_status = false;
2461 		dev_dbg(&shost->shost_gendev, "get background init status\n");
2462 		myrb_bgi_control(cb);
2463 		interval = 10;
2464 	} else {
2465 		dev_dbg(&shost->shost_gendev, "new enquiry\n");
2466 		mutex_lock(&cb->dma_mutex);
2467 		myrb_hba_enquiry(cb);
2468 		mutex_unlock(&cb->dma_mutex);
2469 		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2470 		    cb->need_err_info || cb->need_rbld ||
2471 		    cb->need_ldev_info || cb->need_cc_status ||
2472 		    cb->need_bgi_status) {
2473 			dev_dbg(&shost->shost_gendev,
2474 				"reschedule monitor\n");
2475 			interval = 0;
2476 		}
2477 	}
2478 	if (interval > 1)
2479 		cb->primary_monitor_time = jiffies;
2480 	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2481 }
2482 
2483 /**
2484  * myrb_err_status - reports controller BIOS messages
2485  * @cb: myrb_hba instance
 * @error: error status byte reported by the controller
 * @parm0: first error parameter byte
 * @parm1: second error parameter byte
 *
2486  * Controller BIOS messages are passed through the Error Status Register
2487  * when the driver performs the BIOS handshaking.
2488  *
2489  * Return: true for fatal errors and false otherwise.
2490  */
2491 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2492 		unsigned char parm0, unsigned char parm1)
2493 {
2494 	struct pci_dev *pdev = cb->pdev;
2495 
2496 	switch (error) {
2497 	case 0x00:
2498 		dev_info(&pdev->dev,
2499 			 "Physical Device %d:%d Not Responding\n",
2500 			 parm1, parm0);
2501 		break;
2502 	case 0x08:
2503 		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2504 		break;
2505 	case 0x30:
2506 		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2507 		break;
2508 	case 0x60:
2509 		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2510 		break;
2511 	case 0x70:
2512 		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2513 		break;
2514 	case 0x90:
2515 		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2516 			   parm1, parm0);
2517 		break;
2518 	case 0xA0:
2519 		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2520 		break;
2521 	case 0xB0:
2522 		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2523 		break;
2524 	case 0xD0:
2525 		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2526 		break;
2527 	case 0xF0:
2528 		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2529 		return true;
2530 	default:
2531 		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2532 			error);
2533 		return true;
2534 	}
2535 	return false;
2536 }
2537 
2538 /*
2539  * Hardware-specific functions
2540  */
2541 
2542 /*
2543  * DAC960 LA Series Controllers
2544  */
2545 
2546 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2547 {
2548 	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2549 }
2550 
2551 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2552 {
2553 	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2554 }
2555 
2556 static inline void DAC960_LA_gen_intr(void __iomem *base)
2557 {
2558 	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
2559 }
2560 
2561 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2562 {
2563 	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2564 }
2565 
2566 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2567 {
2568 	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2569 }
2570 
2571 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2572 {
2573 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2574 
2575 	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2576 }
2577 
2578 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2579 {
2580 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2581 
2582 	return !(idb & DAC960_LA_IDB_INIT_DONE);
2583 }
2584 
2585 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2586 {
2587 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2588 }
2589 
2590 static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
2591 {
2592 	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2593 }
2594 
2595 static inline void DAC960_LA_ack_intr(void __iomem *base)
2596 {
2597 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2598 	       base + DAC960_LA_ODB_OFFSET);
2599 }
2600 
2601 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2602 {
2603 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2604 
2605 	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2606 }
2607 
2608 static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
2609 {
2610 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2611 
2612 	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
2613 }
2614 
2615 static inline void DAC960_LA_enable_intr(void __iomem *base)
2616 {
2617 	unsigned char imask = 0xFF;
2618 
2619 	imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2620 	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2621 }
2622 
2623 static inline void DAC960_LA_disable_intr(void __iomem *base)
2624 {
2625 	unsigned char imask = 0xFF;
2626 
2627 	imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2628 	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2629 }
2630 
2631 static inline bool DAC960_LA_intr_enabled(void __iomem *base)
2632 {
2633 	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
2634 
2635 	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
2636 }
2637 
2638 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2639 		union myrb_cmd_mbox *mbox)
2640 {
2641 	mem_mbox->words[1] = mbox->words[1];
2642 	mem_mbox->words[2] = mbox->words[2];
2643 	mem_mbox->words[3] = mbox->words[3];
2644 	/* Memory barrier to prevent reordering */
2645 	wmb();
2646 	mem_mbox->words[0] = mbox->words[0];
2647 	/* Memory barrier to force PCI access */
2648 	mb();
2649 }
2650 
2651 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2652 		union myrb_cmd_mbox *mbox)
2653 {
2654 	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2655 	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2656 	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2657 	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2658 }
2659 
2660 static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
2661 {
2662 	return readb(base + DAC960_LA_STSID_OFFSET);
2663 }
2664 
2665 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2666 {
2667 	return readw(base + DAC960_LA_STS_OFFSET);
2668 }
2669 
2670 static inline bool
2671 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2672 		unsigned char *param0, unsigned char *param1)
2673 {
2674 	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2675 
2676 	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2677 		return false;
2678 	errsts &= ~DAC960_LA_ERRSTS_PENDING;
2679 
2680 	*error = errsts;
2681 	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2682 	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2683 	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2684 	return true;
2685 }
2686 
2687 static inline unsigned short
2688 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2689 		union myrb_cmd_mbox *mbox)
2690 {
2691 	unsigned short status;
2692 	int timeout = 0;
2693 
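	/* Busy-wait in 10 us steps for the hardware mailbox to drain. */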
2694 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2695 		if (!DAC960_LA_hw_mbox_is_full(base))
2696 			break;
2697 		udelay(10);
2698 		timeout++;
2699 	}
2700 	if (DAC960_LA_hw_mbox_is_full(base)) {
2701 		dev_err(&pdev->dev,
2702 			"Timeout waiting for empty mailbox\n");
2703 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2704 	}
2705 	DAC960_LA_write_hw_mbox(base, mbox);
2706 	DAC960_LA_hw_mbox_new_cmd(base);
2707 	timeout = 0;
2708 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2709 		if (DAC960_LA_hw_mbox_status_available(base))
2710 			break;
2711 		udelay(10);
2712 		timeout++;
2713 	}
2714 	if (!DAC960_LA_hw_mbox_status_available(base)) {
2715 		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2716 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2717 	}
2718 	status = DAC960_LA_read_status(base);
2719 	DAC960_LA_ack_hw_mbox_intr(base);
2720 	DAC960_LA_ack_hw_mbox_status(base);
2721 
2722 	return status;
2723 }
2724 
2725 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2726 		struct myrb_hba *cb, void __iomem *base)
2727 {
2728 	int timeout = 0;
2729 	unsigned char error, parm0, parm1;
2730 
2731 	DAC960_LA_disable_intr(base);
2732 	DAC960_LA_ack_hw_mbox_status(base);
2733 	udelay(1000);
2734 	while (DAC960_LA_init_in_progress(base) &&
2735 	       timeout < MYRB_MAILBOX_TIMEOUT) {
2736 		if (DAC960_LA_read_error_status(base, &error,
2737 					      &parm0, &parm1) &&
2738 		    myrb_err_status(cb, error, parm0, parm1))
2739 			return -ENODEV;
2740 		udelay(10);
2741 		timeout++;
2742 	}
2743 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2744 		dev_err(&pdev->dev,
2745 			"Timeout waiting for Controller Initialisation\n");
2746 		return -ETIMEDOUT;
2747 	}
2748 	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2749 		dev_err(&pdev->dev,
2750 			"Unable to Enable Memory Mailbox Interface\n");
2751 		DAC960_LA_reset_ctrl(base);
2752 		return -ENODEV;
2753 	}
2754 	DAC960_LA_enable_intr(base);
2755 	cb->qcmd = myrb_qcmd;
2756 	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2757 	if (cb->dual_mode_interface)
2758 		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2759 	else
2760 		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2761 	cb->disable_intr = DAC960_LA_disable_intr;
2762 	cb->reset = DAC960_LA_reset_ctrl;
2763 
2764 	return 0;
2765 }
2766 
2767 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2768 {
2769 	struct myrb_hba *cb = arg;
2770 	void __iomem *base = cb->io_base;
2771 	struct myrb_stat_mbox *next_stat_mbox;
2772 	unsigned long flags;
2773 
2774 	spin_lock_irqsave(&cb->queue_lock, flags);
2775 	DAC960_LA_ack_intr(base);
2776 	next_stat_mbox = cb->next_stat_mbox;
2777 	while (next_stat_mbox->valid) {
2778 		unsigned char id = next_stat_mbox->id;
2779 		struct scsi_cmnd *scmd = NULL;
2780 		struct myrb_cmdblk *cmd_blk = NULL;
2781 
2782 		if (id == MYRB_DCMD_TAG)
2783 			cmd_blk = &cb->dcmd_blk;
2784 		else if (id == MYRB_MCMD_TAG)
2785 			cmd_blk = &cb->mcmd_blk;
2786 		else {
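			/*
			 * Tags below 3 are driver-internal (direct and
			 * monitor commands); SCSI command tags are offset
			 * by 3.
			 */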
2787 			scmd = scsi_host_find_tag(cb->host, id - 3);
2788 			if (scmd)
2789 				cmd_blk = scsi_cmd_priv(scmd);
2790 		}
2791 		if (cmd_blk)
2792 			cmd_blk->status = next_stat_mbox->status;
2793 		else
2794 			dev_err(&cb->pdev->dev,
2795 				"Unhandled command completion %d\n", id);
2796 
2797 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
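		/* The status mailboxes form a ring; wrap to the first slot. */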
2798 		if (++next_stat_mbox > cb->last_stat_mbox)
2799 			next_stat_mbox = cb->first_stat_mbox;
2800 
2801 		if (cmd_blk) {
2802 			if (id < 3)
2803 				myrb_handle_cmdblk(cb, cmd_blk);
2804 			else
2805 				myrb_handle_scsi(cb, cmd_blk, scmd);
2806 		}
2807 	}
2808 	cb->next_stat_mbox = next_stat_mbox;
2809 	spin_unlock_irqrestore(&cb->queue_lock, flags);
2810 	return IRQ_HANDLED;
2811 }
2812 
2813 struct myrb_privdata DAC960_LA_privdata = {
2814 	.hw_init =	DAC960_LA_hw_init,
2815 	.irq_handler =	DAC960_LA_intr_handler,
2816 	.mmio_size =	DAC960_LA_mmio_size,
2817 };
2818 
2819 /*
2820  * DAC960 PG Series Controllers
2821  */
2822 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2823 {
2824 	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2825 }
2826 
2827 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2828 {
2829 	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2830 }
2831 
2832 static inline void DAC960_PG_gen_intr(void __iomem *base)
2833 {
2834 	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
2835 }
2836 
2837 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2838 {
2839 	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2840 }
2841 
2842 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2843 {
2844 	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2845 }
2846 
2847 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2848 {
2849 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2850 
2851 	return idb & DAC960_PG_IDB_HWMBOX_FULL;
2852 }
2853 
2854 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2855 {
2856 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2857 
2858 	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2859 }
2860 
2861 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2862 {
2863 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2864 }
2865 
2866 static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
2867 {
2868 	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2869 }
2870 
2871 static inline void DAC960_PG_ack_intr(void __iomem *base)
2872 {
2873 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2874 	       base + DAC960_PG_ODB_OFFSET);
2875 }
2876 
2877 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2878 {
2879 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2880 
2881 	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2882 }
2883 
2884 static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
2885 {
2886 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2887 
2888 	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
2889 }
2890 
2891 static inline void DAC960_PG_enable_intr(void __iomem *base)
2892 {
2893 	unsigned int imask = (unsigned int)-1;
2894 
2895 	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2896 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2897 }
2898 
2899 static inline void DAC960_PG_disable_intr(void __iomem *base)
2900 {
2901 	unsigned int imask = (unsigned int)-1;
2902 
2903 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2904 }
2905 
2906 static inline bool DAC960_PG_intr_enabled(void __iomem *base)
2907 {
2908 	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
2909 
2910 	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
2911 }
2912 
2913 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2914 		union myrb_cmd_mbox *mbox)
2915 {
2916 	mem_mbox->words[1] = mbox->words[1];
2917 	mem_mbox->words[2] = mbox->words[2];
2918 	mem_mbox->words[3] = mbox->words[3];
2919 	/* Memory barrier to prevent reordering */
2920 	wmb();
2921 	mem_mbox->words[0] = mbox->words[0];
2922 	/* Memory barrier to force PCI access */
2923 	mb();
2924 }
2925 
2926 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2927 		union myrb_cmd_mbox *mbox)
2928 {
2929 	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2930 	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2931 	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2932 	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2933 }
2934 
2935 static inline unsigned char
2936 DAC960_PG_read_status_cmd_ident(void __iomem *base)
2937 {
2938 	return readb(base + DAC960_PG_STSID_OFFSET);
2939 }
2940 
2941 static inline unsigned short
2942 DAC960_PG_read_status(void __iomem *base)
2943 {
2944 	return readw(base + DAC960_PG_STS_OFFSET);
2945 }
2946 
2947 static inline bool
2948 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2949 		unsigned char *param0, unsigned char *param1)
2950 {
2951 	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2952 
2953 	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2954 		return false;
2955 	errsts &= ~DAC960_PG_ERRSTS_PENDING;
2956 	*error = errsts;
2957 	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2958 	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2959 	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2960 	return true;
2961 }
2962 
2963 static inline unsigned short
2964 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2965 		union myrb_cmd_mbox *mbox)
2966 {
2967 	unsigned short status;
2968 	int timeout = 0;
2969 
2970 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2971 		if (!DAC960_PG_hw_mbox_is_full(base))
2972 			break;
2973 		udelay(10);
2974 		timeout++;
2975 	}
2976 	if (DAC960_PG_hw_mbox_is_full(base)) {
2977 		dev_err(&pdev->dev,
2978 			"Timeout waiting for empty mailbox\n");
2979 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2980 	}
2981 	DAC960_PG_write_hw_mbox(base, mbox);
2982 	DAC960_PG_hw_mbox_new_cmd(base);
2983 
2984 	timeout = 0;
2985 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2986 		if (DAC960_PG_hw_mbox_status_available(base))
2987 			break;
2988 		udelay(10);
2989 		timeout++;
2990 	}
2991 	if (!DAC960_PG_hw_mbox_status_available(base)) {
2992 		dev_err(&pdev->dev,
2993 			"Timeout waiting for mailbox status\n");
2994 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2995 	}
2996 	status = DAC960_PG_read_status(base);
2997 	DAC960_PG_ack_hw_mbox_intr(base);
2998 	DAC960_PG_ack_hw_mbox_status(base);
2999 
3000 	return status;
3001 }
3002 
3003 static int DAC960_PG_hw_init(struct pci_dev *pdev,
3004 		struct myrb_hba *cb, void __iomem *base)
3005 {
3006 	int timeout = 0;
3007 	unsigned char error, parm0, parm1;
3008 
3009 	DAC960_PG_disable_intr(base);
3010 	DAC960_PG_ack_hw_mbox_status(base);
3011 	udelay(1000);
3012 	while (DAC960_PG_init_in_progress(base) &&
3013 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3014 		if (DAC960_PG_read_error_status(base, &error,
3015 						&parm0, &parm1) &&
3016 		    myrb_err_status(cb, error, parm0, parm1))
3017 			return -EIO;
3018 		udelay(10);
3019 		timeout++;
3020 	}
3021 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3022 		dev_err(&pdev->dev,
3023 			"Timeout waiting for Controller Initialisation\n");
3024 		return -ETIMEDOUT;
3025 	}
3026 	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
3027 		dev_err(&pdev->dev,
3028 			"Unable to Enable Memory Mailbox Interface\n");
3029 		DAC960_PG_reset_ctrl(base);
3030 		return -ENODEV;
3031 	}
3032 	DAC960_PG_enable_intr(base);
3033 	cb->qcmd = myrb_qcmd;
3034 	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
3035 	if (cb->dual_mode_interface)
3036 		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
3037 	else
3038 		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
3039 	cb->disable_intr = DAC960_PG_disable_intr;
3040 	cb->reset = DAC960_PG_reset_ctrl;
3041 
3042 	return 0;
3043 }
3044 
3045 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
3046 {
3047 	struct myrb_hba *cb = arg;
3048 	void __iomem *base = cb->io_base;
3049 	struct myrb_stat_mbox *next_stat_mbox;
3050 	unsigned long flags;
3051 
3052 	spin_lock_irqsave(&cb->queue_lock, flags);
3053 	DAC960_PG_ack_intr(base);
3054 	next_stat_mbox = cb->next_stat_mbox;
3055 	while (next_stat_mbox->valid) {
3056 		unsigned char id = next_stat_mbox->id;
3057 		struct scsi_cmnd *scmd = NULL;
3058 		struct myrb_cmdblk *cmd_blk = NULL;
3059 
3060 		if (id == MYRB_DCMD_TAG)
3061 			cmd_blk = &cb->dcmd_blk;
3062 		else if (id == MYRB_MCMD_TAG)
3063 			cmd_blk = &cb->mcmd_blk;
3064 		else {
3065 			scmd = scsi_host_find_tag(cb->host, id - 3);
3066 			if (scmd)
3067 				cmd_blk = scsi_cmd_priv(scmd);
3068 		}
3069 		if (cmd_blk)
3070 			cmd_blk->status = next_stat_mbox->status;
3071 		else
3072 			dev_err(&cb->pdev->dev,
3073 				"Unhandled command completion %d\n", id);
3074 
3075 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
3076 		if (++next_stat_mbox > cb->last_stat_mbox)
3077 			next_stat_mbox = cb->first_stat_mbox;
3078 
3079 		if (id < 3)
3080 			myrb_handle_cmdblk(cb, cmd_blk);
3081 		else
3082 			myrb_handle_scsi(cb, cmd_blk, scmd);
3083 	}
3084 	cb->next_stat_mbox = next_stat_mbox;
3085 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3086 	return IRQ_HANDLED;
3087 }
3088 
3089 struct myrb_privdata DAC960_PG_privdata = {
3090 	.hw_init =	DAC960_PG_hw_init,
3091 	.irq_handler =	DAC960_PG_intr_handler,
3092 	.mmio_size =	DAC960_PG_mmio_size,
3093 };
3094 
3096 /*
3097  * DAC960 PD Series Controllers
3098  */
3099 
3100 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3101 {
3102 	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3103 }
3104 
3105 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3106 {
3107 	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3108 }
3109 
3110 static inline void DAC960_PD_gen_intr(void __iomem *base)
3111 {
3112 	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
3113 }
3114 
3115 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3116 {
3117 	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3118 }
3119 
3120 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3121 {
3122 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3123 
3124 	return idb & DAC960_PD_IDB_HWMBOX_FULL;
3125 }
3126 
3127 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3128 {
3129 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3130 
3131 	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3132 }
3133 
3134 static inline void DAC960_PD_ack_intr(void __iomem *base)
3135 {
3136 	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3137 }
3138 
3139 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3140 {
3141 	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3142 
3143 	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3144 }
3145 
3146 static inline void DAC960_PD_enable_intr(void __iomem *base)
3147 {
3148 	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3149 }
3150 
3151 static inline void DAC960_PD_disable_intr(void __iomem *base)
3152 {
3153 	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3154 }
3155 
3156 static inline bool DAC960_PD_intr_enabled(void __iomem *base)
3157 {
3158 	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
3159 
3160 	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
3161 }
3162 
3163 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3164 		union myrb_cmd_mbox *mbox)
3165 {
3166 	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3167 	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3168 	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3169 	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3170 }
3171 
3172 static inline unsigned char
3173 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3174 {
3175 	return readb(base + DAC960_PD_STSID_OFFSET);
3176 }
3177 
3178 static inline unsigned short
3179 DAC960_PD_read_status(void __iomem *base)
3180 {
3181 	return readw(base + DAC960_PD_STS_OFFSET);
3182 }
3183 
3184 static inline bool
3185 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3186 		unsigned char *param0, unsigned char *param1)
3187 {
3188 	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3189 
3190 	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3191 		return false;
3192 	errsts &= ~DAC960_PD_ERRSTS_PENDING;
3193 	*error = errsts;
3194 	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3195 	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3196 	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3197 	return true;
3198 }
3199 
3200 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3201 {
3202 	void __iomem *base = cb->io_base;
3203 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3204 
3205 	while (DAC960_PD_hw_mbox_is_full(base))
3206 		udelay(1);
3207 	DAC960_PD_write_cmd_mbox(base, mbox);
3208 	DAC960_PD_hw_mbox_new_cmd(base);
3209 }
3210 
3211 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3212 		struct myrb_hba *cb, void __iomem *base)
3213 {
3214 	int timeout = 0;
3215 	unsigned char error, parm0, parm1;
3216 
3217 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3218 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3219 			(unsigned long)cb->io_addr);
3220 		return -EBUSY;
3221 	}
3222 	DAC960_PD_disable_intr(base);
3223 	DAC960_PD_ack_hw_mbox_status(base);
3224 	udelay(1000);
3225 	while (DAC960_PD_init_in_progress(base) &&
3226 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3227 		if (DAC960_PD_read_error_status(base, &error,
3228 					      &parm0, &parm1) &&
3229 		    myrb_err_status(cb, error, parm0, parm1))
3230 			return -EIO;
3231 		udelay(10);
3232 		timeout++;
3233 	}
3234 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3235 		dev_err(&pdev->dev,
3236 			"Timeout waiting for Controller Initialisation\n");
3237 		return -ETIMEDOUT;
3238 	}
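	/*
	 * No mailbox init routine here: the PD series has no memory
	 * mailbox handshake, so myrb_enable_mmio() only sets up the
	 * DMA structures.
	 */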
3239 	if (!myrb_enable_mmio(cb, NULL)) {
3240 		dev_err(&pdev->dev,
3241 			"Unable to Enable Memory Mailbox Interface\n");
3242 		DAC960_PD_reset_ctrl(base);
3243 		return -ENODEV;
3244 	}
3245 	DAC960_PD_enable_intr(base);
3246 	cb->qcmd = DAC960_PD_qcmd;
3247 	cb->disable_intr = DAC960_PD_disable_intr;
3248 	cb->reset = DAC960_PD_reset_ctrl;
3249 
3250 	return 0;
3251 }
3252 
3253 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3254 {
3255 	struct myrb_hba *cb = arg;
3256 	void __iomem *base = cb->io_base;
3257 	unsigned long flags;
3258 
3259 	spin_lock_irqsave(&cb->queue_lock, flags);
3260 	while (DAC960_PD_hw_mbox_status_available(base)) {
3261 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3262 		struct scsi_cmnd *scmd = NULL;
3263 		struct myrb_cmdblk *cmd_blk = NULL;
3264 
3265 		if (id == MYRB_DCMD_TAG)
3266 			cmd_blk = &cb->dcmd_blk;
3267 		else if (id == MYRB_MCMD_TAG)
3268 			cmd_blk = &cb->mcmd_blk;
3269 		else {
3270 			scmd = scsi_host_find_tag(cb->host, id - 3);
3271 			if (scmd)
3272 				cmd_blk = scsi_cmd_priv(scmd);
3273 		}
3274 		if (cmd_blk)
3275 			cmd_blk->status = DAC960_PD_read_status(base);
3276 		else
3277 			dev_err(&cb->pdev->dev,
3278 				"Unhandled command completion %d\n", id);
3279 
3280 		DAC960_PD_ack_intr(base);
3281 		DAC960_PD_ack_hw_mbox_status(base);
3282 
3283 		if (id < 3)
3284 			myrb_handle_cmdblk(cb, cmd_blk);
3285 		else
3286 			myrb_handle_scsi(cb, cmd_blk, scmd);
3287 	}
3288 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3289 	return IRQ_HANDLED;
3290 }
3291 
3292 struct myrb_privdata DAC960_PD_privdata = {
3293 	.hw_init =	DAC960_PD_hw_init,
3294 	.irq_handler =	DAC960_PD_intr_handler,
3295 	.mmio_size =	DAC960_PD_mmio_size,
3296 };
3297 
3299 /*
3300  * DAC960 P Series Controllers
3301  *
3302  * Similar to the DAC960 PD Series Controllers, but some commands have
3303  * to be translated.
3304  */
3305 
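/*
 * The P Series firmware returns data in the old DAC960 layouts; these
 * helpers move the fields to the offsets the common code expects.
 */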
3306 static inline void myrb_translate_enquiry(void *enq)
3307 {
3308 	memcpy(enq + 132, enq + 36, 64);
3309 	memset(enq + 36, 0, 96);
3310 }
3311 
3312 static inline void myrb_translate_devstate(void *state)
3313 {
3314 	memcpy(state + 2, state + 3, 1);
3315 	memmove(state + 4, state + 5, 2);
3316 	memmove(state + 6, state + 8, 4);
3317 }
3318 
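/*
 * Old-style read/write mailboxes keep the logical drive number in byte 7
 * and the top LBA bits in the high bits of byte 3; the new format packs
 * the drive number into byte 3 and uses byte 7 as the LBA high byte.
 * (Layout inferred from the translation below.)
 */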
3319 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3320 {
3321 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3322 	int ldev_num = mbox->type5.ld.ldev_num;
3323 
3324 	mbox->bytes[3] &= 0x7;
3325 	mbox->bytes[3] |= mbox->bytes[7] << 6;
3326 	mbox->bytes[7] = ldev_num;
3327 }
3328 
3329 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3330 {
3331 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3332 	int ldev_num = mbox->bytes[7];
3333 
3334 	mbox->bytes[7] = mbox->bytes[3] >> 6;
3335 	mbox->bytes[3] &= 0x7;
3336 	mbox->bytes[3] |= ldev_num << 3;
3337 }
3338 
3339 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3340 {
3341 	void __iomem *base = cb->io_base;
3342 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3343 
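	/*
	 * Rewrite new-style opcodes into their old-style equivalents;
	 * read/write commands also need their mailbox fields shuffled.
	 */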
3344 	switch (mbox->common.opcode) {
3345 	case MYRB_CMD_ENQUIRY:
3346 		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3347 		break;
3348 	case MYRB_CMD_GET_DEVICE_STATE:
3349 		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3350 		break;
3351 	case MYRB_CMD_READ:
3352 		mbox->common.opcode = MYRB_CMD_READ_OLD;
3353 		myrb_translate_to_rw_command(cmd_blk);
3354 		break;
3355 	case MYRB_CMD_WRITE:
3356 		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3357 		myrb_translate_to_rw_command(cmd_blk);
3358 		break;
3359 	case MYRB_CMD_READ_SG:
3360 		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3361 		myrb_translate_to_rw_command(cmd_blk);
3362 		break;
3363 	case MYRB_CMD_WRITE_SG:
3364 		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3365 		myrb_translate_to_rw_command(cmd_blk);
3366 		break;
3367 	default:
3368 		break;
3369 	}
3370 	while (DAC960_PD_hw_mbox_is_full(base))
3371 		udelay(1);
3372 	DAC960_PD_write_cmd_mbox(base, mbox);
3373 	DAC960_PD_hw_mbox_new_cmd(base);
3374 }
3375 
3377 static int DAC960_P_hw_init(struct pci_dev *pdev,
3378 		struct myrb_hba *cb, void __iomem *base)
3379 {
3380 	int timeout = 0;
3381 	unsigned char error, parm0, parm1;
3382 
3383 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3384 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3385 			(unsigned long)cb->io_addr);
3386 		return -EBUSY;
3387 	}
3388 	DAC960_PD_disable_intr(base);
3389 	DAC960_PD_ack_hw_mbox_status(base);
3390 	udelay(1000);
3391 	while (DAC960_PD_init_in_progress(base) &&
3392 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3393 		if (DAC960_PD_read_error_status(base, &error,
3394 						&parm0, &parm1) &&
3395 		    myrb_err_status(cb, error, parm0, parm1))
3396 			return -EIO;
3397 		udelay(10);
3398 		timeout++;
3399 	}
3400 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3401 		dev_err(&pdev->dev,
3402 			"Timeout waiting for Controller Initialisation\n");
3403 		return -ETIMEDOUT;
3404 	}
3405 	if (!myrb_enable_mmio(cb, NULL)) {
3406 		dev_err(&pdev->dev,
3407 			"Unable to allocate DMA mapped memory\n");
3408 		DAC960_PD_reset_ctrl(base);
3409 		return -ENODEV;
3410 	}
3411 	DAC960_PD_enable_intr(base);
3412 	cb->qcmd = DAC960_P_qcmd;
3413 	cb->disable_intr = DAC960_PD_disable_intr;
3414 	cb->reset = DAC960_PD_reset_ctrl;
3415 
3416 	return 0;
3417 }
3418 
3419 static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3420 {
3421 	struct myrb_hba *cb = arg;
3422 	void __iomem *base = cb->io_base;
3423 	unsigned long flags;
3424 
3425 	spin_lock_irqsave(&cb->queue_lock, flags);
3426 	while (DAC960_PD_hw_mbox_status_available(base)) {
3427 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3428 		struct scsi_cmnd *scmd = NULL;
3429 		struct myrb_cmdblk *cmd_blk = NULL;
3430 		union myrb_cmd_mbox *mbox;
3431 		enum myrb_cmd_opcode op;
3432 
3434 		if (id == MYRB_DCMD_TAG)
3435 			cmd_blk = &cb->dcmd_blk;
3436 		else if (id == MYRB_MCMD_TAG)
3437 			cmd_blk = &cb->mcmd_blk;
3438 		else {
3439 			scmd = scsi_host_find_tag(cb->host, id - 3);
3440 			if (scmd)
3441 				cmd_blk = scsi_cmd_priv(scmd);
3442 		}
3443 		if (cmd_blk)
3444 			cmd_blk->status = DAC960_PD_read_status(base);
3445 		else
3446 			dev_err(&cb->pdev->dev,
3447 				"Unhandled command completion %d\n", id);
3448 
3449 		DAC960_PD_ack_intr(base);
3450 		DAC960_PD_ack_hw_mbox_status(base);
3451 
3452 		if (!cmd_blk)
3453 			continue;
3454 
3455 		mbox = &cmd_blk->mbox;
3456 		op = mbox->common.opcode;
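		/*
		 * Restore the new-style opcode and undo the read/write
		 * mailbox translation before completing the command.
		 */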
3457 		switch (op) {
3458 		case MYRB_CMD_ENQUIRY_OLD:
3459 			mbox->common.opcode = MYRB_CMD_ENQUIRY;
3460 			myrb_translate_enquiry(cb->enquiry);
3461 			break;
3462 		case MYRB_CMD_READ_OLD:
3463 			mbox->common.opcode = MYRB_CMD_READ;
3464 			myrb_translate_from_rw_command(cmd_blk);
3465 			break;
3466 		case MYRB_CMD_WRITE_OLD:
3467 			mbox->common.opcode = MYRB_CMD_WRITE;
3468 			myrb_translate_from_rw_command(cmd_blk);
3469 			break;
3470 		case MYRB_CMD_READ_SG_OLD:
3471 			mbox->common.opcode = MYRB_CMD_READ_SG;
3472 			myrb_translate_from_rw_command(cmd_blk);
3473 			break;
3474 		case MYRB_CMD_WRITE_SG_OLD:
3475 			mbox->common.opcode = MYRB_CMD_WRITE_SG;
3476 			myrb_translate_from_rw_command(cmd_blk);
3477 			break;
3478 		default:
3479 			break;
3480 		}
3481 		if (id < 3)
3482 			myrb_handle_cmdblk(cb, cmd_blk);
3483 		else
3484 			myrb_handle_scsi(cb, cmd_blk, scmd);
3485 	}
3486 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3487 	return IRQ_HANDLED;
3488 }
3489 
3490 struct myrb_privdata DAC960_P_privdata = {
3491 	.hw_init =	DAC960_P_hw_init,
3492 	.irq_handler =	DAC960_P_intr_handler,
3493 	.mmio_size =	DAC960_PD_mmio_size,
3494 };
3495 
3496 static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3497 		const struct pci_device_id *entry)
3498 {
3499 	struct myrb_privdata *privdata =
3500 		(struct myrb_privdata *)entry->driver_data;
3501 	irq_handler_t irq_handler = privdata->irq_handler;
3502 	unsigned int mmio_size = privdata->mmio_size;
3503 	struct Scsi_Host *shost;
3504 	struct myrb_hba *cb = NULL;
3505 
3506 	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3507 	if (!shost) {
3508 		dev_err(&pdev->dev, "Unable to allocate Controller\n");
3509 		return NULL;
3510 	}
3511 	shost->max_cmd_len = 12;
3512 	shost->max_lun = 256;
3513 	cb = shost_priv(shost);
3514 	mutex_init(&cb->dcmd_mutex);
3515 	mutex_init(&cb->dma_mutex);
3516 	cb->pdev = pdev;
3517 
3518 	if (pci_enable_device(pdev))
3519 		goto failure;
3520 
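	/*
	 * PD and P series controllers use an I/O-port window in BAR 0
	 * with registers in BAR 1; the other variants map their registers
	 * through BAR 0 only.
	 */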
3521 	if (privdata->hw_init == DAC960_PD_hw_init ||
3522 	    privdata->hw_init == DAC960_P_hw_init) {
3523 		cb->io_addr = pci_resource_start(pdev, 0);
3524 		cb->pci_addr = pci_resource_start(pdev, 1);
3525 	} else
3526 		cb->pci_addr = pci_resource_start(pdev, 0);
3527 
3528 	pci_set_drvdata(pdev, cb);
3529 	spin_lock_init(&cb->queue_lock);
3530 	if (mmio_size < PAGE_SIZE)
3531 		mmio_size = PAGE_SIZE;
3532 	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3533 	if (cb->mmio_base == NULL) {
3534 		dev_err(&pdev->dev,
3535 			"Unable to map Controller Register Window\n");
3536 		goto failure;
3537 	}
3538 
3539 	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3540 	if (privdata->hw_init(pdev, cb, cb->io_base))
3541 		goto failure;
3542 
3543 	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3544 		dev_err(&pdev->dev,
3545 			"Unable to acquire IRQ Channel %d\n", pdev->irq);
3546 		goto failure;
3547 	}
3548 	cb->irq = pdev->irq;
3549 	return cb;
3550 
3551 failure:
3552 	dev_err(&pdev->dev,
3553 		"Failed to initialize Controller\n");
3554 	myrb_cleanup(cb);
3555 	return NULL;
3556 }
3557 
3558 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3559 {
3560 	struct myrb_hba *cb;
3561 	int ret;
3562 
3563 	cb = myrb_detect(dev, entry);
3564 	if (!cb)
3565 		return -ENODEV;
3566 
3567 	ret = myrb_get_hba_config(cb);
3568 	if (ret < 0) {
3569 		myrb_cleanup(cb);
3570 		return ret;
3571 	}
3572 
3573 	if (!myrb_create_mempools(dev, cb)) {
3574 		ret = -ENOMEM;
3575 		goto failed;
3576 	}
3577 
3578 	ret = scsi_add_host(cb->host, &dev->dev);
3579 	if (ret) {
3580 		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3581 		myrb_destroy_mempools(cb);
3582 		goto failed;
3583 	}
3584 	scsi_scan_host(cb->host);
3585 	return 0;
3586 failed:
3587 	myrb_cleanup(cb);
3588 	return ret;
3589 }
3590 
3592 static void myrb_remove(struct pci_dev *pdev)
3593 {
3594 	struct myrb_hba *cb = pci_get_drvdata(pdev);
3595 
3596 	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
3597 	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3598 	myrb_cleanup(cb);
3599 	myrb_destroy_mempools(cb);
3600 }
3601 
3603 static const struct pci_device_id myrb_id_table[] = {
3604 	{
3605 		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3606 			       PCI_DEVICE_ID_DEC_21285,
3607 			       PCI_VENDOR_ID_MYLEX,
3608 			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
3609 		.driver_data	= (unsigned long) &DAC960_LA_privdata,
3610 	},
3611 	{
3612 		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3613 	},
3614 	{
3615 		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3616 	},
3617 	{
3618 		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3619 	},
3620 	{0, },
3621 };
3622 
3623 MODULE_DEVICE_TABLE(pci, myrb_id_table);
3624 
3625 static struct pci_driver myrb_pci_driver = {
3626 	.name		= "myrb",
3627 	.id_table	= myrb_id_table,
3628 	.probe		= myrb_probe,
3629 	.remove		= myrb_remove,
3630 };
3631 
3632 static int __init myrb_init_module(void)
3633 {
3634 	int ret;
3635 
3636 	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3637 	if (!myrb_raid_template)
3638 		return -ENODEV;
3639 
3640 	ret = pci_register_driver(&myrb_pci_driver);
3641 	if (ret)
3642 		raid_class_release(myrb_raid_template);
3643 
3644 	return ret;
3645 }
3646 
3647 static void __exit myrb_cleanup_module(void)
3648 {
3649 	pci_unregister_driver(&myrb_pci_driver);
3650 	raid_class_release(myrb_raid_template);
3651 }
3652 
3653 module_init(myrb_init_module);
3654 module_exit(myrb_cleanup_module);
3655 
3656 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3657 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3658 MODULE_LICENSE("GPL");
3659