1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4  *
5  * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6  *
7  * Based on the original DAC960 driver,
8  * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9  * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10  *
11  */
12 
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
25 #include "myrb.h"
26 
27 static struct raid_template *myrb_raid_template;
28 
29 static void myrb_monitor(struct work_struct *work);
30 static inline void myrb_translate_devstate(void *DeviceState);
31 
32 static inline int myrb_logical_channel(struct Scsi_Host *shost)
33 {
34 	return shost->max_channel - 1;
35 }
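/*
 * Note: all logical drives are mapped onto a single "logical" channel
 * placed after the physical channels, so the host's last channel
 * (max_channel - 1) addresses logical drives while the lower channels
 * address pass-through physical devices; see myrb_queuecommand() for
 * the dispatch.
 */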
36 
37 static struct myrb_devstate_name_entry {
38 	enum myrb_devstate state;
39 	const char *name;
40 } myrb_devstate_name_list[] = {
41 	{ MYRB_DEVICE_DEAD, "Dead" },
42 	{ MYRB_DEVICE_WO, "WriteOnly" },
43 	{ MYRB_DEVICE_ONLINE, "Online" },
44 	{ MYRB_DEVICE_CRITICAL, "Critical" },
45 	{ MYRB_DEVICE_STANDBY, "Standby" },
46 	{ MYRB_DEVICE_OFFLINE, "Offline" },
47 };
48 
49 static const char *myrb_devstate_name(enum myrb_devstate state)
50 {
51 	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
52 	int i;
53 
54 	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 		if (entry[i].state == state)
56 			return entry[i].name;
57 	}
58 	return "Unknown";
59 }
60 
61 static struct myrb_raidlevel_name_entry {
62 	enum myrb_raidlevel level;
63 	const char *name;
64 } myrb_raidlevel_name_list[] = {
65 	{ MYRB_RAID_LEVEL0, "RAID0" },
66 	{ MYRB_RAID_LEVEL1, "RAID1" },
67 	{ MYRB_RAID_LEVEL3, "RAID3" },
68 	{ MYRB_RAID_LEVEL5, "RAID5" },
69 	{ MYRB_RAID_LEVEL6, "RAID6" },
70 	{ MYRB_RAID_JBOD, "JBOD" },
71 };
72 
73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
74 {
75 	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
76 	int i;
77 
78 	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 		if (entry[i].level == level)
80 			return entry[i].name;
81 	}
82 	return NULL;
83 }
84 
/**
 * myrb_create_mempools - allocates auxiliary data structures
 * @pdev: PCI device
 * @cb: pointer to the hba structure
 *
 * Return: true on success, false otherwise.
 */
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
91 {
92 	size_t elem_size, elem_align;
93 
94 	elem_align = sizeof(struct myrb_sge);
95 	elem_size = cb->host->sg_tablesize * elem_align;
96 	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 				      elem_size, elem_align, 0);
98 	if (cb->sg_pool == NULL) {
99 		shost_printk(KERN_ERR, cb->host,
100 			     "Failed to allocate SG pool\n");
101 		return false;
102 	}
103 
104 	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 				       sizeof(struct myrb_dcdb),
106 				       sizeof(unsigned int), 0);
107 	if (!cb->dcdb_pool) {
108 		dma_pool_destroy(cb->sg_pool);
109 		cb->sg_pool = NULL;
110 		shost_printk(KERN_ERR, cb->host,
111 			     "Failed to allocate DCDB pool\n");
112 		return false;
113 	}
114 
115 	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 		 "myrb_wq_%d", cb->host->host_no);
117 	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118 	if (!cb->work_q) {
119 		dma_pool_destroy(cb->dcdb_pool);
120 		cb->dcdb_pool = NULL;
121 		dma_pool_destroy(cb->sg_pool);
122 		cb->sg_pool = NULL;
123 		shost_printk(KERN_ERR, cb->host,
124 			     "Failed to create workqueue\n");
125 		return false;
126 	}
127 
	/*
	 * Start the background monitoring work.
	 */
131 	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
133 
134 	return true;
135 }
136 
/**
 * myrb_destroy_mempools - tears down the memory pools for the controller
 * @cb: pointer to the hba structure
 */
140 static void myrb_destroy_mempools(struct myrb_hba *cb)
141 {
142 	cancel_delayed_work_sync(&cb->monitor_work);
143 	destroy_workqueue(cb->work_q);
144 
145 	dma_pool_destroy(cb->sg_pool);
146 	dma_pool_destroy(cb->dcdb_pool);
147 }
148 
/**
 * myrb_reset_cmd - resets a command block
 * @cmd_blk: command block to reset
 */
152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
153 {
154 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
155 
156 	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
157 	cmd_blk->status = 0;
158 }
159 
/**
 * myrb_qcmd - queues command block for execution
 * @cb: pointer to the hba structure
 * @cmd_blk: command block to queue
 */
163 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
164 {
165 	void __iomem *base = cb->io_base;
166 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
167 	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
168 
169 	cb->write_cmd_mbox(next_mbox, mbox);
170 	if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 	    cb->prev_cmd_mbox2->words[0] == 0)
172 		cb->get_cmd_mbox(base);
173 	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 	cb->prev_cmd_mbox1 = next_mbox;
175 	if (++next_mbox > cb->last_cmd_mbox)
176 		next_mbox = cb->first_cmd_mbox;
177 	cb->next_cmd_mbox = next_mbox;
178 }
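/*
 * Note on the mailbox handshake above: the controller clears the first
 * word of a command mailbox once it has fetched the command. If either
 * of the two most recently posted mailboxes is already clear the
 * controller may have gone idle, so the hardware doorbell
 * (get_cmd_mbox) is rung to make sure it picks up the newly written
 * mailbox before the ring pointer advances.
 */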
179 
/**
 * myrb_exec_cmd - executes a command block and waits for completion.
 * @cb: pointer to the hba structure
 * @cmd_blk: command block to execute
 *
 * Return: command status
 */
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 		struct myrb_cmdblk *cmd_blk)
187 {
188 	DECLARE_COMPLETION_ONSTACK(cmpl);
189 	unsigned long flags;
190 
191 	cmd_blk->completion = &cmpl;
192 
193 	spin_lock_irqsave(&cb->queue_lock, flags);
194 	cb->qcmd(cb, cmd_blk);
195 	spin_unlock_irqrestore(&cb->queue_lock, flags);
196 
197 	WARN_ON(in_interrupt());
198 	wait_for_completion(&cmpl);
199 	return cmd_blk->status;
200 }
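/*
 * Note: the completion above is signalled from the controller's
 * interrupt handler (not shown in this excerpt), so this helper may
 * only be used from process context - hence the WARN_ON(in_interrupt())
 * check before sleeping.
 */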
201 
/**
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @addr: DMA address of the command payload
 *
 * Return: command status
 */
207 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
208 		enum myrb_cmd_opcode op, dma_addr_t addr)
209 {
210 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
211 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
212 	unsigned short status;
213 
214 	mutex_lock(&cb->dcmd_mutex);
215 	myrb_reset_cmd(cmd_blk);
216 	mbox->type3.id = MYRB_DCMD_TAG;
217 	mbox->type3.opcode = op;
218 	mbox->type3.addr = addr;
219 	status = myrb_exec_cmd(cb, cmd_blk);
220 	mutex_unlock(&cb->dcmd_mutex);
221 	return status;
222 }
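/*
 * Usage sketch, mirroring myrb_hba_enquiry() below: callers pass an
 * opcode plus a DMA-able buffer address and get the command status
 * back, e.g.
 *
 *	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
 *	if (status != MYRB_STATUS_SUCCESS)
 *		return status;
 *
 * Serialization against other direct commands is provided by
 * dcmd_mutex inside this helper.
 */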
223 
/**
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @sdev: SCSI device providing channel and target
 * @pdev_info: physical device state buffer, mapped for DMA
 *
 * Return: command status
 */
229 static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
230 		enum myrb_cmd_opcode op, struct scsi_device *sdev,
231 		struct myrb_pdev_state *pdev_info)
232 {
233 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
234 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
235 	unsigned short status;
236 	dma_addr_t pdev_info_addr;
237 
238 	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
239 					sizeof(struct myrb_pdev_state),
240 					DMA_FROM_DEVICE);
241 	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
242 		return MYRB_STATUS_SUBSYS_FAILED;
243 
244 	mutex_lock(&cb->dcmd_mutex);
245 	myrb_reset_cmd(cmd_blk);
246 	mbox->type3D.id = MYRB_DCMD_TAG;
247 	mbox->type3D.opcode = op;
248 	mbox->type3D.channel = sdev->channel;
249 	mbox->type3D.target = sdev->id;
250 	mbox->type3D.addr = pdev_info_addr;
251 	status = myrb_exec_cmd(cb, cmd_blk);
252 	mutex_unlock(&cb->dcmd_mutex);
253 	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
254 			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
255 	if (status == MYRB_STATUS_SUCCESS &&
256 	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
257 		myrb_translate_devstate(pdev_info);
258 
259 	return status;
260 }
261 
262 static char *myrb_event_msg[] = {
263 	"killed because write recovery failed",
264 	"killed because of SCSI bus reset failure",
265 	"killed because of double check condition",
266 	"killed because it was removed",
267 	"killed because of gross error on SCSI chip",
268 	"killed because of bad tag returned from drive",
269 	"killed because of timeout on SCSI command",
270 	"killed because of reset SCSI command issued from system",
271 	"killed because busy or parity error count exceeded limit",
272 	"killed because of 'kill drive' command from system",
273 	"killed because of selection timeout",
274 	"killed due to SCSI phase sequence error",
275 	"killed due to unknown status",
276 };
277 
278 /**
279  * myrb_get_event - get event log from HBA
280  * @cb: pointer to the hba structure
281  * @event: number of the event
282  *
 * Executes a type 3E command and logs the event message.
284  */
285 static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
286 {
287 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
288 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
289 	struct myrb_log_entry *ev_buf;
290 	dma_addr_t ev_addr;
291 	unsigned short status;
292 
293 	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
294 				    sizeof(struct myrb_log_entry),
295 				    &ev_addr, GFP_KERNEL);
296 	if (!ev_buf)
297 		return;
298 
299 	myrb_reset_cmd(cmd_blk);
300 	mbox->type3E.id = MYRB_MCMD_TAG;
301 	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
302 	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
303 	mbox->type3E.opqual = 1;
304 	mbox->type3E.ev_seq = event;
305 	mbox->type3E.addr = ev_addr;
306 	status = myrb_exec_cmd(cb, cmd_blk);
307 	if (status != MYRB_STATUS_SUCCESS)
308 		shost_printk(KERN_INFO, cb->host,
309 			     "Failed to get event log %d, status %04x\n",
310 			     event, status);
	else if (ev_buf->seq_num == event) {
313 		struct scsi_sense_hdr sshdr;
314 
315 		memset(&sshdr, 0, sizeof(sshdr));
316 		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
317 
318 		if (sshdr.sense_key == VENDOR_SPECIFIC &&
319 		    sshdr.asc == 0x80 &&
320 		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
321 			shost_printk(KERN_CRIT, cb->host,
322 				     "Physical drive %d:%d: %s\n",
323 				     ev_buf->channel, ev_buf->target,
324 				     myrb_event_msg[sshdr.ascq]);
325 		else
326 			shost_printk(KERN_CRIT, cb->host,
327 				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
328 				     ev_buf->channel, ev_buf->target,
329 				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
330 	}
331 
332 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
333 			  ev_buf, ev_addr);
334 }
335 
/**
 * myrb_get_errtable - retrieves the error table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
341 static void myrb_get_errtable(struct myrb_hba *cb)
342 {
343 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
344 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
345 	unsigned short status;
346 	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
347 
348 	memcpy(&old_table, cb->err_table, sizeof(old_table));
349 
350 	myrb_reset_cmd(cmd_blk);
351 	mbox->type3.id = MYRB_MCMD_TAG;
352 	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
353 	mbox->type3.addr = cb->err_table_addr;
354 	status = myrb_exec_cmd(cb, cmd_blk);
355 	if (status == MYRB_STATUS_SUCCESS) {
356 		struct myrb_error_entry *table = cb->err_table;
357 		struct myrb_error_entry *new, *old;
358 		size_t err_table_offset;
359 		struct scsi_device *sdev;
360 
361 		shost_for_each_device(sdev, cb->host) {
362 			if (sdev->channel >= myrb_logical_channel(cb->host))
363 				continue;
364 			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
365 				+ sdev->id;
366 			new = table + err_table_offset;
367 			old = &old_table[err_table_offset];
368 			if (new->parity_err == old->parity_err &&
369 			    new->soft_err == old->soft_err &&
370 			    new->hard_err == old->hard_err &&
371 			    new->misc_err == old->misc_err)
372 				continue;
373 			sdev_printk(KERN_CRIT, sdev,
374 				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
375 				    new->parity_err, new->soft_err,
376 				    new->hard_err, new->misc_err);
377 		}
378 	}
379 }
380 
/**
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
388 static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
389 {
390 	unsigned short status;
391 	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
392 	struct Scsi_Host *shost = cb->host;
393 
394 	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
395 				 cb->ldev_info_addr);
396 	if (status != MYRB_STATUS_SUCCESS)
397 		return status;
398 
399 	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
400 		struct myrb_ldev_info *old = NULL;
401 		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
402 		struct scsi_device *sdev;
403 
404 		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
405 					  ldev_num, 0);
406 		if (!sdev) {
407 			if (new->state == MYRB_DEVICE_OFFLINE)
408 				continue;
409 			shost_printk(KERN_INFO, shost,
410 				     "Adding Logical Drive %d in state %s\n",
411 				     ldev_num, myrb_devstate_name(new->state));
412 			scsi_add_device(shost, myrb_logical_channel(shost),
413 					ldev_num, 0);
414 			continue;
415 		}
416 		old = sdev->hostdata;
417 		if (new->state != old->state)
418 			shost_printk(KERN_INFO, shost,
419 				     "Logical Drive %d is now %s\n",
420 				     ldev_num, myrb_devstate_name(new->state));
421 		if (new->wb_enabled != old->wb_enabled)
422 			sdev_printk(KERN_INFO, sdev,
423 				    "Logical Drive is now WRITE %s\n",
424 				    (new->wb_enabled ? "BACK" : "THRU"));
425 		memcpy(old, new, sizeof(*new));
426 		scsi_device_put(sdev);
427 	}
428 	return status;
429 }
430 
/**
 * myrb_get_rbld_progress - gets rebuild progress information
 * @cb: pointer to the hba structure
 * @rbld: buffer to be filled with the rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
439 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
440 		struct myrb_rbld_progress *rbld)
441 {
442 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
443 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
444 	struct myrb_rbld_progress *rbld_buf;
445 	dma_addr_t rbld_addr;
446 	unsigned short status;
447 
448 	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
449 				      sizeof(struct myrb_rbld_progress),
450 				      &rbld_addr, GFP_KERNEL);
451 	if (!rbld_buf)
452 		return MYRB_STATUS_RBLD_NOT_CHECKED;
453 
454 	myrb_reset_cmd(cmd_blk);
455 	mbox->type3.id = MYRB_MCMD_TAG;
456 	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
457 	mbox->type3.addr = rbld_addr;
458 	status = myrb_exec_cmd(cb, cmd_blk);
459 	if (rbld)
460 		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
461 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
462 			  rbld_buf, rbld_addr);
463 	return status;
464 }
465 
/**
 * myrb_update_rbld_progress - updates the rebuild status
 * @cb: pointer to the hba structure
 *
 * Updates the rebuild status for the attached logical devices.
 */
472 static void myrb_update_rbld_progress(struct myrb_hba *cb)
473 {
474 	struct myrb_rbld_progress rbld_buf;
475 	unsigned short status;
476 
477 	status = myrb_get_rbld_progress(cb, &rbld_buf);
478 	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
479 	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
480 		status = MYRB_STATUS_RBLD_SUCCESS;
481 	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
482 		unsigned int blocks_done =
483 			rbld_buf.ldev_size - rbld_buf.blocks_left;
484 		struct scsi_device *sdev;
485 
486 		sdev = scsi_device_lookup(cb->host,
487 					  myrb_logical_channel(cb->host),
488 					  rbld_buf.ldev_num, 0);
489 		if (!sdev)
490 			return;
491 
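		/*
		 * Note: blocks_done and ldev_size are 32-bit block
		 * counts, so the percentage below shifts both operands
		 * right by 7 to keep "100 * blocks_done" from
		 * overflowing 32 bits; the ratio is unaffected.
		 */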
492 		switch (status) {
493 		case MYRB_STATUS_SUCCESS:
494 			sdev_printk(KERN_INFO, sdev,
495 				    "Rebuild in Progress, %d%% completed\n",
496 				    (100 * (blocks_done >> 7))
497 				    / (rbld_buf.ldev_size >> 7));
498 			break;
499 		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
500 			sdev_printk(KERN_INFO, sdev,
501 				    "Rebuild Failed due to Logical Drive Failure\n");
502 			break;
503 		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
504 			sdev_printk(KERN_INFO, sdev,
505 				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
506 			break;
507 		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
508 			sdev_printk(KERN_INFO, sdev,
509 				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
510 			break;
511 		case MYRB_STATUS_RBLD_SUCCESS:
512 			sdev_printk(KERN_INFO, sdev,
513 				    "Rebuild Completed Successfully\n");
514 			break;
515 		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
516 			sdev_printk(KERN_INFO, sdev,
517 				     "Rebuild Successfully Terminated\n");
518 			break;
519 		default:
520 			break;
521 		}
522 		scsi_device_put(sdev);
523 	}
524 	cb->last_rbld_status = status;
525 }
526 
/**
 * myrb_get_cc_progress - retrieves the rebuild / consistency check progress
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and logs the rebuild / consistency check
 * progress.
 */
533 static void myrb_get_cc_progress(struct myrb_hba *cb)
534 {
535 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
536 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
537 	struct myrb_rbld_progress *rbld_buf;
538 	dma_addr_t rbld_addr;
539 	unsigned short status;
540 
541 	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
542 				      sizeof(struct myrb_rbld_progress),
543 				      &rbld_addr, GFP_KERNEL);
544 	if (!rbld_buf) {
545 		cb->need_cc_status = true;
546 		return;
547 	}
548 	myrb_reset_cmd(cmd_blk);
549 	mbox->type3.id = MYRB_MCMD_TAG;
550 	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
551 	mbox->type3.addr = rbld_addr;
552 	status = myrb_exec_cmd(cb, cmd_blk);
553 	if (status == MYRB_STATUS_SUCCESS) {
554 		unsigned int ldev_num = rbld_buf->ldev_num;
555 		unsigned int ldev_size = rbld_buf->ldev_size;
556 		unsigned int blocks_done =
557 			ldev_size - rbld_buf->blocks_left;
558 		struct scsi_device *sdev;
559 
560 		sdev = scsi_device_lookup(cb->host,
561 					  myrb_logical_channel(cb->host),
562 					  ldev_num, 0);
563 		if (sdev) {
564 			sdev_printk(KERN_INFO, sdev,
565 				    "Consistency Check in Progress: %d%% completed\n",
566 				    (100 * (blocks_done >> 7))
567 				    / (ldev_size >> 7));
568 			scsi_device_put(sdev);
569 		}
570 	}
571 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
572 			  rbld_buf, rbld_addr);
573 }
574 
/**
 * myrb_bgi_control - updates background initialisation status
 * @cb: pointer to the hba structure
 *
 * Executes a type 3B command and updates the background initialisation
 * status.
 */
580 static void myrb_bgi_control(struct myrb_hba *cb)
581 {
582 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
583 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
584 	struct myrb_bgi_status *bgi, *last_bgi;
585 	dma_addr_t bgi_addr;
586 	struct scsi_device *sdev = NULL;
587 	unsigned short status;
588 
589 	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
590 				 &bgi_addr, GFP_KERNEL);
591 	if (!bgi) {
592 		shost_printk(KERN_ERR, cb->host,
593 			     "Failed to allocate bgi memory\n");
594 		return;
595 	}
596 	myrb_reset_cmd(cmd_blk);
597 	mbox->type3B.id = MYRB_DCMD_TAG;
598 	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
599 	mbox->type3B.optype = 0x20;
600 	mbox->type3B.addr = bgi_addr;
601 	status = myrb_exec_cmd(cb, cmd_blk);
602 	last_bgi = &cb->bgi_status;
603 	sdev = scsi_device_lookup(cb->host,
604 				  myrb_logical_channel(cb->host),
605 				  bgi->ldev_num, 0);
606 	switch (status) {
607 	case MYRB_STATUS_SUCCESS:
608 		switch (bgi->status) {
609 		case MYRB_BGI_INVALID:
610 			break;
611 		case MYRB_BGI_STARTED:
612 			if (!sdev)
613 				break;
614 			sdev_printk(KERN_INFO, sdev,
615 				    "Background Initialization Started\n");
616 			break;
617 		case MYRB_BGI_INPROGRESS:
618 			if (!sdev)
619 				break;
620 			if (bgi->blocks_done == last_bgi->blocks_done &&
621 			    bgi->ldev_num == last_bgi->ldev_num)
622 				break;
623 			sdev_printk(KERN_INFO, sdev,
624 				 "Background Initialization in Progress: %d%% completed\n",
625 				 (100 * (bgi->blocks_done >> 7))
626 				 / (bgi->ldev_size >> 7));
627 			break;
628 		case MYRB_BGI_SUSPENDED:
629 			if (!sdev)
630 				break;
631 			sdev_printk(KERN_INFO, sdev,
632 				    "Background Initialization Suspended\n");
633 			break;
634 		case MYRB_BGI_CANCELLED:
635 			if (!sdev)
636 				break;
637 			sdev_printk(KERN_INFO, sdev,
638 				    "Background Initialization Cancelled\n");
639 			break;
640 		}
641 		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
642 		break;
643 	case MYRB_STATUS_BGI_SUCCESS:
644 		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
645 			sdev_printk(KERN_INFO, sdev,
646 				    "Background Initialization Completed Successfully\n");
647 		cb->bgi_status.status = MYRB_BGI_INVALID;
648 		break;
649 	case MYRB_STATUS_BGI_ABORTED:
650 		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
651 			sdev_printk(KERN_INFO, sdev,
652 				    "Background Initialization Aborted\n");
653 		fallthrough;
654 	case MYRB_STATUS_NO_BGI_INPROGRESS:
655 		cb->bgi_status.status = MYRB_BGI_INVALID;
656 		break;
657 	}
658 	if (sdev)
659 		scsi_device_put(sdev);
660 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
661 			  bgi, bgi_addr);
662 }
663 
/**
 * myrb_hba_enquiry - updates the controller status
 * @cb: pointer to the hba structure
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
671 static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
672 {
673 	struct myrb_enquiry old, *new;
674 	unsigned short status;
675 
676 	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
677 
678 	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
679 	if (status != MYRB_STATUS_SUCCESS)
680 		return status;
681 
682 	new = cb->enquiry;
683 	if (new->ldev_count > old.ldev_count) {
684 		int ldev_num = old.ldev_count - 1;
685 
686 		while (++ldev_num < new->ldev_count)
687 			shost_printk(KERN_CRIT, cb->host,
688 				     "Logical Drive %d Now Exists\n",
689 				     ldev_num);
690 	}
691 	if (new->ldev_count < old.ldev_count) {
692 		int ldev_num = new->ldev_count - 1;
693 
694 		while (++ldev_num < old.ldev_count)
695 			shost_printk(KERN_CRIT, cb->host,
696 				     "Logical Drive %d No Longer Exists\n",
697 				     ldev_num);
698 	}
699 	if (new->status.deferred != old.status.deferred)
700 		shost_printk(KERN_CRIT, cb->host,
701 			     "Deferred Write Error Flag is now %s\n",
702 			     (new->status.deferred ? "TRUE" : "FALSE"));
703 	if (new->ev_seq != old.ev_seq) {
704 		cb->new_ev_seq = new->ev_seq;
705 		cb->need_err_info = true;
706 		shost_printk(KERN_INFO, cb->host,
707 			     "Event log %d/%d (%d/%d) available\n",
708 			     cb->old_ev_seq, cb->new_ev_seq,
709 			     old.ev_seq, new->ev_seq);
710 	}
711 	if ((new->ldev_critical > 0 &&
712 	     new->ldev_critical != old.ldev_critical) ||
713 	    (new->ldev_offline > 0 &&
714 	     new->ldev_offline != old.ldev_offline) ||
715 	    (new->ldev_count != old.ldev_count)) {
716 		shost_printk(KERN_INFO, cb->host,
717 			     "Logical drive count changed (%d/%d/%d)\n",
718 			     new->ldev_critical,
719 			     new->ldev_offline,
720 			     new->ldev_count);
721 		cb->need_ldev_info = true;
722 	}
723 	if (new->pdev_dead > 0 ||
724 	    new->pdev_dead != old.pdev_dead ||
725 	    time_after_eq(jiffies, cb->secondary_monitor_time
726 			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
727 		cb->need_bgi_status = cb->bgi_status_supported;
728 		cb->secondary_monitor_time = jiffies;
729 	}
730 	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
731 	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
732 	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
733 	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
734 		cb->need_rbld = true;
735 		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
736 	}
737 	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
738 		switch (new->rbld) {
739 		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
740 			shost_printk(KERN_INFO, cb->host,
741 				     "Consistency Check Completed Successfully\n");
742 			break;
743 		case MYRB_STDBY_RBLD_IN_PROGRESS:
744 		case MYRB_BG_RBLD_IN_PROGRESS:
745 			break;
746 		case MYRB_BG_CHECK_IN_PROGRESS:
747 			cb->need_cc_status = true;
748 			break;
749 		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
750 			shost_printk(KERN_INFO, cb->host,
751 				     "Consistency Check Completed with Error\n");
752 			break;
753 		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
754 			shost_printk(KERN_INFO, cb->host,
755 				     "Consistency Check Failed - Physical Device Failed\n");
756 			break;
757 		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
758 			shost_printk(KERN_INFO, cb->host,
759 				     "Consistency Check Failed - Logical Drive Failed\n");
760 			break;
761 		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
762 			shost_printk(KERN_INFO, cb->host,
763 				     "Consistency Check Failed - Other Causes\n");
764 			break;
765 		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
766 			shost_printk(KERN_INFO, cb->host,
767 				     "Consistency Check Successfully Terminated\n");
768 			break;
769 		}
770 	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
771 		cb->need_cc_status = true;
772 
773 	return MYRB_STATUS_SUCCESS;
774 }
775 
/**
 * myrb_set_pdev_state - sets the device state for a physical device
 * @cb: pointer to the hba structure
 * @sdev: SCSI device to set the state for
 * @state: new device state
 *
 * Return: command status
 */
781 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
782 		struct scsi_device *sdev, enum myrb_devstate state)
783 {
784 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
785 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
786 	unsigned short status;
787 
788 	mutex_lock(&cb->dcmd_mutex);
789 	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
790 	mbox->type3D.id = MYRB_DCMD_TAG;
791 	mbox->type3D.channel = sdev->channel;
792 	mbox->type3D.target = sdev->id;
793 	mbox->type3D.state = state & 0x1F;
794 	status = myrb_exec_cmd(cb, cmd_blk);
795 	mutex_unlock(&cb->dcmd_mutex);
796 
797 	return status;
798 }
799 
/**
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 * @cb: pointer to the hba structure
 * @mmio_init_fn: controller-specific mailbox initialization function
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other DMA-mapped memory regions.
 *
 * Return: true on success, false otherwise.
 */
808 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
809 {
810 	void __iomem *base = cb->io_base;
811 	struct pci_dev *pdev = cb->pdev;
812 	size_t err_table_size;
813 	size_t ldev_info_size;
814 	union myrb_cmd_mbox *cmd_mbox_mem;
815 	struct myrb_stat_mbox *stat_mbox_mem;
816 	union myrb_cmd_mbox mbox;
817 	unsigned short status;
818 
819 	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
820 
821 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
822 		dev_err(&pdev->dev, "DMA mask out of range\n");
823 		return false;
824 	}
825 
826 	cb->enquiry = dma_alloc_coherent(&pdev->dev,
827 					 sizeof(struct myrb_enquiry),
828 					 &cb->enquiry_addr, GFP_KERNEL);
829 	if (!cb->enquiry)
830 		return false;
831 
832 	err_table_size = sizeof(struct myrb_error_entry) *
833 		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
834 	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
835 					   &cb->err_table_addr, GFP_KERNEL);
836 	if (!cb->err_table)
837 		return false;
838 
839 	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
840 	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
841 					       &cb->ldev_info_addr, GFP_KERNEL);
842 	if (!cb->ldev_info_buf)
843 		return false;
844 
845 	/*
846 	 * Skip mailbox initialisation for PD and P Controllers
847 	 */
848 	if (!mmio_init_fn)
849 		return true;
850 
851 	/* These are the base addresses for the command memory mailbox array */
852 	cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
853 	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
854 						cb->cmd_mbox_size,
855 						&cb->cmd_mbox_addr,
856 						GFP_KERNEL);
857 	if (!cb->first_cmd_mbox)
858 		return false;
859 
860 	cmd_mbox_mem = cb->first_cmd_mbox;
861 	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
862 	cb->last_cmd_mbox = cmd_mbox_mem;
863 	cb->next_cmd_mbox = cb->first_cmd_mbox;
864 	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
865 	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
866 
867 	/* These are the base addresses for the status memory mailbox array */
868 	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
869 	    sizeof(struct myrb_stat_mbox);
870 	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
871 						 cb->stat_mbox_size,
872 						 &cb->stat_mbox_addr,
873 						 GFP_KERNEL);
874 	if (!cb->first_stat_mbox)
875 		return false;
876 
877 	stat_mbox_mem = cb->first_stat_mbox;
878 	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
879 	cb->last_stat_mbox = stat_mbox_mem;
880 	cb->next_stat_mbox = cb->first_stat_mbox;
881 
882 	/* Enable the Memory Mailbox Interface. */
883 	cb->dual_mode_interface = true;
884 	mbox.typeX.opcode = 0x2B;
885 	mbox.typeX.id = 0;
886 	mbox.typeX.opcode2 = 0x14;
887 	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
888 	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
889 
890 	status = mmio_init_fn(pdev, base, &mbox);
891 	if (status != MYRB_STATUS_SUCCESS) {
892 		cb->dual_mode_interface = false;
893 		mbox.typeX.opcode2 = 0x10;
894 		status = mmio_init_fn(pdev, base, &mbox);
895 		if (status != MYRB_STATUS_SUCCESS) {
896 			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
898 				status);
899 			return false;
900 		}
901 	}
902 	return true;
903 }
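/*
 * Note: opcode2 0x14 above requests the dual-mode mailbox interface;
 * on failure the code falls back to single mode (opcode2 0x10) and
 * records the result in cb->dual_mode_interface for the I/O path.
 */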
904 
/**
 * myrb_get_hba_config - reads the configuration information
 * @cb: pointer to the hba structure
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
913 static int myrb_get_hba_config(struct myrb_hba *cb)
914 {
915 	struct myrb_enquiry2 *enquiry2;
916 	dma_addr_t enquiry2_addr;
917 	struct myrb_config2 *config2;
918 	dma_addr_t config2_addr;
919 	struct Scsi_Host *shost = cb->host;
920 	struct pci_dev *pdev = cb->pdev;
921 	int pchan_max = 0, pchan_cur = 0;
922 	unsigned short status;
923 	int ret = -ENODEV, memsize = 0;
924 
925 	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
926 				      &enquiry2_addr, GFP_KERNEL);
927 	if (!enquiry2) {
928 		shost_printk(KERN_ERR, cb->host,
929 			     "Failed to allocate V1 enquiry2 memory\n");
930 		return -ENOMEM;
931 	}
932 	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
933 				     &config2_addr, GFP_KERNEL);
934 	if (!config2) {
935 		shost_printk(KERN_ERR, cb->host,
936 			     "Failed to allocate V1 config2 memory\n");
937 		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
938 				  enquiry2, enquiry2_addr);
939 		return -ENOMEM;
940 	}
941 	mutex_lock(&cb->dma_mutex);
942 	status = myrb_hba_enquiry(cb);
943 	mutex_unlock(&cb->dma_mutex);
944 	if (status != MYRB_STATUS_SUCCESS) {
945 		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
947 		goto out_free;
948 	}
949 
950 	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
951 	if (status != MYRB_STATUS_SUCCESS) {
952 		shost_printk(KERN_WARNING, cb->host,
953 			     "Failed to issue V1 Enquiry2\n");
954 		goto out_free;
955 	}
956 
957 	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
958 	if (status != MYRB_STATUS_SUCCESS) {
959 		shost_printk(KERN_WARNING, cb->host,
960 			     "Failed to issue ReadConfig2\n");
961 		goto out_free;
962 	}
963 
964 	status = myrb_get_ldev_info(cb);
965 	if (status != MYRB_STATUS_SUCCESS) {
966 		shost_printk(KERN_WARNING, cb->host,
967 			     "Failed to get logical drive information\n");
968 		goto out_free;
969 	}
970 
971 	/*
972 	 * Initialize the Controller Model Name and Full Model Name fields.
973 	 */
974 	switch (enquiry2->hw.sub_model) {
975 	case DAC960_V1_P_PD_PU:
976 		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
977 			strcpy(cb->model_name, "DAC960PU");
978 		else
979 			strcpy(cb->model_name, "DAC960PD");
980 		break;
981 	case DAC960_V1_PL:
982 		strcpy(cb->model_name, "DAC960PL");
983 		break;
984 	case DAC960_V1_PG:
985 		strcpy(cb->model_name, "DAC960PG");
986 		break;
987 	case DAC960_V1_PJ:
988 		strcpy(cb->model_name, "DAC960PJ");
989 		break;
990 	case DAC960_V1_PR:
991 		strcpy(cb->model_name, "DAC960PR");
992 		break;
993 	case DAC960_V1_PT:
994 		strcpy(cb->model_name, "DAC960PT");
995 		break;
996 	case DAC960_V1_PTL0:
997 		strcpy(cb->model_name, "DAC960PTL0");
998 		break;
999 	case DAC960_V1_PRL:
1000 		strcpy(cb->model_name, "DAC960PRL");
1001 		break;
1002 	case DAC960_V1_PTL1:
1003 		strcpy(cb->model_name, "DAC960PTL1");
1004 		break;
1005 	case DAC960_V1_1164P:
1006 		strcpy(cb->model_name, "eXtremeRAID 1100");
1007 		break;
1008 	default:
1009 		shost_printk(KERN_WARNING, cb->host,
1010 			     "Unknown Model %X\n",
1011 			     enquiry2->hw.sub_model);
1012 		goto out;
1013 	}
1014 	/*
1015 	 * Initialize the Controller Firmware Version field and verify that it
1016 	 * is a supported firmware version.
1017 	 * The supported firmware versions are:
1018 	 *
1019 	 * DAC1164P		    5.06 and above
1020 	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
1021 	 * DAC960PU/PD/PL	    3.51 and above
1022 	 * DAC960PU/PD/PL/P	    2.73 and above
1023 	 */
1024 #if defined(CONFIG_ALPHA)
1025 	/*
1026 	 * DEC Alpha machines were often equipped with DAC960 cards that were
1027 	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1028 	 * the last custom FW revision to be released by DEC for these older
1029 	 * controllers, appears to work quite well with this driver.
1030 	 *
1031 	 * Cards tested successfully were several versions each of the PD and
1032 	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1033 	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1034 	 * back of the board, of:
1035 	 *
1036 	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
1037 	 *         or D040349 (3-channel)
1038 	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
1039 	 *         or D040397 (3-channel)
1040 	 */
1041 # define FIRMWARE_27X	"2.70"
1042 #else
1043 # define FIRMWARE_27X	"2.73"
1044 #endif
1045 
1046 	if (enquiry2->fw.major_version == 0) {
1047 		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1048 		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1049 		enquiry2->fw.firmware_type = '0';
1050 		enquiry2->fw.turn_id = 0;
1051 	}
1052 	snprintf(cb->fw_version, sizeof(cb->fw_version),
1053 		"%u.%02u-%c-%02u",
1054 		enquiry2->fw.major_version,
1055 		enquiry2->fw.minor_version,
1056 		enquiry2->fw.firmware_type,
1057 		enquiry2->fw.turn_id);
1058 	if (!((enquiry2->fw.major_version == 5 &&
1059 	       enquiry2->fw.minor_version >= 6) ||
1060 	      (enquiry2->fw.major_version == 4 &&
1061 	       enquiry2->fw.minor_version >= 6) ||
1062 	      (enquiry2->fw.major_version == 3 &&
1063 	       enquiry2->fw.minor_version >= 51) ||
1064 	      (enquiry2->fw.major_version == 2 &&
1065 	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1066 		shost_printk(KERN_WARNING, cb->host,
1067 			"Firmware Version '%s' unsupported\n",
1068 			cb->fw_version);
1069 		goto out;
1070 	}
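	/*
	 * Worked example: firmware "4.06-0-00" passes the check above
	 * (major 4, minor 6), while "3.50-0-00" is rejected because the
	 * 3.x branch requires a minor version of at least 51.
	 */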
1071 	/*
1072 	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1073 	 * Enclosure Management Enabled fields.
1074 	 */
1075 	switch (enquiry2->hw.model) {
1076 	case MYRB_5_CHANNEL_BOARD:
1077 		pchan_max = 5;
1078 		break;
1079 	case MYRB_3_CHANNEL_BOARD:
1080 	case MYRB_3_CHANNEL_ASIC_DAC:
1081 		pchan_max = 3;
1082 		break;
1083 	case MYRB_2_CHANNEL_BOARD:
1084 		pchan_max = 2;
1085 		break;
1086 	default:
1087 		pchan_max = enquiry2->cfg_chan;
1088 		break;
1089 	}
1090 	pchan_cur = enquiry2->cur_chan;
1091 	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1092 		cb->bus_width = 32;
1093 	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1094 		cb->bus_width = 16;
1095 	else
1096 		cb->bus_width = 8;
1097 	cb->ldev_block_size = enquiry2->ldev_block_size;
1098 	shost->max_channel = pchan_cur;
1099 	shost->max_id = enquiry2->max_targets;
1100 	memsize = enquiry2->mem_size >> 20;
1101 	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1102 	/*
1103 	 * Initialize the Controller Queue Depth, Driver Queue Depth,
1104 	 * Logical Drive Count, Maximum Blocks per Command, Controller
1105 	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1106 	 * The Driver Queue Depth must be at most one less than the
1107 	 * Controller Queue Depth to allow for an automatic drive
1108 	 * rebuild operation.
1109 	 */
1110 	shost->can_queue = cb->enquiry->max_tcq;
1111 	if (shost->can_queue < 3)
1112 		shost->can_queue = enquiry2->max_cmds;
1113 	if (shost->can_queue < 3)
1114 		/* Play safe and disable TCQ */
1115 		shost->can_queue = 1;
1116 
1117 	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1118 		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1119 	shost->max_sectors = enquiry2->max_sectors;
1120 	shost->sg_tablesize = enquiry2->max_sge;
1121 	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1122 		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1123 	/*
1124 	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1125 	 */
1126 	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1127 		>> (10 - MYRB_BLKSIZE_BITS);
1128 	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1129 		>> (10 - MYRB_BLKSIZE_BITS);
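	/*
	 * Worked example (assuming MYRB_BLKSIZE_BITS is 9, i.e. 512-byte
	 * blocks): block_factor 1 with 128 blocks_per_stripe yields
	 * 128 * 1 >> 1 = 64, i.e. a 64KB stripe, matching the KB units
	 * printed in the configuration summary below.
	 */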
1130 	/* Assume 255/63 translation */
1131 	cb->ldev_geom_heads = 255;
1132 	cb->ldev_geom_sectors = 63;
1133 	if (config2->drive_geometry) {
1134 		cb->ldev_geom_heads = 128;
1135 		cb->ldev_geom_sectors = 32;
1136 	}
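	/*
	 * Note: 255 heads * 63 sectors = 16065 blocks per cylinder, the
	 * conventional large-disk BIOS translation; the alternative
	 * geometry selected by config2->drive_geometry gives
	 * 128 * 32 = 4096 blocks per cylinder.
	 */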
1137 
1138 	/*
1139 	 * Initialize the Background Initialization Status.
1140 	 */
1141 	if ((cb->fw_version[0] == '4' &&
1142 	     strcmp(cb->fw_version, "4.08") >= 0) ||
1143 	    (cb->fw_version[0] == '5' &&
1144 	     strcmp(cb->fw_version, "5.08") >= 0)) {
1145 		cb->bgi_status_supported = true;
1146 		myrb_bgi_control(cb);
1147 	}
1148 	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1149 	ret = 0;
1150 
1151 out:
1152 	shost_printk(KERN_INFO, cb->host,
1153 		"Configuring %s PCI RAID Controller\n", cb->model_name);
1154 	shost_printk(KERN_INFO, cb->host,
1155 		"  Firmware Version: %s, Memory Size: %dMB\n",
1156 		cb->fw_version, memsize);
1157 	if (cb->io_addr == 0)
1158 		shost_printk(KERN_INFO, cb->host,
1159 			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1160 			(unsigned long)cb->pci_addr, cb->irq);
1161 	else
1162 		shost_printk(KERN_INFO, cb->host,
1163 			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1164 			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1165 			cb->irq);
1166 	shost_printk(KERN_INFO, cb->host,
1167 		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1168 		cb->host->can_queue, cb->host->max_sectors);
1169 	shost_printk(KERN_INFO, cb->host,
1170 		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1171 		     cb->host->can_queue, cb->host->sg_tablesize,
1172 		     MYRB_SCATTER_GATHER_LIMIT);
1173 	shost_printk(KERN_INFO, cb->host,
1174 		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1175 		     cb->stripe_size, cb->segment_size,
1176 		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
1177 		     cb->safte_enabled ?
1178 		     "  SAF-TE Enclosure Management Enabled" : "");
1179 	shost_printk(KERN_INFO, cb->host,
1180 		     "  Physical: %d/%d channels %d/%d/%d devices\n",
1181 		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1182 		     cb->host->max_id);
1183 
1184 	shost_printk(KERN_INFO, cb->host,
1185 		     "  Logical: 1/1 channels, %d/%d disks\n",
1186 		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1187 
1188 out_free:
1189 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1190 			  enquiry2, enquiry2_addr);
1191 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1192 			  config2, config2_addr);
1193 
1194 	return ret;
1195 }
1196 
/**
 * myrb_unmap - unmaps controller structures
 * @cb: pointer to the hba structure
 */
1200 static void myrb_unmap(struct myrb_hba *cb)
1201 {
1202 	if (cb->ldev_info_buf) {
1203 		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1204 			MYRB_MAX_LDEVS;
1205 		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1206 				  cb->ldev_info_buf, cb->ldev_info_addr);
1207 		cb->ldev_info_buf = NULL;
1208 	}
1209 	if (cb->err_table) {
1210 		size_t err_table_size = sizeof(struct myrb_error_entry) *
1211 			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1212 		dma_free_coherent(&cb->pdev->dev, err_table_size,
1213 				  cb->err_table, cb->err_table_addr);
1214 		cb->err_table = NULL;
1215 	}
1216 	if (cb->enquiry) {
1217 		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1218 				  cb->enquiry, cb->enquiry_addr);
1219 		cb->enquiry = NULL;
1220 	}
1221 	if (cb->first_stat_mbox) {
1222 		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1223 				  cb->first_stat_mbox, cb->stat_mbox_addr);
1224 		cb->first_stat_mbox = NULL;
1225 	}
1226 	if (cb->first_cmd_mbox) {
1227 		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1228 				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
1229 		cb->first_cmd_mbox = NULL;
1230 	}
1231 }
1232 
/**
 * myrb_cleanup - cleans up the controller structures
 * @cb: pointer to the hba structure
 */
1236 static void myrb_cleanup(struct myrb_hba *cb)
1237 {
1238 	struct pci_dev *pdev = cb->pdev;
1239 
1240 	/* Free the memory mailbox, status, and related structures */
1241 	myrb_unmap(cb);
1242 
1243 	if (cb->mmio_base) {
1244 		cb->disable_intr(cb->io_base);
1245 		iounmap(cb->mmio_base);
1246 	}
1247 	if (cb->irq)
1248 		free_irq(cb->irq, cb);
1249 	if (cb->io_addr)
1250 		release_region(cb->io_addr, 0x80);
1251 	pci_set_drvdata(pdev, NULL);
1252 	pci_disable_device(pdev);
1253 	scsi_host_put(cb->host);
1254 }
1255 
1256 static int myrb_host_reset(struct scsi_cmnd *scmd)
1257 {
1258 	struct Scsi_Host *shost = scmd->device->host;
1259 	struct myrb_hba *cb = shost_priv(shost);
1260 
1261 	cb->reset(cb->io_base);
1262 	return SUCCESS;
1263 }
1264 
1265 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1266 		struct scsi_cmnd *scmd)
1267 {
1268 	struct myrb_hba *cb = shost_priv(shost);
1269 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1270 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1271 	struct myrb_dcdb *dcdb;
1272 	dma_addr_t dcdb_addr;
1273 	struct scsi_device *sdev = scmd->device;
1274 	struct scatterlist *sgl;
1275 	unsigned long flags;
1276 	int nsge;
1277 
1278 	myrb_reset_cmd(cmd_blk);
1279 	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1280 	if (!dcdb)
1281 		return SCSI_MLQUEUE_HOST_BUSY;
1282 	nsge = scsi_dma_map(scmd);
1283 	if (nsge > 1) {
1284 		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1285 		scmd->result = (DID_ERROR << 16);
1286 		scmd->scsi_done(scmd);
1287 		return 0;
1288 	}
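	/*
	 * Note: a DCDB carries exactly one data pointer/length pair, so
	 * pass-through commands are limited to a single mapped segment;
	 * requests mapping to more than one segment are completed with
	 * DID_ERROR above instead of being split.
	 */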
1289 
1290 	mbox->type3.opcode = MYRB_CMD_DCDB;
1291 	mbox->type3.id = scmd->request->tag + 3;
1292 	mbox->type3.addr = dcdb_addr;
1293 	dcdb->channel = sdev->channel;
1294 	dcdb->target = sdev->id;
1295 	switch (scmd->sc_data_direction) {
1296 	case DMA_NONE:
1297 		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1298 		break;
1299 	case DMA_TO_DEVICE:
1300 		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1301 		break;
1302 	case DMA_FROM_DEVICE:
1303 		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1304 		break;
1305 	default:
1306 		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1307 		break;
1308 	}
1309 	dcdb->early_status = false;
1310 	if (scmd->request->timeout <= 10)
1311 		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1312 	else if (scmd->request->timeout <= 60)
1313 		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1314 	else if (scmd->request->timeout <= 600)
1315 		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1316 	else
1317 		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1318 	dcdb->no_autosense = false;
1319 	dcdb->allow_disconnect = true;
1320 	sgl = scsi_sglist(scmd);
1321 	dcdb->dma_addr = sg_dma_address(sgl);
1322 	if (sg_dma_len(sgl) > USHRT_MAX) {
1323 		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1324 		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1325 	} else {
1326 		dcdb->xfer_len_lo = sg_dma_len(sgl);
1327 		dcdb->xfer_len_hi4 = 0;
1328 	}
1329 	dcdb->cdb_len = scmd->cmd_len;
1330 	dcdb->sense_len = sizeof(dcdb->sense);
1331 	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1332 
1333 	spin_lock_irqsave(&cb->queue_lock, flags);
1334 	cb->qcmd(cb, cmd_blk);
1335 	spin_unlock_irqrestore(&cb->queue_lock, flags);
1336 	return 0;
1337 }
1338 
1339 static void myrb_inquiry(struct myrb_hba *cb,
1340 		struct scsi_cmnd *scmd)
1341 {
1342 	unsigned char inq[36] = {
1343 		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1344 		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1345 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1347 		0x20, 0x20, 0x20, 0x20,
1348 	};
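	/*
	 * Bytes 8-15 above encode the vendor id "MYLEX   "; the product
	 * id (bytes 16-31) and revision (bytes 32-35) are filled in from
	 * the model name and firmware version below.
	 */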
1349 
1350 	if (cb->bus_width > 16)
1351 		inq[7] |= 1 << 6;
1352 	if (cb->bus_width > 8)
1353 		inq[7] |= 1 << 5;
1354 	memcpy(&inq[16], cb->model_name, 16);
1355 	memcpy(&inq[32], cb->fw_version, 1);
1356 	memcpy(&inq[33], &cb->fw_version[2], 2);
1357 	memcpy(&inq[35], &cb->fw_version[7], 1);
1358 
1359 	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1360 }
1361 
1362 static void
1363 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1364 		struct myrb_ldev_info *ldev_info)
1365 {
1366 	unsigned char modes[32], *mode_pg;
1367 	bool dbd;
1368 	size_t mode_len;
1369 
1370 	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1371 	if (dbd) {
1372 		mode_len = 24;
1373 		mode_pg = &modes[4];
1374 	} else {
1375 		mode_len = 32;
1376 		mode_pg = &modes[12];
1377 	}
1378 	memset(modes, 0, sizeof(modes));
1379 	modes[0] = mode_len - 1;
1380 	if (!dbd) {
1381 		unsigned char *block_desc = &modes[4];
1382 
1383 		modes[3] = 8;
1384 		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		/*
		 * The 24-bit BLOCK LENGTH field occupies bytes 5..7 of
		 * the block descriptor; writing the 32-bit value at
		 * byte 4 leaves the density byte zero and puts the
		 * block length into bytes 5..7.
		 */
		put_unaligned_be32(cb->ldev_block_size, &block_desc[4]);
1386 	}
1387 	mode_pg[0] = 0x08;
1388 	mode_pg[1] = 0x12;
1389 	if (ldev_info->wb_enabled)
1390 		mode_pg[2] |= 0x04;
1391 	if (cb->segment_size) {
1392 		mode_pg[2] |= 0x08;
1393 		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1394 	}
1395 
1396 	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1397 }
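/*
 * Note: the page emulated above is the caching mode page (0x08): bit
 * 0x04 of byte 2 is WCE, mirroring wb_enabled, and bit 0x08 is SIZE,
 * set when a cache segment size is reported in bytes 14-15.
 */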
1398 
1399 static void myrb_request_sense(struct myrb_hba *cb,
1400 		struct scsi_cmnd *scmd)
1401 {
1402 	scsi_build_sense_buffer(0, scmd->sense_buffer,
1403 				NO_SENSE, 0, 0);
1404 	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1405 				 SCSI_SENSE_BUFFERSIZE);
1406 }
1407 
1408 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1409 		struct myrb_ldev_info *ldev_info)
1410 {
1411 	unsigned char data[8];
1412 
1413 	dev_dbg(&scmd->device->sdev_gendev,
1414 		"Capacity %u, blocksize %u\n",
1415 		ldev_info->size, cb->ldev_block_size);
1416 	put_unaligned_be32(ldev_info->size - 1, &data[0]);
1417 	put_unaligned_be32(cb->ldev_block_size, &data[4]);
1418 	scsi_sg_copy_from_buffer(scmd, data, 8);
1419 }
1420 
1421 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1422 		struct scsi_cmnd *scmd)
1423 {
1424 	struct myrb_hba *cb = shost_priv(shost);
1425 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1426 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1427 	struct myrb_ldev_info *ldev_info;
1428 	struct scsi_device *sdev = scmd->device;
1429 	struct scatterlist *sgl;
1430 	unsigned long flags;
1431 	u64 lba;
1432 	u32 block_cnt;
1433 	int nsge;
1434 
1435 	ldev_info = sdev->hostdata;
	if (!ldev_info || (ldev_info->state != MYRB_DEVICE_ONLINE &&
			   ldev_info->state != MYRB_DEVICE_WO)) {
1438 		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1439 			sdev->id, ldev_info ? ldev_info->state : 0xff);
1440 		scmd->result = (DID_BAD_TARGET << 16);
1441 		scmd->scsi_done(scmd);
1442 		return 0;
1443 	}
1444 	switch (scmd->cmnd[0]) {
1445 	case TEST_UNIT_READY:
1446 		scmd->result = (DID_OK << 16);
1447 		scmd->scsi_done(scmd);
1448 		return 0;
1449 	case INQUIRY:
1450 		if (scmd->cmnd[1] & 1) {
1451 			/* Illegal request, invalid field in CDB */
1452 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1453 						ILLEGAL_REQUEST, 0x24, 0);
1454 			scmd->result = (DRIVER_SENSE << 24) |
1455 				SAM_STAT_CHECK_CONDITION;
1456 		} else {
1457 			myrb_inquiry(cb, scmd);
1458 			scmd->result = (DID_OK << 16);
1459 		}
1460 		scmd->scsi_done(scmd);
1461 		return 0;
1462 	case SYNCHRONIZE_CACHE:
1463 		scmd->result = (DID_OK << 16);
1464 		scmd->scsi_done(scmd);
1465 		return 0;
1466 	case MODE_SENSE:
1467 		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1468 		    (scmd->cmnd[2] & 0x3F) != 0x08) {
1469 			/* Illegal request, invalid field in CDB */
1470 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1471 						ILLEGAL_REQUEST, 0x24, 0);
1472 			scmd->result = (DRIVER_SENSE << 24) |
1473 				SAM_STAT_CHECK_CONDITION;
1474 		} else {
1475 			myrb_mode_sense(cb, scmd, ldev_info);
1476 			scmd->result = (DID_OK << 16);
1477 		}
1478 		scmd->scsi_done(scmd);
1479 		return 0;
1480 	case READ_CAPACITY:
1481 		if ((scmd->cmnd[1] & 1) ||
1482 		    (scmd->cmnd[8] & 1)) {
1483 			/* Illegal request, invalid field in CDB */
1484 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1485 						ILLEGAL_REQUEST, 0x24, 0);
1486 			scmd->result = (DRIVER_SENSE << 24) |
1487 				SAM_STAT_CHECK_CONDITION;
1488 			scmd->scsi_done(scmd);
1489 			return 0;
1490 		}
1491 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1492 		if (lba) {
1493 			/* Illegal request, invalid field in CDB */
1494 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1495 						ILLEGAL_REQUEST, 0x24, 0);
1496 			scmd->result = (DRIVER_SENSE << 24) |
1497 				SAM_STAT_CHECK_CONDITION;
1498 			scmd->scsi_done(scmd);
1499 			return 0;
1500 		}
1501 		myrb_read_capacity(cb, scmd, ldev_info);
1502 		scmd->scsi_done(scmd);
1503 		return 0;
1504 	case REQUEST_SENSE:
1505 		myrb_request_sense(cb, scmd);
1506 		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
1508 	case SEND_DIAGNOSTIC:
1509 		if (scmd->cmnd[1] != 0x04) {
1510 			/* Illegal request, invalid field in CDB */
1511 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1512 						ILLEGAL_REQUEST, 0x24, 0);
1513 			scmd->result = (DRIVER_SENSE << 24) |
1514 				SAM_STAT_CHECK_CONDITION;
1515 		} else {
1516 			/* Assume good status */
1517 			scmd->result = (DID_OK << 16);
1518 		}
1519 		scmd->scsi_done(scmd);
1520 		return 0;
1521 	case READ_6:
1522 		if (ldev_info->state == MYRB_DEVICE_WO) {
1523 			/* Data protect, attempt to read invalid data */
1524 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1525 						DATA_PROTECT, 0x21, 0x06);
1526 			scmd->result = (DRIVER_SENSE << 24) |
1527 				SAM_STAT_CHECK_CONDITION;
1528 			scmd->scsi_done(scmd);
1529 			return 0;
1530 		}
1531 		fallthrough;
1532 	case WRITE_6:
1533 		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1534 		       (scmd->cmnd[2] << 8) |
1535 		       scmd->cmnd[3]);
1536 		block_cnt = scmd->cmnd[4];
1537 		break;
1538 	case READ_10:
1539 		if (ldev_info->state == MYRB_DEVICE_WO) {
1540 			/* Data protect, attempt to read invalid data */
1541 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1542 						DATA_PROTECT, 0x21, 0x06);
1543 			scmd->result = (DRIVER_SENSE << 24) |
1544 				SAM_STAT_CHECK_CONDITION;
1545 			scmd->scsi_done(scmd);
1546 			return 0;
1547 		}
1548 		fallthrough;
1549 	case WRITE_10:
1550 	case VERIFY:		/* 0x2F */
1551 	case WRITE_VERIFY:	/* 0x2E */
1552 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1553 		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1554 		break;
1555 	case READ_12:
1556 		if (ldev_info->state == MYRB_DEVICE_WO) {
1557 			/* Data protect, attempt to read invalid data */
1558 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1559 						DATA_PROTECT, 0x21, 0x06);
1560 			scmd->result = (DRIVER_SENSE << 24) |
1561 				SAM_STAT_CHECK_CONDITION;
1562 			scmd->scsi_done(scmd);
1563 			return 0;
1564 		}
1565 		fallthrough;
1566 	case WRITE_12:
1567 	case VERIFY_12: /* 0xAF */
1568 	case WRITE_VERIFY_12:	/* 0xAE */
1569 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1570 		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1571 		break;
1572 	default:
1573 		/* Illegal request, invalid opcode */
1574 		scsi_build_sense_buffer(0, scmd->sense_buffer,
1575 					ILLEGAL_REQUEST, 0x20, 0);
1576 		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1577 		scmd->scsi_done(scmd);
1578 		return 0;
1579 	}
1580 
1581 	myrb_reset_cmd(cmd_blk);
1582 	mbox->type5.id = scmd->request->tag + 3;
1583 	if (scmd->sc_data_direction == DMA_NONE)
1584 		goto submit;
1585 	nsge = scsi_dma_map(scmd);
1586 	if (nsge == 1) {
1587 		sgl = scsi_sglist(scmd);
1588 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1589 			mbox->type5.opcode = MYRB_CMD_READ;
1590 		else
1591 			mbox->type5.opcode = MYRB_CMD_WRITE;
1592 
1593 		mbox->type5.ld.xfer_len = block_cnt;
1594 		mbox->type5.ld.ldev_num = sdev->id;
1595 		mbox->type5.lba = lba;
1596 		mbox->type5.addr = (u32)sg_dma_address(sgl);
1597 	} else {
1598 		struct myrb_sge *hw_sgl;
1599 		dma_addr_t hw_sgl_addr;
1600 		int i;
1601 
1602 		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1603 		if (!hw_sgl)
1604 			return SCSI_MLQUEUE_HOST_BUSY;
1605 
1606 		cmd_blk->sgl = hw_sgl;
1607 		cmd_blk->sgl_addr = hw_sgl_addr;
1608 
1609 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1610 			mbox->type5.opcode = MYRB_CMD_READ_SG;
1611 		else
1612 			mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1613 
1614 		mbox->type5.ld.xfer_len = block_cnt;
1615 		mbox->type5.ld.ldev_num = sdev->id;
1616 		mbox->type5.lba = lba;
1617 		mbox->type5.addr = hw_sgl_addr;
1618 		mbox->type5.sg_count = nsge;
1619 
1620 		scsi_for_each_sg(scmd, sgl, nsge, i) {
1621 			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1622 			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1623 			hw_sgl++;
1624 		}
1625 	}
1626 submit:
1627 	spin_lock_irqsave(&cb->queue_lock, flags);
1628 	cb->qcmd(cb, cmd_blk);
1629 	spin_unlock_irqrestore(&cb->queue_lock, flags);
1630 
1631 	return 0;
1632 }
1633 
1634 static int myrb_queuecommand(struct Scsi_Host *shost,
1635 		struct scsi_cmnd *scmd)
1636 {
1637 	struct scsi_device *sdev = scmd->device;
1638 
1639 	if (sdev->channel > myrb_logical_channel(shost)) {
1640 		scmd->result = (DID_BAD_TARGET << 16);
1641 		scmd->scsi_done(scmd);
1642 		return 0;
1643 	}
1644 	if (sdev->channel == myrb_logical_channel(shost))
1645 		return myrb_ldev_queuecommand(shost, scmd);
1646 
1647 	return myrb_pthru_queuecommand(shost, scmd);
1648 }
1649 
1650 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1651 {
1652 	struct myrb_hba *cb = shost_priv(sdev->host);
1653 	struct myrb_ldev_info *ldev_info;
1654 	unsigned short ldev_num = sdev->id;
1655 	enum raid_level level;
1656 
1657 	ldev_info = cb->ldev_info_buf + ldev_num;
1658 	if (!ldev_info)
1659 		return -ENXIO;
1660 
1661 	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1662 	if (!sdev->hostdata)
1663 		return -ENOMEM;
1664 	dev_dbg(&sdev->sdev_gendev,
1665 		"slave alloc ldev %d state %x\n",
1666 		ldev_num, ldev_info->state);
1667 	memcpy(sdev->hostdata, ldev_info,
1668 	       sizeof(*ldev_info));
1669 	switch (ldev_info->raid_level) {
1670 	case MYRB_RAID_LEVEL0:
1671 		level = RAID_LEVEL_LINEAR;
1672 		break;
1673 	case MYRB_RAID_LEVEL1:
1674 		level = RAID_LEVEL_1;
1675 		break;
1676 	case MYRB_RAID_LEVEL3:
1677 		level = RAID_LEVEL_3;
1678 		break;
1679 	case MYRB_RAID_LEVEL5:
1680 		level = RAID_LEVEL_5;
1681 		break;
1682 	case MYRB_RAID_LEVEL6:
1683 		level = RAID_LEVEL_6;
1684 		break;
1685 	case MYRB_RAID_JBOD:
1686 		level = RAID_LEVEL_JBOD;
1687 		break;
1688 	default:
1689 		level = RAID_LEVEL_UNKNOWN;
1690 		break;
1691 	}
1692 	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1693 	return 0;
1694 }
1695 
1696 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1697 {
1698 	struct myrb_hba *cb = shost_priv(sdev->host);
1699 	struct myrb_pdev_state *pdev_info;
1700 	unsigned short status;
1701 
1702 	if (sdev->id > MYRB_MAX_TARGETS)
1703 		return -ENXIO;
1704 
1705 	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1706 	if (!pdev_info)
1707 		return -ENOMEM;
1708 
1709 	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1710 				  sdev, pdev_info);
1711 	if (status != MYRB_STATUS_SUCCESS) {
1712 		dev_dbg(&sdev->sdev_gendev,
1713 			"Failed to get device state, status %x\n",
1714 			status);
1715 		kfree(pdev_info);
1716 		return -ENXIO;
1717 	}
1718 	if (!pdev_info->present) {
1719 		dev_dbg(&sdev->sdev_gendev,
1720 			"device not present, skip\n");
1721 		kfree(pdev_info);
1722 		return -ENXIO;
1723 	}
1724 	dev_dbg(&sdev->sdev_gendev,
1725 		"slave alloc pdev %d:%d state %x\n",
1726 		sdev->channel, sdev->id, pdev_info->state);
1727 	sdev->hostdata = pdev_info;
1728 
1729 	return 0;
1730 }
1731 
1732 static int myrb_slave_alloc(struct scsi_device *sdev)
1733 {
1734 	if (sdev->channel > myrb_logical_channel(sdev->host))
1735 		return -ENXIO;
1736 
1737 	if (sdev->lun > 0)
1738 		return -ENXIO;
1739 
1740 	if (sdev->channel == myrb_logical_channel(sdev->host))
1741 		return myrb_ldev_slave_alloc(sdev);
1742 
1743 	return myrb_pdev_slave_alloc(sdev);
1744 }
1745 
1746 static int myrb_slave_configure(struct scsi_device *sdev)
1747 {
1748 	struct myrb_ldev_info *ldev_info;
1749 
1750 	if (sdev->channel > myrb_logical_channel(sdev->host))
1751 		return -ENXIO;
1752 
1753 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
1754 		sdev->no_uld_attach = 1;
1755 		return 0;
1756 	}
1757 	if (sdev->lun != 0)
1758 		return -ENXIO;
1759 
1760 	ldev_info = sdev->hostdata;
1761 	if (!ldev_info)
1762 		return -ENXIO;
1763 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1764 		sdev_printk(KERN_INFO, sdev,
1765 			    "Logical drive is %s\n",
1766 			    myrb_devstate_name(ldev_info->state));
1767 
1768 	sdev->tagged_supported = 1;
1769 	return 0;
1770 }
1771 
1772 static void myrb_slave_destroy(struct scsi_device *sdev)
1773 {
1774 	kfree(sdev->hostdata);
1775 }
1776 
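/*
 * BIOS disk geometry: heads and sectors per track come from the
 * controller configuration. Note that sector_div() reduces its first
 * argument to the quotient and returns the remainder, so the cylinder
 * count is the reduced capacity, not sector_div()'s return value.
 */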
1777 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1778 		sector_t capacity, int geom[])
1779 {
1780 	struct myrb_hba *cb = shost_priv(sdev->host);
1781 
1782 	geom[0] = cb->ldev_geom_heads;
1783 	geom[1] = cb->ldev_geom_sectors;
	sector_div(capacity, geom[0] * geom[1]);
	geom[2] = capacity;
1785 
1786 	return 0;
1787 }
1788 
1789 static ssize_t raid_state_show(struct device *dev,
1790 		struct device_attribute *attr, char *buf)
1791 {
1792 	struct scsi_device *sdev = to_scsi_device(dev);
1793 	struct myrb_hba *cb = shost_priv(sdev->host);
1794 	int ret;
1795 
1796 	if (!sdev->hostdata)
1797 		return snprintf(buf, 16, "Unknown\n");
1798 
1799 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1800 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1801 		const char *name;
1802 
1803 		name = myrb_devstate_name(ldev_info->state);
1804 		if (name)
1805 			ret = snprintf(buf, 32, "%s\n", name);
1806 		else
1807 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1808 				       ldev_info->state);
1809 	} else {
1810 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
1811 		unsigned short status;
1812 		const char *name;
1813 
1814 		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1815 					  sdev, pdev_info);
1816 		if (status != MYRB_STATUS_SUCCESS)
1817 			sdev_printk(KERN_INFO, sdev,
1818 				    "Failed to get device state, status %x\n",
1819 				    status);
1820 
1821 		if (!pdev_info->present)
1822 			name = "Removed";
1823 		else
1824 			name = myrb_devstate_name(pdev_info->state);
1825 		if (name)
1826 			ret = snprintf(buf, 32, "%s\n", name);
1827 		else
1828 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1829 				       pdev_info->state);
1830 	}
1831 	return ret;
1832 }
1833 
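/*
 * Writing "kill"/"offline", "online" or "standby" to the raid_state
 * attribute of a physical device requests the matching device state
 * change from the controller.
 */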
1834 static ssize_t raid_state_store(struct device *dev,
1835 		struct device_attribute *attr, const char *buf, size_t count)
1836 {
1837 	struct scsi_device *sdev = to_scsi_device(dev);
1838 	struct myrb_hba *cb = shost_priv(sdev->host);
1839 	struct myrb_pdev_state *pdev_info;
1840 	enum myrb_devstate new_state;
1841 	unsigned short status;
1842 
1843 	if (!strncmp(buf, "kill", 4) ||
1844 	    !strncmp(buf, "offline", 7))
1845 		new_state = MYRB_DEVICE_DEAD;
1846 	else if (!strncmp(buf, "online", 6))
1847 		new_state = MYRB_DEVICE_ONLINE;
1848 	else if (!strncmp(buf, "standby", 7))
1849 		new_state = MYRB_DEVICE_STANDBY;
1850 	else
1851 		return -EINVAL;
1852 
1853 	pdev_info = sdev->hostdata;
1854 	if (!pdev_info) {
1855 		sdev_printk(KERN_INFO, sdev,
1856 			    "Failed - no physical device information\n");
1857 		return -ENXIO;
1858 	}
1859 	if (!pdev_info->present) {
1860 		sdev_printk(KERN_INFO, sdev,
1861 			    "Failed - device not present\n");
1862 		return -ENXIO;
1863 	}
1864 
1865 	if (pdev_info->state == new_state)
1866 		return count;
1867 
1868 	status = myrb_set_pdev_state(cb, sdev, new_state);
1869 	switch (status) {
1870 	case MYRB_STATUS_SUCCESS:
1871 		break;
1872 	case MYRB_STATUS_START_DEVICE_FAILED:
1873 		sdev_printk(KERN_INFO, sdev,
1874 			     "Failed - Unable to Start Device\n");
1875 		count = -EAGAIN;
1876 		break;
1877 	case MYRB_STATUS_NO_DEVICE:
1878 		sdev_printk(KERN_INFO, sdev,
1879 			    "Failed - No Device at Address\n");
1880 		count = -ENODEV;
1881 		break;
1882 	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1883 		sdev_printk(KERN_INFO, sdev,
1884 			 "Failed - Invalid Channel or Target or Modifier\n");
1885 		count = -EINVAL;
1886 		break;
1887 	case MYRB_STATUS_CHANNEL_BUSY:
1888 		sdev_printk(KERN_INFO, sdev,
1889 			 "Failed - Channel Busy\n");
1890 		count = -EBUSY;
1891 		break;
1892 	default:
1893 		sdev_printk(KERN_INFO, sdev,
1894 			 "Failed - Unexpected Status %04X\n", status);
1895 		count = -EIO;
1896 		break;
1897 	}
1898 	return count;
1899 }
1900 static DEVICE_ATTR_RW(raid_state);
1901 
1902 static ssize_t raid_level_show(struct device *dev,
1903 		struct device_attribute *attr, char *buf)
1904 {
1905 	struct scsi_device *sdev = to_scsi_device(dev);
1906 
1907 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1908 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1909 		const char *name;
1910 
1911 		if (!ldev_info)
1912 			return -ENXIO;
1913 
1914 		name = myrb_raidlevel_name(ldev_info->raid_level);
1915 		if (!name)
1916 			return snprintf(buf, 32, "Invalid (%02X)\n",
1917 					ldev_info->state);
1918 		return snprintf(buf, 32, "%s\n", name);
1919 	}
1920 	return snprintf(buf, 32, "Physical Drive\n");
1921 }
1922 static DEVICE_ATTR_RO(raid_level);
1923 
1924 static ssize_t rebuild_show(struct device *dev,
1925 		struct device_attribute *attr, char *buf)
1926 {
1927 	struct scsi_device *sdev = to_scsi_device(dev);
1928 	struct myrb_hba *cb = shost_priv(sdev->host);
1929 	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return snprintf(buf, 64, "physical device - not rebuilding\n");

	status = myrb_get_rbld_progress(cb, &rbld_buf);

	/* rbld_buf is only valid when the command succeeded */
	if (status != MYRB_STATUS_SUCCESS ||
	    rbld_buf.ldev_num != sdev->id)
		return snprintf(buf, 64, "not rebuilding\n");

	return snprintf(buf, 64, "rebuilding block %u of %u\n",
			rbld_buf.ldev_size - rbld_buf.blocks_left,
			rbld_buf.ldev_size);
1944 }
1945 
1946 static ssize_t rebuild_store(struct device *dev,
1947 		struct device_attribute *attr, const char *buf, size_t count)
1948 {
1949 	struct scsi_device *sdev = to_scsi_device(dev);
1950 	struct myrb_hba *cb = shost_priv(sdev->host);
1951 	struct myrb_cmdblk *cmd_blk;
1952 	union myrb_cmd_mbox *mbox;
1953 	unsigned short status;
1954 	int rc, start;
1955 	const char *msg;
1956 
1957 	rc = kstrtoint(buf, 0, &start);
1958 	if (rc)
1959 		return rc;
1960 
1961 	if (sdev->channel >= myrb_logical_channel(sdev->host))
1962 		return -ENXIO;
1963 
1964 	status = myrb_get_rbld_progress(cb, NULL);
1965 	if (start) {
1966 		if (status == MYRB_STATUS_SUCCESS) {
1967 			sdev_printk(KERN_INFO, sdev,
1968 				    "Rebuild Not Initiated; already in progress\n");
1969 			return -EALREADY;
1970 		}
1971 		mutex_lock(&cb->dcmd_mutex);
1972 		cmd_blk = &cb->dcmd_blk;
1973 		myrb_reset_cmd(cmd_blk);
1974 		mbox = &cmd_blk->mbox;
1975 		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1976 		mbox->type3D.id = MYRB_DCMD_TAG;
1977 		mbox->type3D.channel = sdev->channel;
1978 		mbox->type3D.target = sdev->id;
1979 		status = myrb_exec_cmd(cb, cmd_blk);
1980 		mutex_unlock(&cb->dcmd_mutex);
1981 	} else {
1982 		struct pci_dev *pdev = cb->pdev;
1983 		unsigned char *rate;
1984 		dma_addr_t rate_addr;
1985 
1986 		if (status != MYRB_STATUS_SUCCESS) {
1987 			sdev_printk(KERN_INFO, sdev,
1988 				    "Rebuild Not Cancelled; not in progress\n");
			/*
			 * Nothing to cancel; do not return 0 from a sysfs
			 * store, or userspace will retry the write forever.
			 */
			return count;
1990 		}
1991 
1992 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1993 					  &rate_addr, GFP_KERNEL);
1994 		if (rate == NULL) {
1995 			sdev_printk(KERN_INFO, sdev,
1996 				    "Cancellation of Rebuild Failed - Out of Memory\n");
1997 			return -ENOMEM;
1998 		}
1999 		mutex_lock(&cb->dcmd_mutex);
2000 		cmd_blk = &cb->dcmd_blk;
2001 		myrb_reset_cmd(cmd_blk);
2002 		mbox = &cmd_blk->mbox;
2003 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2004 		mbox->type3R.id = MYRB_DCMD_TAG;
2005 		mbox->type3R.rbld_rate = 0xFF;
2006 		mbox->type3R.addr = rate_addr;
2007 		status = myrb_exec_cmd(cb, cmd_blk);
2008 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2009 		mutex_unlock(&cb->dcmd_mutex);
2010 	}
2011 	if (status == MYRB_STATUS_SUCCESS) {
2012 		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2013 			    start ? "Initiated" : "Cancelled");
2014 		return count;
2015 	}
2016 	if (!start) {
2017 		sdev_printk(KERN_INFO, sdev,
2018 			    "Rebuild Not Cancelled, status 0x%x\n",
2019 			    status);
2020 		return -EIO;
2021 	}
2022 
2023 	switch (status) {
2024 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2025 		msg = "Attempt to Rebuild Online or Unresponsive Drive";
2026 		break;
2027 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2028 		msg = "New Disk Failed During Rebuild";
2029 		break;
2030 	case MYRB_STATUS_INVALID_ADDRESS:
2031 		msg = "Invalid Device Address";
2032 		break;
2033 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2034 		msg = "Already in Progress";
2035 		break;
2036 	default:
2037 		msg = NULL;
2038 		break;
2039 	}
2040 	if (msg)
2041 		sdev_printk(KERN_INFO, sdev,
2042 			    "Rebuild Failed - %s\n", msg);
2043 	else
2044 		sdev_printk(KERN_INFO, sdev,
2045 			    "Rebuild Failed, status 0x%x\n", status);
2046 
2047 	return -EIO;
2048 }
2049 static DEVICE_ATTR_RW(rebuild);
2050 
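/*
 * Example usage (the SCSI address is illustrative; use the address of
 * the physical drive to be rebuilt):
 *
 *	echo 1 > /sys/bus/scsi/devices/0:0:3:0/rebuild	(start rebuild)
 *	echo 0 > /sys/bus/scsi/devices/0:0:3:0/rebuild	(cancel rebuild)
 */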
2051 static ssize_t consistency_check_store(struct device *dev,
2052 		struct device_attribute *attr, const char *buf, size_t count)
2053 {
2054 	struct scsi_device *sdev = to_scsi_device(dev);
2055 	struct myrb_hba *cb = shost_priv(sdev->host);
2056 	struct myrb_rbld_progress rbld_buf;
2057 	struct myrb_cmdblk *cmd_blk;
2058 	union myrb_cmd_mbox *mbox;
2060 	unsigned short status;
2061 	int rc, start;
2062 	const char *msg;
2063 
2064 	rc = kstrtoint(buf, 0, &start);
2065 	if (rc)
2066 		return rc;
2067 
2068 	if (sdev->channel < myrb_logical_channel(sdev->host))
2069 		return -ENXIO;
2070 
2071 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2072 	if (start) {
2073 		if (status == MYRB_STATUS_SUCCESS) {
2074 			sdev_printk(KERN_INFO, sdev,
2075 				    "Check Consistency Not Initiated; already in progress\n");
2076 			return -EALREADY;
2077 		}
2078 		mutex_lock(&cb->dcmd_mutex);
2079 		cmd_blk = &cb->dcmd_blk;
2080 		myrb_reset_cmd(cmd_blk);
2081 		mbox = &cmd_blk->mbox;
2082 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2083 		mbox->type3C.id = MYRB_DCMD_TAG;
2084 		mbox->type3C.ldev_num = sdev->id;
2085 		mbox->type3C.auto_restore = true;
2086 
2087 		status = myrb_exec_cmd(cb, cmd_blk);
2088 		mutex_unlock(&cb->dcmd_mutex);
2089 	} else {
2090 		struct pci_dev *pdev = cb->pdev;
2091 		unsigned char *rate;
2092 		dma_addr_t rate_addr;
2093 
		if (status != MYRB_STATUS_SUCCESS ||
		    rbld_buf.ldev_num != sdev->id) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Cancelled; not in progress\n");
			/*
			 * Nothing to cancel; do not return 0 from a sysfs
			 * store, or userspace will retry the write forever.
			 */
			return count;
		}
2099 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2100 					  &rate_addr, GFP_KERNEL);
2101 		if (rate == NULL) {
2102 			sdev_printk(KERN_INFO, sdev,
2103 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2104 			return -ENOMEM;
2105 		}
2106 		mutex_lock(&cb->dcmd_mutex);
2107 		cmd_blk = &cb->dcmd_blk;
2108 		myrb_reset_cmd(cmd_blk);
2109 		mbox = &cmd_blk->mbox;
2110 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2111 		mbox->type3R.id = MYRB_DCMD_TAG;
2112 		mbox->type3R.rbld_rate = 0xFF;
2113 		mbox->type3R.addr = rate_addr;
2114 		status = myrb_exec_cmd(cb, cmd_blk);
2115 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2116 		mutex_unlock(&cb->dcmd_mutex);
2117 	}
2118 	if (status == MYRB_STATUS_SUCCESS) {
2119 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2120 			    start ? "Initiated" : "Cancelled");
2121 		return count;
2122 	}
2123 	if (!start) {
2124 		sdev_printk(KERN_INFO, sdev,
2125 			    "Check Consistency Not Cancelled, status 0x%x\n",
2126 			    status);
2127 		return -EIO;
2128 	}
2129 
2130 	switch (status) {
2131 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2132 		msg = "Dependent Physical Device is DEAD";
2133 		break;
2134 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2135 		msg = "New Disk Failed During Rebuild";
2136 		break;
2137 	case MYRB_STATUS_INVALID_ADDRESS:
2138 		msg = "Invalid or Nonredundant Logical Drive";
2139 		break;
2140 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2141 		msg = "Already in Progress";
2142 		break;
2143 	default:
2144 		msg = NULL;
2145 		break;
2146 	}
2147 	if (msg)
2148 		sdev_printk(KERN_INFO, sdev,
2149 			    "Check Consistency Failed - %s\n", msg);
2150 	else
2151 		sdev_printk(KERN_INFO, sdev,
2152 			    "Check Consistency Failed, status 0x%x\n", status);
2153 
2154 	return -EIO;
2155 }
2156 
2157 static ssize_t consistency_check_show(struct device *dev,
2158 		struct device_attribute *attr, char *buf)
2159 {
2160 	return rebuild_show(dev, attr, buf);
2161 }
2162 static DEVICE_ATTR_RW(consistency_check);
2163 
2164 static ssize_t ctlr_num_show(struct device *dev,
2165 		struct device_attribute *attr, char *buf)
2166 {
2167 	struct Scsi_Host *shost = class_to_shost(dev);
2168 	struct myrb_hba *cb = shost_priv(shost);
2169 
2170 	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2171 }
2172 static DEVICE_ATTR_RO(ctlr_num);
2173 
2174 static ssize_t firmware_show(struct device *dev,
2175 		struct device_attribute *attr, char *buf)
2176 {
2177 	struct Scsi_Host *shost = class_to_shost(dev);
2178 	struct myrb_hba *cb = shost_priv(shost);
2179 
2180 	return snprintf(buf, 16, "%s\n", cb->fw_version);
2181 }
2182 static DEVICE_ATTR_RO(firmware);
2183 
2184 static ssize_t model_show(struct device *dev,
2185 		struct device_attribute *attr, char *buf)
2186 {
2187 	struct Scsi_Host *shost = class_to_shost(dev);
2188 	struct myrb_hba *cb = shost_priv(shost);
2189 
2190 	return snprintf(buf, 16, "%s\n", cb->model_name);
2191 }
2192 static DEVICE_ATTR_RO(model);
2193 
2194 static ssize_t flush_cache_store(struct device *dev,
2195 		struct device_attribute *attr, const char *buf, size_t count)
2196 {
2197 	struct Scsi_Host *shost = class_to_shost(dev);
2198 	struct myrb_hba *cb = shost_priv(shost);
2199 	unsigned short status;
2200 
2201 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2202 	if (status == MYRB_STATUS_SUCCESS) {
2203 		shost_printk(KERN_INFO, shost,
2204 			     "Cache Flush Completed\n");
2205 		return count;
2206 	}
2207 	shost_printk(KERN_INFO, shost,
2208 		     "Cache Flush Failed, status %x\n", status);
2209 	return -EIO;
2210 }
2211 static DEVICE_ATTR_WO(flush_cache);
2212 
2213 static struct device_attribute *myrb_sdev_attrs[] = {
2214 	&dev_attr_rebuild,
2215 	&dev_attr_consistency_check,
2216 	&dev_attr_raid_state,
2217 	&dev_attr_raid_level,
2218 	NULL,
2219 };
2220 
2221 static struct device_attribute *myrb_shost_attrs[] = {
2222 	&dev_attr_ctlr_num,
2223 	&dev_attr_model,
2224 	&dev_attr_firmware,
2225 	&dev_attr_flush_cache,
2226 	NULL,
2227 };
2228 
2229 static struct scsi_host_template myrb_template = {
2230 	.module			= THIS_MODULE,
2231 	.name			= "DAC960",
2232 	.proc_name		= "myrb",
2233 	.queuecommand		= myrb_queuecommand,
2234 	.eh_host_reset_handler	= myrb_host_reset,
2235 	.slave_alloc		= myrb_slave_alloc,
2236 	.slave_configure	= myrb_slave_configure,
2237 	.slave_destroy		= myrb_slave_destroy,
2238 	.bios_param		= myrb_biosparam,
2239 	.cmd_size		= sizeof(struct myrb_cmdblk),
2240 	.shost_attrs		= myrb_shost_attrs,
2241 	.sdev_attrs		= myrb_sdev_attrs,
2242 	.this_id		= -1,
2243 };
2244 
/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
2249 static int myrb_is_raid(struct device *dev)
2250 {
2251 	struct scsi_device *sdev = to_scsi_device(dev);
2252 
2253 	return sdev->channel == myrb_logical_channel(sdev->host);
2254 }
2255 
/**
 * myrb_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
2260 static void myrb_get_resync(struct device *dev)
2261 {
2262 	struct scsi_device *sdev = to_scsi_device(dev);
2263 	struct myrb_hba *cb = shost_priv(sdev->host);
2264 	struct myrb_rbld_progress rbld_buf;
2265 	unsigned int percent_complete = 0;
2266 	unsigned short status;
2267 	unsigned int ldev_size = 0, remaining = 0;
2268 
2269 	if (sdev->channel < myrb_logical_channel(sdev->host))
2270 		return;
2271 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2272 	if (status == MYRB_STATUS_SUCCESS) {
2273 		if (rbld_buf.ldev_num == sdev->id) {
2274 			ldev_size = rbld_buf.ldev_size;
2275 			remaining = rbld_buf.blocks_left;
2276 		}
2277 	}
2278 	if (remaining && ldev_size)
2279 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2280 	raid_set_resync(myrb_raid_template, dev, percent_complete);
2281 }
2282 
/**
 * myrb_get_state - get raid volume status
 * @dev: the device struct object
 */
2287 static void myrb_get_state(struct device *dev)
2288 {
2289 	struct scsi_device *sdev = to_scsi_device(dev);
2290 	struct myrb_hba *cb = shost_priv(sdev->host);
2291 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2292 	enum raid_state state = RAID_STATE_UNKNOWN;
2293 	unsigned short status;
2294 
2295 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2296 		state = RAID_STATE_UNKNOWN;
2297 	else {
2298 		status = myrb_get_rbld_progress(cb, NULL);
2299 		if (status == MYRB_STATUS_SUCCESS)
2300 			state = RAID_STATE_RESYNCING;
2301 		else {
2302 			switch (ldev_info->state) {
2303 			case MYRB_DEVICE_ONLINE:
2304 				state = RAID_STATE_ACTIVE;
2305 				break;
2306 			case MYRB_DEVICE_WO:
2307 			case MYRB_DEVICE_CRITICAL:
2308 				state = RAID_STATE_DEGRADED;
2309 				break;
2310 			default:
2311 				state = RAID_STATE_OFFLINE;
2312 			}
2313 		}
2314 	}
2315 	raid_set_state(myrb_raid_template, dev, state);
2316 }
2317 
2318 static struct raid_function_template myrb_raid_functions = {
2319 	.cookie		= &myrb_template,
2320 	.is_raid	= myrb_is_raid,
2321 	.get_resync	= myrb_get_resync,
2322 	.get_state	= myrb_get_state,
2323 };
2324 
2325 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2326 		struct scsi_cmnd *scmd)
2327 {
2328 	unsigned short status;
2329 
2330 	if (!cmd_blk)
2331 		return;
2332 
2333 	scsi_dma_unmap(scmd);
2334 
2335 	if (cmd_blk->dcdb) {
2336 		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2337 		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2338 			      cmd_blk->dcdb_addr);
2339 		cmd_blk->dcdb = NULL;
2340 	}
2341 	if (cmd_blk->sgl) {
2342 		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2343 		cmd_blk->sgl = NULL;
2344 		cmd_blk->sgl_addr = 0;
2345 	}
2346 	status = cmd_blk->status;
2347 	switch (status) {
2348 	case MYRB_STATUS_SUCCESS:
2349 	case MYRB_STATUS_DEVICE_BUSY:
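		/*
		 * MYRB_STATUS_DEVICE_BUSY appears to share its value with
		 * SAM_STAT_BUSY (0x08), so the controller status can double
		 * as the SCSI status byte here.
		 */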
2350 		scmd->result = (DID_OK << 16) | status;
2351 		break;
2352 	case MYRB_STATUS_BAD_DATA:
2353 		dev_dbg(&scmd->device->sdev_gendev,
2354 			"Bad Data Encountered\n");
2355 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2356 			/* Unrecovered read error */
2357 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2358 						MEDIUM_ERROR, 0x11, 0);
2359 		else
2360 			/* Write error */
2361 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2362 						MEDIUM_ERROR, 0x0C, 0);
2363 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2364 		break;
2365 	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2366 		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2367 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2368 			/* Unrecovered read error, auto-reallocation failed */
2369 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2370 						MEDIUM_ERROR, 0x11, 0x04);
2371 		else
2372 			/* Write error, auto-reallocation failed */
2373 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2374 						MEDIUM_ERROR, 0x0C, 0x02);
2375 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2376 		break;
2377 	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2378 		dev_dbg(&scmd->device->sdev_gendev,
			    "Logical Drive Nonexistent or Offline\n");
2380 		scmd->result = (DID_BAD_TARGET << 16);
2381 		break;
2382 	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2383 		dev_dbg(&scmd->device->sdev_gendev,
			    "Attempt to Access Beyond End of Logical Drive\n");
		/* Logical block address out of range */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
					NOT_READY, 0x21, 0);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
		break;
2389 	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2390 		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2391 		scmd->result = (DID_BAD_TARGET << 16);
2392 		break;
2393 	default:
2394 		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X\n", status);
2396 		scmd->result = (DID_ERROR << 16);
2397 		break;
2398 	}
2399 	scmd->scsi_done(scmd);
2400 }
2401 
2402 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2403 {
2404 	if (!cmd_blk)
2405 		return;
2406 
2407 	if (cmd_blk->completion) {
2408 		complete(cmd_blk->completion);
2409 		cmd_blk->completion = NULL;
2410 	}
2411 }
2412 
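/*
 * Background monitoring work. At most one outstanding condition is
 * serviced per invocation; while anything is pending the work is
 * rescheduled after a short (10 jiffies) delay, otherwise a new
 * controller enquiry is issued at the regular monitoring interval.
 */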
2413 static void myrb_monitor(struct work_struct *work)
2414 {
2415 	struct myrb_hba *cb = container_of(work,
2416 			struct myrb_hba, monitor_work.work);
2417 	struct Scsi_Host *shost = cb->host;
2418 	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2419 
2420 	dev_dbg(&shost->shost_gendev, "monitor tick\n");
2421 
2422 	if (cb->new_ev_seq > cb->old_ev_seq) {
2423 		int event = cb->old_ev_seq;
2424 
2425 		dev_dbg(&shost->shost_gendev,
2426 			"get event log no %d/%d\n",
2427 			cb->new_ev_seq, event);
2428 		myrb_get_event(cb, event);
2429 		cb->old_ev_seq = event + 1;
2430 		interval = 10;
2431 	} else if (cb->need_err_info) {
2432 		cb->need_err_info = false;
2433 		dev_dbg(&shost->shost_gendev, "get error table\n");
2434 		myrb_get_errtable(cb);
2435 		interval = 10;
2436 	} else if (cb->need_rbld && cb->rbld_first) {
2437 		cb->need_rbld = false;
2438 		dev_dbg(&shost->shost_gendev,
2439 			"get rebuild progress\n");
2440 		myrb_update_rbld_progress(cb);
2441 		interval = 10;
2442 	} else if (cb->need_ldev_info) {
2443 		cb->need_ldev_info = false;
2444 		dev_dbg(&shost->shost_gendev,
2445 			"get logical drive info\n");
2446 		myrb_get_ldev_info(cb);
2447 		interval = 10;
2448 	} else if (cb->need_rbld) {
2449 		cb->need_rbld = false;
2450 		dev_dbg(&shost->shost_gendev,
2451 			"get rebuild progress\n");
2452 		myrb_update_rbld_progress(cb);
2453 		interval = 10;
2454 	} else if (cb->need_cc_status) {
2455 		cb->need_cc_status = false;
2456 		dev_dbg(&shost->shost_gendev,
2457 			"get consistency check progress\n");
2458 		myrb_get_cc_progress(cb);
2459 		interval = 10;
2460 	} else if (cb->need_bgi_status) {
2461 		cb->need_bgi_status = false;
2462 		dev_dbg(&shost->shost_gendev, "get background init status\n");
2463 		myrb_bgi_control(cb);
2464 		interval = 10;
2465 	} else {
2466 		dev_dbg(&shost->shost_gendev, "new enquiry\n");
2467 		mutex_lock(&cb->dma_mutex);
2468 		myrb_hba_enquiry(cb);
2469 		mutex_unlock(&cb->dma_mutex);
2470 		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2471 		    cb->need_err_info || cb->need_rbld ||
2472 		    cb->need_ldev_info || cb->need_cc_status ||
2473 		    cb->need_bgi_status) {
2474 			dev_dbg(&shost->shost_gendev,
2475 				"reschedule monitor\n");
2476 			interval = 0;
2477 		}
2478 	}
2479 	if (interval > 1)
2480 		cb->primary_monitor_time = jiffies;
2481 	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2482 }
2483 
/**
 * myrb_err_status - reports controller BIOS messages
 * @cb: myrb_hba instance
 * @error: error status byte read from the Error Status Register
 * @parm0: first parameter byte
 * @parm1: second parameter byte
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */
2492 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2493 		unsigned char parm0, unsigned char parm1)
2494 {
2495 	struct pci_dev *pdev = cb->pdev;
2496 
2497 	switch (error) {
2498 	case 0x00:
2499 		dev_info(&pdev->dev,
2500 			 "Physical Device %d:%d Not Responding\n",
2501 			 parm1, parm0);
2502 		break;
2503 	case 0x08:
2504 		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2505 		break;
2506 	case 0x30:
2507 		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2508 		break;
2509 	case 0x60:
2510 		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2511 		break;
2512 	case 0x70:
2513 		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2514 		break;
2515 	case 0x90:
2516 		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2517 			   parm1, parm0);
2518 		break;
2519 	case 0xA0:
2520 		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2521 		break;
2522 	case 0xB0:
2523 		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2524 		break;
2525 	case 0xD0:
2526 		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2527 		break;
2528 	case 0xF0:
2529 		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2530 		return true;
2531 	default:
2532 		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2533 			error);
2534 		return true;
2535 	}
2536 	return false;
2537 }
2538 
2539 /*
2540  * Hardware-specific functions
2541  */
2542 
2543 /*
2544  * DAC960 LA Series Controllers
2545  */
2546 
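/*
 * Commands are submitted by writing a mailbox and ringing the inbound
 * doorbell (IDB); completions are signalled through the outbound
 * doorbell (ODB) and must be acknowledged there. Controllers with the
 * dual mode interface use the memory mailbox rather than the hardware
 * mailbox registers for command submission.
 */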
2547 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2548 {
2549 	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2550 }
2551 
2552 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2553 {
2554 	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2555 }
2556 
2557 static inline void DAC960_LA_gen_intr(void __iomem *base)
2558 {
2559 	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
2560 }
2561 
2562 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2563 {
2564 	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2565 }
2566 
2567 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2568 {
2569 	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2570 }
2571 
2572 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2573 {
2574 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2575 
2576 	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2577 }
2578 
2579 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2580 {
2581 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2582 
2583 	return !(idb & DAC960_LA_IDB_INIT_DONE);
2584 }
2585 
2586 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2587 {
2588 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2589 }
2590 
2591 static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
2592 {
2593 	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2594 }
2595 
2596 static inline void DAC960_LA_ack_intr(void __iomem *base)
2597 {
2598 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2599 	       base + DAC960_LA_ODB_OFFSET);
2600 }
2601 
2602 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2603 {
2604 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2605 
2606 	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2607 }
2608 
2609 static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
2610 {
2611 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2612 
2613 	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
2614 }
2615 
2616 static inline void DAC960_LA_enable_intr(void __iomem *base)
2617 {
	unsigned char imask = 0xFF;

	imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2622 }
2623 
2624 static inline void DAC960_LA_disable_intr(void __iomem *base)
2625 {
	unsigned char imask = 0xFF;

	imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2630 }
2631 
2632 static inline bool DAC960_LA_intr_enabled(void __iomem *base)
2633 {
2634 	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
2635 
2636 	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
2637 }
2638 
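/*
 * Word 0 of the mailbox holds the opcode and command identifier, so it
 * is written last, after a write barrier: the controller must never
 * observe a partially written command.
 */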
2639 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2640 		union myrb_cmd_mbox *mbox)
2641 {
2642 	mem_mbox->words[1] = mbox->words[1];
2643 	mem_mbox->words[2] = mbox->words[2];
2644 	mem_mbox->words[3] = mbox->words[3];
2645 	/* Memory barrier to prevent reordering */
2646 	wmb();
2647 	mem_mbox->words[0] = mbox->words[0];
2648 	/* Memory barrier to force PCI access */
2649 	mb();
2650 }
2651 
2652 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2653 		union myrb_cmd_mbox *mbox)
2654 {
2655 	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2656 	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2657 	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2658 	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2659 }
2660 
2661 static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
2662 {
2663 	return readb(base + DAC960_LA_STSID_OFFSET);
2664 }
2665 
2666 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2667 {
2668 	return readw(base + DAC960_LA_STS_OFFSET);
2669 }
2670 
2671 static inline bool
2672 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2673 		unsigned char *param0, unsigned char *param1)
2674 {
2675 	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2676 
2677 	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2678 		return false;
2679 	errsts &= ~DAC960_LA_ERRSTS_PENDING;
2680 
2681 	*error = errsts;
2682 	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2683 	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2684 	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2685 	return true;
2686 }
2687 
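/*
 * Single-shot command execution through the hardware mailbox, used
 * during initialization before the memory mailbox is operational:
 * wait for the mailbox to drain, post the command, ring the doorbell,
 * then poll for and acknowledge the completion status.
 */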
2688 static inline unsigned short
2689 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2690 		union myrb_cmd_mbox *mbox)
2691 {
2692 	unsigned short status;
2693 	int timeout = 0;
2694 
2695 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2696 		if (!DAC960_LA_hw_mbox_is_full(base))
2697 			break;
2698 		udelay(10);
2699 		timeout++;
2700 	}
2701 	if (DAC960_LA_hw_mbox_is_full(base)) {
2702 		dev_err(&pdev->dev,
2703 			"Timeout waiting for empty mailbox\n");
2704 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2705 	}
2706 	DAC960_LA_write_hw_mbox(base, mbox);
2707 	DAC960_LA_hw_mbox_new_cmd(base);
2708 	timeout = 0;
2709 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2710 		if (DAC960_LA_hw_mbox_status_available(base))
2711 			break;
2712 		udelay(10);
2713 		timeout++;
2714 	}
2715 	if (!DAC960_LA_hw_mbox_status_available(base)) {
2716 		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2717 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2718 	}
2719 	status = DAC960_LA_read_status(base);
2720 	DAC960_LA_ack_hw_mbox_intr(base);
2721 	DAC960_LA_ack_hw_mbox_status(base);
2722 
2723 	return status;
2724 }
2725 
2726 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2727 		struct myrb_hba *cb, void __iomem *base)
2728 {
2729 	int timeout = 0;
2730 	unsigned char error, parm0, parm1;
2731 
2732 	DAC960_LA_disable_intr(base);
2733 	DAC960_LA_ack_hw_mbox_status(base);
2734 	udelay(1000);
2735 	while (DAC960_LA_init_in_progress(base) &&
2736 	       timeout < MYRB_MAILBOX_TIMEOUT) {
2737 		if (DAC960_LA_read_error_status(base, &error,
2738 					      &parm0, &parm1) &&
2739 		    myrb_err_status(cb, error, parm0, parm1))
2740 			return -ENODEV;
2741 		udelay(10);
2742 		timeout++;
2743 	}
2744 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2745 		dev_err(&pdev->dev,
2746 			"Timeout waiting for Controller Initialisation\n");
2747 		return -ETIMEDOUT;
2748 	}
2749 	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2750 		dev_err(&pdev->dev,
2751 			"Unable to Enable Memory Mailbox Interface\n");
2752 		DAC960_LA_reset_ctrl(base);
2753 		return -ENODEV;
2754 	}
2755 	DAC960_LA_enable_intr(base);
2756 	cb->qcmd = myrb_qcmd;
2757 	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2758 	if (cb->dual_mode_interface)
2759 		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2760 	else
2761 		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2762 	cb->disable_intr = DAC960_LA_disable_intr;
2763 	cb->reset = DAC960_LA_reset_ctrl;
2764 
2765 	return 0;
2766 }
2767 
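/*
 * Completions are consumed from a ring of status mailboxes. Tags below
 * 3 (MYRB_DCMD_TAG and MYRB_MCMD_TAG) denote the driver's internal
 * command blocks; any other tag maps back to an outstanding SCSI
 * command, whose block tag is the completion tag minus 3.
 */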
2768 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2769 {
2770 	struct myrb_hba *cb = arg;
2771 	void __iomem *base = cb->io_base;
2772 	struct myrb_stat_mbox *next_stat_mbox;
2773 	unsigned long flags;
2774 
2775 	spin_lock_irqsave(&cb->queue_lock, flags);
2776 	DAC960_LA_ack_intr(base);
2777 	next_stat_mbox = cb->next_stat_mbox;
2778 	while (next_stat_mbox->valid) {
2779 		unsigned char id = next_stat_mbox->id;
2780 		struct scsi_cmnd *scmd = NULL;
2781 		struct myrb_cmdblk *cmd_blk = NULL;
2782 
2783 		if (id == MYRB_DCMD_TAG)
2784 			cmd_blk = &cb->dcmd_blk;
2785 		else if (id == MYRB_MCMD_TAG)
2786 			cmd_blk = &cb->mcmd_blk;
2787 		else {
2788 			scmd = scsi_host_find_tag(cb->host, id - 3);
2789 			if (scmd)
2790 				cmd_blk = scsi_cmd_priv(scmd);
2791 		}
2792 		if (cmd_blk)
2793 			cmd_blk->status = next_stat_mbox->status;
2794 		else
2795 			dev_err(&cb->pdev->dev,
2796 				"Unhandled command completion %d\n", id);
2797 
2798 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2799 		if (++next_stat_mbox > cb->last_stat_mbox)
2800 			next_stat_mbox = cb->first_stat_mbox;
2801 
2802 		if (cmd_blk) {
2803 			if (id < 3)
2804 				myrb_handle_cmdblk(cb, cmd_blk);
2805 			else
2806 				myrb_handle_scsi(cb, cmd_blk, scmd);
2807 		}
2808 	}
2809 	cb->next_stat_mbox = next_stat_mbox;
2810 	spin_unlock_irqrestore(&cb->queue_lock, flags);
2811 	return IRQ_HANDLED;
2812 }
2813 
2814 struct myrb_privdata DAC960_LA_privdata = {
2815 	.hw_init =	DAC960_LA_hw_init,
2816 	.irq_handler =	DAC960_LA_intr_handler,
2817 	.mmio_size =	DAC960_LA_mmio_size,
2818 };
2819 
2820 /*
2821  * DAC960 PG Series Controllers
2822  */
2823 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2824 {
2825 	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2826 }
2827 
2828 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2829 {
2830 	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2831 }
2832 
2833 static inline void DAC960_PG_gen_intr(void __iomem *base)
2834 {
2835 	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
2836 }
2837 
2838 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2839 {
2840 	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2841 }
2842 
2843 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2844 {
2845 	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2846 }
2847 
2848 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2849 {
	unsigned int idb = readl(base + DAC960_PG_IDB_OFFSET);
2851 
2852 	return idb & DAC960_PG_IDB_HWMBOX_FULL;
2853 }
2854 
2855 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2856 {
	unsigned int idb = readl(base + DAC960_PG_IDB_OFFSET);
2858 
2859 	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2860 }
2861 
2862 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2863 {
2864 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2865 }
2866 
2867 static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
2868 {
2869 	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2870 }
2871 
2872 static inline void DAC960_PG_ack_intr(void __iomem *base)
2873 {
2874 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2875 	       base + DAC960_PG_ODB_OFFSET);
2876 }
2877 
2878 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2879 {
	unsigned int odb = readl(base + DAC960_PG_ODB_OFFSET);
2881 
2882 	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2883 }
2884 
2885 static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
2886 {
	unsigned int odb = readl(base + DAC960_PG_ODB_OFFSET);
2888 
2889 	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
2890 }
2891 
2892 static inline void DAC960_PG_enable_intr(void __iomem *base)
2893 {
2894 	unsigned int imask = (unsigned int)-1;
2895 
2896 	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2897 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2898 }
2899 
2900 static inline void DAC960_PG_disable_intr(void __iomem *base)
2901 {
2902 	unsigned int imask = (unsigned int)-1;
2903 
2904 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2905 }
2906 
2907 static inline bool DAC960_PG_intr_enabled(void __iomem *base)
2908 {
2909 	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
2910 
2911 	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
2912 }
2913 
2914 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2915 		union myrb_cmd_mbox *mbox)
2916 {
2917 	mem_mbox->words[1] = mbox->words[1];
2918 	mem_mbox->words[2] = mbox->words[2];
2919 	mem_mbox->words[3] = mbox->words[3];
2920 	/* Memory barrier to prevent reordering */
2921 	wmb();
2922 	mem_mbox->words[0] = mbox->words[0];
2923 	/* Memory barrier to force PCI access */
2924 	mb();
2925 }
2926 
2927 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2928 		union myrb_cmd_mbox *mbox)
2929 {
2930 	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2931 	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2932 	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2933 	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2934 }
2935 
2936 static inline unsigned char
2937 DAC960_PG_read_status_cmd_ident(void __iomem *base)
2938 {
2939 	return readb(base + DAC960_PG_STSID_OFFSET);
2940 }
2941 
2942 static inline unsigned short
2943 DAC960_PG_read_status(void __iomem *base)
2944 {
2945 	return readw(base + DAC960_PG_STS_OFFSET);
2946 }
2947 
2948 static inline bool
2949 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2950 		unsigned char *param0, unsigned char *param1)
2951 {
2952 	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2953 
2954 	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2955 		return false;
2956 	errsts &= ~DAC960_PG_ERRSTS_PENDING;
2957 	*error = errsts;
2958 	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2959 	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2960 	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2961 	return true;
2962 }
2963 
2964 static inline unsigned short
2965 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2966 		union myrb_cmd_mbox *mbox)
2967 {
2968 	unsigned short status;
2969 	int timeout = 0;
2970 
2971 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2972 		if (!DAC960_PG_hw_mbox_is_full(base))
2973 			break;
2974 		udelay(10);
2975 		timeout++;
2976 	}
2977 	if (DAC960_PG_hw_mbox_is_full(base)) {
2978 		dev_err(&pdev->dev,
2979 			"Timeout waiting for empty mailbox\n");
2980 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2981 	}
2982 	DAC960_PG_write_hw_mbox(base, mbox);
2983 	DAC960_PG_hw_mbox_new_cmd(base);
2984 
2985 	timeout = 0;
2986 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2987 		if (DAC960_PG_hw_mbox_status_available(base))
2988 			break;
2989 		udelay(10);
2990 		timeout++;
2991 	}
2992 	if (!DAC960_PG_hw_mbox_status_available(base)) {
2993 		dev_err(&pdev->dev,
2994 			"Timeout waiting for mailbox status\n");
2995 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2996 	}
2997 	status = DAC960_PG_read_status(base);
2998 	DAC960_PG_ack_hw_mbox_intr(base);
2999 	DAC960_PG_ack_hw_mbox_status(base);
3000 
3001 	return status;
3002 }
3003 
3004 static int DAC960_PG_hw_init(struct pci_dev *pdev,
3005 		struct myrb_hba *cb, void __iomem *base)
3006 {
3007 	int timeout = 0;
3008 	unsigned char error, parm0, parm1;
3009 
3010 	DAC960_PG_disable_intr(base);
3011 	DAC960_PG_ack_hw_mbox_status(base);
3012 	udelay(1000);
3013 	while (DAC960_PG_init_in_progress(base) &&
3014 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3015 		if (DAC960_PG_read_error_status(base, &error,
3016 						&parm0, &parm1) &&
3017 		    myrb_err_status(cb, error, parm0, parm1))
3018 			return -EIO;
3019 		udelay(10);
3020 		timeout++;
3021 	}
3022 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3023 		dev_err(&pdev->dev,
3024 			"Timeout waiting for Controller Initialisation\n");
3025 		return -ETIMEDOUT;
3026 	}
3027 	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
3028 		dev_err(&pdev->dev,
3029 			"Unable to Enable Memory Mailbox Interface\n");
3030 		DAC960_PG_reset_ctrl(base);
3031 		return -ENODEV;
3032 	}
3033 	DAC960_PG_enable_intr(base);
3034 	cb->qcmd = myrb_qcmd;
3035 	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
3036 	if (cb->dual_mode_interface)
3037 		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
3038 	else
3039 		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
3040 	cb->disable_intr = DAC960_PG_disable_intr;
3041 	cb->reset = DAC960_PG_reset_ctrl;
3042 
3043 	return 0;
3044 }
3045 
3046 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
3047 {
3048 	struct myrb_hba *cb = arg;
3049 	void __iomem *base = cb->io_base;
3050 	struct myrb_stat_mbox *next_stat_mbox;
3051 	unsigned long flags;
3052 
3053 	spin_lock_irqsave(&cb->queue_lock, flags);
3054 	DAC960_PG_ack_intr(base);
3055 	next_stat_mbox = cb->next_stat_mbox;
3056 	while (next_stat_mbox->valid) {
3057 		unsigned char id = next_stat_mbox->id;
3058 		struct scsi_cmnd *scmd = NULL;
3059 		struct myrb_cmdblk *cmd_blk = NULL;
3060 
3061 		if (id == MYRB_DCMD_TAG)
3062 			cmd_blk = &cb->dcmd_blk;
3063 		else if (id == MYRB_MCMD_TAG)
3064 			cmd_blk = &cb->mcmd_blk;
3065 		else {
3066 			scmd = scsi_host_find_tag(cb->host, id - 3);
3067 			if (scmd)
3068 				cmd_blk = scsi_cmd_priv(scmd);
3069 		}
3070 		if (cmd_blk)
3071 			cmd_blk->status = next_stat_mbox->status;
3072 		else
3073 			dev_err(&cb->pdev->dev,
3074 				"Unhandled command completion %d\n", id);
3075 
3076 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
3077 		if (++next_stat_mbox > cb->last_stat_mbox)
3078 			next_stat_mbox = cb->first_stat_mbox;
3079 
3080 		if (id < 3)
3081 			myrb_handle_cmdblk(cb, cmd_blk);
3082 		else
3083 			myrb_handle_scsi(cb, cmd_blk, scmd);
3084 	}
3085 	cb->next_stat_mbox = next_stat_mbox;
3086 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3087 	return IRQ_HANDLED;
3088 }
3089 
3090 struct myrb_privdata DAC960_PG_privdata = {
3091 	.hw_init =	DAC960_PG_hw_init,
3092 	.irq_handler =	DAC960_PG_intr_handler,
3093 	.mmio_size =	DAC960_PG_mmio_size,
};

3097 /*
3098  * DAC960 PD Series Controllers
3099  */
3100 
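/*
 * The PD series has no memory mailbox; commands are written directly
 * to the hardware mailbox registers, and every completion has to be
 * acknowledged individually.
 */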
3101 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3102 {
3103 	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3104 }
3105 
3106 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3107 {
3108 	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3109 }
3110 
3111 static inline void DAC960_PD_gen_intr(void __iomem *base)
3112 {
3113 	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
3114 }
3115 
3116 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3117 {
3118 	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3119 }
3120 
3121 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3122 {
3123 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3124 
3125 	return idb & DAC960_PD_IDB_HWMBOX_FULL;
3126 }
3127 
3128 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3129 {
3130 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3131 
3132 	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3133 }
3134 
3135 static inline void DAC960_PD_ack_intr(void __iomem *base)
3136 {
3137 	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3138 }
3139 
3140 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3141 {
3142 	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3143 
3144 	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3145 }
3146 
3147 static inline void DAC960_PD_enable_intr(void __iomem *base)
3148 {
3149 	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3150 }
3151 
3152 static inline void DAC960_PD_disable_intr(void __iomem *base)
3153 {
3154 	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3155 }
3156 
3157 static inline bool DAC960_PD_intr_enabled(void __iomem *base)
3158 {
3159 	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
3160 
3161 	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
3162 }
3163 
3164 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3165 		union myrb_cmd_mbox *mbox)
3166 {
3167 	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3168 	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3169 	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3170 	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3171 }
3172 
3173 static inline unsigned char
3174 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3175 {
3176 	return readb(base + DAC960_PD_STSID_OFFSET);
3177 }
3178 
3179 static inline unsigned short
3180 DAC960_PD_read_status(void __iomem *base)
3181 {
3182 	return readw(base + DAC960_PD_STS_OFFSET);
3183 }
3184 
3185 static inline bool
3186 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3187 		unsigned char *param0, unsigned char *param1)
3188 {
3189 	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3190 
3191 	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3192 		return false;
3193 	errsts &= ~DAC960_PD_ERRSTS_PENDING;
3194 	*error = errsts;
3195 	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3196 	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3197 	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3198 	return true;
3199 }
3200 
3201 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3202 {
3203 	void __iomem *base = cb->io_base;
3204 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3205 
3206 	while (DAC960_PD_hw_mbox_is_full(base))
3207 		udelay(1);
3208 	DAC960_PD_write_cmd_mbox(base, mbox);
3209 	DAC960_PD_hw_mbox_new_cmd(base);
3210 }
3211 
3212 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3213 		struct myrb_hba *cb, void __iomem *base)
3214 {
3215 	int timeout = 0;
3216 	unsigned char error, parm0, parm1;
3217 
3218 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3219 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3220 			(unsigned long)cb->io_addr);
3221 		return -EBUSY;
3222 	}
3223 	DAC960_PD_disable_intr(base);
3224 	DAC960_PD_ack_hw_mbox_status(base);
3225 	udelay(1000);
3226 	while (DAC960_PD_init_in_progress(base) &&
3227 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3228 		if (DAC960_PD_read_error_status(base, &error,
3229 					      &parm0, &parm1) &&
3230 		    myrb_err_status(cb, error, parm0, parm1))
3231 			return -EIO;
3232 		udelay(10);
3233 		timeout++;
3234 	}
3235 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3236 		dev_err(&pdev->dev,
3237 			"Timeout waiting for Controller Initialisation\n");
3238 		return -ETIMEDOUT;
3239 	}
3240 	if (!myrb_enable_mmio(cb, NULL)) {
3241 		dev_err(&pdev->dev,
3242 			"Unable to Enable Memory Mailbox Interface\n");
3243 		DAC960_PD_reset_ctrl(base);
3244 		return -ENODEV;
3245 	}
3246 	DAC960_PD_enable_intr(base);
3247 	cb->qcmd = DAC960_PD_qcmd;
3248 	cb->disable_intr = DAC960_PD_disable_intr;
3249 	cb->reset = DAC960_PD_reset_ctrl;
3250 
3251 	return 0;
3252 }
3253 
3254 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3255 {
3256 	struct myrb_hba *cb = arg;
3257 	void __iomem *base = cb->io_base;
3258 	unsigned long flags;
3259 
3260 	spin_lock_irqsave(&cb->queue_lock, flags);
3261 	while (DAC960_PD_hw_mbox_status_available(base)) {
3262 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3263 		struct scsi_cmnd *scmd = NULL;
3264 		struct myrb_cmdblk *cmd_blk = NULL;
3265 
3266 		if (id == MYRB_DCMD_TAG)
3267 			cmd_blk = &cb->dcmd_blk;
3268 		else if (id == MYRB_MCMD_TAG)
3269 			cmd_blk = &cb->mcmd_blk;
3270 		else {
3271 			scmd = scsi_host_find_tag(cb->host, id - 3);
3272 			if (scmd)
3273 				cmd_blk = scsi_cmd_priv(scmd);
3274 		}
3275 		if (cmd_blk)
3276 			cmd_blk->status = DAC960_PD_read_status(base);
3277 		else
3278 			dev_err(&cb->pdev->dev,
3279 				"Unhandled command completion %d\n", id);
3280 
3281 		DAC960_PD_ack_intr(base);
3282 		DAC960_PD_ack_hw_mbox_status(base);
3283 
3284 		if (id < 3)
3285 			myrb_handle_cmdblk(cb, cmd_blk);
3286 		else
3287 			myrb_handle_scsi(cb, cmd_blk, scmd);
3288 	}
3289 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3290 	return IRQ_HANDLED;
3291 }
3292 
3293 struct myrb_privdata DAC960_PD_privdata = {
3294 	.hw_init =	DAC960_PD_hw_init,
3295 	.irq_handler =	DAC960_PD_intr_handler,
3296 	.mmio_size =	DAC960_PD_mmio_size,
};

3300 /*
3301  * DAC960 P Series Controllers
3302  *
3303  * Similar to the DAC960 PD Series Controllers, but some commands have
3304  * to be translated.
3305  */
3306 
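/*
 * The helpers below convert between the old P series layout and the
 * current layout of the enquiry data, the device state and the
 * read/write command mailboxes. myrb_translate_to_rw_command() and
 * myrb_translate_from_rw_command() are inverses: the former is applied
 * on submission, the latter on completion.
 */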
3307 static inline void myrb_translate_enquiry(void *enq)
3308 {
3309 	memcpy(enq + 132, enq + 36, 64);
3310 	memset(enq + 36, 0, 96);
3311 }
3312 
3313 static inline void myrb_translate_devstate(void *state)
3314 {
3315 	memcpy(state + 2, state + 3, 1);
3316 	memmove(state + 4, state + 5, 2);
3317 	memmove(state + 6, state + 8, 4);
3318 }
3319 
3320 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3321 {
3322 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3323 	int ldev_num = mbox->type5.ld.ldev_num;
3324 
3325 	mbox->bytes[3] &= 0x7;
3326 	mbox->bytes[3] |= mbox->bytes[7] << 6;
3327 	mbox->bytes[7] = ldev_num;
3328 }
3329 
3330 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3331 {
3332 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3333 	int ldev_num = mbox->bytes[7];
3334 
3335 	mbox->bytes[7] = mbox->bytes[3] >> 6;
3336 	mbox->bytes[3] &= 0x7;
3337 	mbox->bytes[3] |= ldev_num << 3;
3338 }
3339 
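/*
 * Queue a command to a P series controller, rewriting the opcode to
 * its _OLD equivalent and repacking read/write mailboxes into the old
 * layout. DAC960_P_intr_handler() reverses the translation once the
 * command completes.
 */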
3340 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3341 {
3342 	void __iomem *base = cb->io_base;
3343 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3344 
3345 	switch (mbox->common.opcode) {
3346 	case MYRB_CMD_ENQUIRY:
3347 		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3348 		break;
3349 	case MYRB_CMD_GET_DEVICE_STATE:
3350 		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3351 		break;
3352 	case MYRB_CMD_READ:
3353 		mbox->common.opcode = MYRB_CMD_READ_OLD;
3354 		myrb_translate_to_rw_command(cmd_blk);
3355 		break;
3356 	case MYRB_CMD_WRITE:
3357 		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3358 		myrb_translate_to_rw_command(cmd_blk);
3359 		break;
3360 	case MYRB_CMD_READ_SG:
3361 		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3362 		myrb_translate_to_rw_command(cmd_blk);
3363 		break;
3364 	case MYRB_CMD_WRITE_SG:
3365 		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3366 		myrb_translate_to_rw_command(cmd_blk);
3367 		break;
3368 	default:
3369 		break;
3370 	}
3371 	while (DAC960_PD_hw_mbox_is_full(base))
3372 		udelay(1);
3373 	DAC960_PD_write_cmd_mbox(base, mbox);
3374 	DAC960_PD_hw_mbox_new_cmd(base);
}

3378 static int DAC960_P_hw_init(struct pci_dev *pdev,
3379 		struct myrb_hba *cb, void __iomem *base)
3380 {
3381 	int timeout = 0;
3382 	unsigned char error, parm0, parm1;
3383 
3384 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3385 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3386 			(unsigned long)cb->io_addr);
3387 		return -EBUSY;
3388 	}
3389 	DAC960_PD_disable_intr(base);
3390 	DAC960_PD_ack_hw_mbox_status(base);
3391 	udelay(1000);
3392 	while (DAC960_PD_init_in_progress(base) &&
3393 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3394 		if (DAC960_PD_read_error_status(base, &error,
3395 						&parm0, &parm1) &&
3396 		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
3398 		udelay(10);
3399 		timeout++;
3400 	}
3401 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3402 		dev_err(&pdev->dev,
3403 			"Timeout waiting for Controller Initialisation\n");
3404 		return -ETIMEDOUT;
3405 	}
3406 	if (!myrb_enable_mmio(cb, NULL)) {
3407 		dev_err(&pdev->dev,
3408 			"Unable to allocate DMA mapped memory\n");
3409 		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
3411 	}
3412 	DAC960_PD_enable_intr(base);
3413 	cb->qcmd = DAC960_P_qcmd;
3414 	cb->disable_intr = DAC960_PD_disable_intr;
3415 	cb->reset = DAC960_PD_reset_ctrl;
3416 
3417 	return 0;
3418 }
3419 
3420 static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3421 {
3422 	struct myrb_hba *cb = arg;
3423 	void __iomem *base = cb->io_base;
3424 	unsigned long flags;
3425 
3426 	spin_lock_irqsave(&cb->queue_lock, flags);
3427 	while (DAC960_PD_hw_mbox_status_available(base)) {
3428 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3429 		struct scsi_cmnd *scmd = NULL;
3430 		struct myrb_cmdblk *cmd_blk = NULL;
3431 		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

3435 		if (id == MYRB_DCMD_TAG)
3436 			cmd_blk = &cb->dcmd_blk;
3437 		else if (id == MYRB_MCMD_TAG)
3438 			cmd_blk = &cb->mcmd_blk;
3439 		else {
3440 			scmd = scsi_host_find_tag(cb->host, id - 3);
3441 			if (scmd)
3442 				cmd_blk = scsi_cmd_priv(scmd);
3443 		}
3444 		if (cmd_blk)
3445 			cmd_blk->status = DAC960_PD_read_status(base);
3446 		else
3447 			dev_err(&cb->pdev->dev,
3448 				"Unhandled command completion %d\n", id);
3449 
3450 		DAC960_PD_ack_intr(base);
3451 		DAC960_PD_ack_hw_mbox_status(base);
3452 
3453 		if (!cmd_blk)
3454 			continue;
3455 
3456 		mbox = &cmd_blk->mbox;
3457 		op = mbox->common.opcode;
3458 		switch (op) {
3459 		case MYRB_CMD_ENQUIRY_OLD:
3460 			mbox->common.opcode = MYRB_CMD_ENQUIRY;
3461 			myrb_translate_enquiry(cb->enquiry);
3462 			break;
3463 		case MYRB_CMD_READ_OLD:
3464 			mbox->common.opcode = MYRB_CMD_READ;
3465 			myrb_translate_from_rw_command(cmd_blk);
3466 			break;
3467 		case MYRB_CMD_WRITE_OLD:
3468 			mbox->common.opcode = MYRB_CMD_WRITE;
3469 			myrb_translate_from_rw_command(cmd_blk);
3470 			break;
3471 		case MYRB_CMD_READ_SG_OLD:
3472 			mbox->common.opcode = MYRB_CMD_READ_SG;
3473 			myrb_translate_from_rw_command(cmd_blk);
3474 			break;
3475 		case MYRB_CMD_WRITE_SG_OLD:
3476 			mbox->common.opcode = MYRB_CMD_WRITE_SG;
3477 			myrb_translate_from_rw_command(cmd_blk);
3478 			break;
3479 		default:
3480 			break;
3481 		}
3482 		if (id < 3)
3483 			myrb_handle_cmdblk(cb, cmd_blk);
3484 		else
3485 			myrb_handle_scsi(cb, cmd_blk, scmd);
3486 	}
3487 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3488 	return IRQ_HANDLED;
3489 }
3490 
3491 struct myrb_privdata DAC960_P_privdata = {
3492 	.hw_init =	DAC960_P_hw_init,
3493 	.irq_handler =	DAC960_P_intr_handler,
3494 	.mmio_size =	DAC960_PD_mmio_size,
3495 };
3496 
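/*
 * Allocate the Scsi_Host and HBA structure, map the controller's
 * register window, run the board-specific hardware initialization and
 * install the interrupt handler.
 */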
3497 static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3498 		const struct pci_device_id *entry)
3499 {
3500 	struct myrb_privdata *privdata =
3501 		(struct myrb_privdata *)entry->driver_data;
3502 	irq_handler_t irq_handler = privdata->irq_handler;
3503 	unsigned int mmio_size = privdata->mmio_size;
3504 	struct Scsi_Host *shost;
3505 	struct myrb_hba *cb = NULL;
3506 
3507 	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3508 	if (!shost) {
3509 		dev_err(&pdev->dev, "Unable to allocate Controller\n");
3510 		return NULL;
3511 	}
3512 	shost->max_cmd_len = 12;
3513 	shost->max_lun = 256;
3514 	cb = shost_priv(shost);
3515 	mutex_init(&cb->dcmd_mutex);
3516 	mutex_init(&cb->dma_mutex);
3517 	cb->pdev = pdev;
3518 
3519 	if (pci_enable_device(pdev))
3520 		goto failure;
3521 
3522 	if (privdata->hw_init == DAC960_PD_hw_init ||
3523 	    privdata->hw_init == DAC960_P_hw_init) {
3524 		cb->io_addr = pci_resource_start(pdev, 0);
3525 		cb->pci_addr = pci_resource_start(pdev, 1);
3526 	} else
3527 		cb->pci_addr = pci_resource_start(pdev, 0);
3528 
3529 	pci_set_drvdata(pdev, cb);
3530 	spin_lock_init(&cb->queue_lock);
3531 	if (mmio_size < PAGE_SIZE)
3532 		mmio_size = PAGE_SIZE;
3533 	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3534 	if (cb->mmio_base == NULL) {
3535 		dev_err(&pdev->dev,
3536 			"Unable to map Controller Register Window\n");
3537 		goto failure;
3538 	}
3539 
3540 	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3541 	if (privdata->hw_init(pdev, cb, cb->io_base))
3542 		goto failure;
3543 
3544 	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3545 		dev_err(&pdev->dev,
3546 			"Unable to acquire IRQ Channel %d\n", pdev->irq);
3547 		goto failure;
3548 	}
3549 	cb->irq = pdev->irq;
3550 	return cb;
3551 
3552 failure:
3553 	dev_err(&pdev->dev,
3554 		"Failed to initialize Controller\n");
3555 	myrb_cleanup(cb);
3556 	return NULL;
3557 }
3558 
3559 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3560 {
3561 	struct myrb_hba *cb;
3562 	int ret;
3563 
3564 	cb = myrb_detect(dev, entry);
3565 	if (!cb)
3566 		return -ENODEV;
3567 
3568 	ret = myrb_get_hba_config(cb);
3569 	if (ret < 0) {
3570 		myrb_cleanup(cb);
3571 		return ret;
3572 	}
3573 
3574 	if (!myrb_create_mempools(dev, cb)) {
3575 		ret = -ENOMEM;
3576 		goto failed;
3577 	}
3578 
3579 	ret = scsi_add_host(cb->host, &dev->dev);
3580 	if (ret) {
3581 		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3582 		myrb_destroy_mempools(cb);
3583 		goto failed;
3584 	}
3585 	scsi_scan_host(cb->host);
3586 	return 0;
3587 failed:
3588 	myrb_cleanup(cb);
3589 	return ret;
}

3593 static void myrb_remove(struct pci_dev *pdev)
3594 {
3595 	struct myrb_hba *cb = pci_get_drvdata(pdev);
3596 
	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
3598 	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3599 	myrb_cleanup(cb);
3600 	myrb_destroy_mempools(cb);
}

3604 static const struct pci_device_id myrb_id_table[] = {
3605 	{
3606 		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3607 			       PCI_DEVICE_ID_DEC_21285,
3608 			       PCI_VENDOR_ID_MYLEX,
3609 			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
3610 		.driver_data	= (unsigned long) &DAC960_LA_privdata,
3611 	},
3612 	{
3613 		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3614 	},
3615 	{
3616 		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3617 	},
3618 	{
3619 		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3620 	},
3621 	{0, },
3622 };
3623 
3624 MODULE_DEVICE_TABLE(pci, myrb_id_table);
3625 
3626 static struct pci_driver myrb_pci_driver = {
3627 	.name		= "myrb",
3628 	.id_table	= myrb_id_table,
3629 	.probe		= myrb_probe,
3630 	.remove		= myrb_remove,
3631 };
3632 
3633 static int __init myrb_init_module(void)
3634 {
3635 	int ret;
3636 
3637 	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3638 	if (!myrb_raid_template)
3639 		return -ENODEV;
3640 
3641 	ret = pci_register_driver(&myrb_pci_driver);
3642 	if (ret)
3643 		raid_class_release(myrb_raid_template);
3644 
3645 	return ret;
3646 }
3647 
3648 static void __exit myrb_cleanup_module(void)
3649 {
3650 	pci_unregister_driver(&myrb_pci_driver);
3651 	raid_class_release(myrb_raid_template);
3652 }
3653 
3654 module_init(myrb_init_module);
3655 module_exit(myrb_cleanup_module);
3656 
3657 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3658 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3659 MODULE_LICENSE("GPL");
3660