xref: /openbmc/linux/drivers/scsi/device_handler/scsi_dh_rdac.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
/*
 * Engenio/LSI RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller mode definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESCENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};

struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

struct rdac_controller {
	u8			subsys_id[SUBSYS_ID_LEN];
	u8			slot_id[SLOT_ID_LEN];
	int			use_ms10;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
	u8	index;
	u8	array_name[ARRAY_LABEL_LEN];
	spinlock_t		ms_lock;
	int			ms_queued;
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
};

struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code; /* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4]; /* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[16];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_dh_data {
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;
#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
#define RDAC_LUN_AVT		2
	char			lun_state;
	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static const char *lun_state[] =
{
	"unowned",
	"owned",
	"owned (AVT mode)",
};

struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);

/*
 * Module parameter to enable rdac debug logging.
 * Two bits are reserved for each type of logging; only two types are
 * defined for now.  More can be added later if required.
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)

static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
	BUG_ON(scsi_dh_data == NULL);
	return ((struct rdac_dh_data *) scsi_dh_data->buf);
}

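/*
 * Allocate a BLOCK_PC request for an RDAC management command and map the
 * caller's buffer into it.  The request is marked fail-fast so that path
 * errors are reported promptly rather than being retried by the midlayer.
 */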
static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);

	if (!rq) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}

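/*
 * Build the MODE SELECT request used to transfer ownership of the queued
 * LUNs to this controller.  Depending on the controller's capabilities
 * (use_ms10), either the legacy mode-6 form or the expanded mode-10
 * subpage of page 0x2c is filled in from h->ctlr->mode_select.
 */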
static struct request *rdac_failover_get(struct scsi_device *sdev,
					 struct rdac_dh_data *h)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESCENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	return rq;
}

static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	flush_workqueue(kmpath_rdacd);
	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}

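/*
 * Look up the controller identified by subsys_id/slot_id in ctlr_list and
 * take a reference if it is already known; otherwise allocate and
 * initialize a new entry.  The allocation uses GFP_ATOMIC because
 * list_lock is held across the lookup and insertion.
 */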
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
						char *array_name)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
			  (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

	/* update the controller index */
	if (slot_id[1] == 0x31)
		ctlr->index = 0;
	else
		ctlr->index = 1;

	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}

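/*
 * Issue an EVPD INQUIRY for the given vendor-specific page code and land
 * the response in h->inq.  Returns a SCSI_DH_* status.
 */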
static int submit_inquiry(struct scsi_device *sdev, int page_code,
			  unsigned int len, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_rdac_req(sdev, &h->inq, len, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	err = blk_execute_rq(q, NULL, rq, 1);
	if (err == -EIO)
		err = SCSI_DH_IO;

	blk_put_request(rq);
done:
	return err;
}

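/*
 * Read inquiry page 0xC8 ("edid") to learn which LUN this device maps to
 * on the array and to extract the user-assigned array label (only every
 * other byte of array_user_label is copied into array_name).
 */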
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name)
{
	int err, i;
	struct c8_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c8;
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */

		for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
			array_name[i] = inqp->array_user_label[(2 * i) + 1];

		array_name[ARRAY_LABEL_LEN - 1] = '\0';
	}
	return err;
}

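/*
 * Query inquiry page 0xC9 ("vace") to determine whether this controller
 * currently owns the LUN, or whether the array is in AVT mode, and set
 * h->lun_state and h->state accordingly.  An unowned LUN leaves the path
 * in the passive state.
 */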
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c9_inquiry *inqp;

	h->lun_state = RDAC_LUN_UNOWNED;
	h->state = RDAC_STATE_ACTIVE;
	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c9;
		if ((inqp->avte_cvp >> 7) == 0x1) {
			/* LUN in AVT mode */
			sdev_printk(KERN_NOTICE, sdev,
				    "%s: AVT mode detected\n",
				    RDAC_NAME);
			h->lun_state = RDAC_LUN_AVT;
		} else if ((inqp->avte_cvp & 0x1) != 0) {
			/* LUN was owned by the controller */
			h->lun_state = RDAC_LUN_OWNED;
		}
	}

	if (h->lun_state == RDAC_LUN_UNOWNED)
		h->state = RDAC_STATE_PASSIVE;

	return err;
}

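/*
 * Read inquiry page 0xC4 ("subs") for the subsystem and slot identifiers
 * and bind this device to the matching rdac_controller, creating one if
 * necessary.
 */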
static int initialize_controller(struct scsi_device *sdev,
				 struct rdac_dh_data *h, char *array_name)
{
	int err;
	struct c4_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c4;
		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
					array_name);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
	}
	return err;
}

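/*
 * Read inquiry page 0xC2 ("swr4") and decide, based on the number of LUNs
 * the firmware supports, whether failover must use MODE SELECT(10) with
 * the expanded page or can use the legacy MODE SELECT(6) page.
 */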
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c2_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c2;
		/*
		 * If MODE6_MAX_LUN or more LUNs are supported, use
		 * mode select 10
		 */
		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
			h->ctlr->use_ms10 = 1;
		else
			h->ctlr->use_ms10 = 0;
	}
	return err;
}

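/*
 * Decode the sense data from a failed MODE SELECT and decide whether the
 * command is worth retrying (unit attention, LUN becoming ready, command
 * lock contention) or must be treated as an I/O error.
 */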
static int mode_select_handle_sense(struct scsi_device *sdev,
					unsigned char *sensebuf)
{
	struct scsi_sense_hdr sense_hdr;
	int err = SCSI_DH_IO, ret;
	struct rdac_dh_data *h = get_rdac_data(sdev);

	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	switch (sense_hdr.sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_RETRY;
		break;
	default:
		break;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);

done:
	return err;
}

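/*
 * Workqueue handler: drain the controller's queue of pending activation
 * requests, mark each queued LUN in the mode page's lun_table and issue a
 * single MODE_SELECT to move them to this controller, retrying up to
 * RDAC_RETRY_COUNT times on retryable sense data.  Completion callbacks
 * queued by rdac_activate() are invoked with the final status.
 */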
static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct request *rq;
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = get_rdac_data(sdev);
	struct request_queue *q = sdev->request_queue;
	int err, retry_cnt = RDAC_RETRY_COUNT;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);
	u8 *lun_table;

	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

	if (ctlr->use_ms10)
		lun_table = ctlr->mode_select.expanded.lun_table;
	else
		lun_table = ctlr->mode_select.legacy.lun_table;

retry:
	err = SCSI_DH_RES_TEMP_UNAVAIL;
	rq = rdac_failover_get(sdev, h);
	if (!rq)
		goto done;

	list_for_each_entry(qdata, &list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"%s MODE_SELECT command",
		(char *) h->ctlr->array_name, h->ctlr->index,
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	err = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (err != SCSI_DH_OK) {
		err = mode_select_handle_sense(sdev, h->sense);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
	}
	if (err == SCSI_DH_OK) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
				"MODE_SELECT completed",
				(char *) h->ctlr->array_name, h->ctlr->index);
	}

done:
	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}

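/*
 * Queue an activation request on the owning controller and kick the
 * kmpath_rdacd workqueue if a MODE_SELECT is not already outstanding;
 * requests added to ms_head in the meantime are batched into the same
 * MODE_SELECT by send_mode_select().
 */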
static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = get_rdac_data(sdev);
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);
	return SCSI_DH_OK;
}

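/*
 * scsi_dh activate callback: re-check ownership and, if the LUN is not
 * owned by this controller, queue a mode select to take it over.  The
 * completion function is called either here (on error, or when nothing
 * needs to be done) or later from send_mode_select().
 */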
static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int err = SCSI_DH_OK;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	if (h->lun_state == RDAC_LUN_UNOWNED) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}

static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int ret = BLKPREP_OK;

	if (h->state != RDAC_STATE_ACTIVE) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;
}

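/*
 * scsi_dh sense handler: map the array's NOT READY / ILLEGAL REQUEST /
 * UNIT ATTENTION sense codes onto retry or path-failure decisions.
 * Anything not recognized here is left to normal SCSI error handling
 * (SCSI_RETURN_NOT_HANDLED).
 */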
static int rdac_check_sense(struct scsi_device *sdev,
				struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
			"I/O returned with sense %02x/%02x/%02x",
			(char *) h->ctlr->array_name, h->ctlr->index,
			sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchronisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescence in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescence in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN.
			 * Fail the path, so that the other path can be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress, just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}

static const struct scsi_dh_devlist rdac_dev_list[] = {
	{"IBM", "1722"},
	{"IBM", "1724"},
	{"IBM", "1726"},
	{"IBM", "1742"},
	{"IBM", "1745"},
	{"IBM", "1746"},
	{"IBM", "1814"},
	{"IBM", "1815"},
	{"IBM", "1818"},
	{"IBM", "3526"},
	{"SGI", "TP9400"},
	{"SGI", "TP9500"},
	{"SGI", "IS"},
	{"STK", "OPENstorage D280"},
	{"SUN", "CSM200_R"},
	{"SUN", "LCSM100_I"},
	{"SUN", "LCSM100_S"},
	{"SUN", "LCSM100_E"},
	{"SUN", "LCSM100_F"},
	{"DELL", "MD3000"},
	{"DELL", "MD3000i"},
	{"DELL", "MD32xx"},
	{"DELL", "MD32xxi"},
	{"DELL", "MD36xxi"},
	{"LSI", "INF-01-00"},
	{"ENGENIO", "INF-01-00"},
	{"STK", "FLEXLINE 380"},
	{"SUN", "CSM100_R_FC"},
	{"SUN", "STK6580_6780"},
	{"SUN", "SUN_6180"},
	{NULL, NULL},
};

static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);

static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.devlist = rdac_dev_list,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
};

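/*
 * Attach the handler to a matching device: allocate the per-device
 * rdac_dh_data, discover the LUN number and array name, bind to the
 * owning controller and probe its capabilities, then publish
 * sdev->scsi_dh_data under the queue lock.
 */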
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;
	int err;
	char array_name[ARRAY_LABEL_LEN];

	scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
			       + sizeof(*h), GFP_KERNEL);
	if (!scsi_dh_data) {
		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
			    RDAC_NAME);
		return 0;
	}

	scsi_dh_data->scsi_dh = &rdac_dh;
	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	if (!try_module_get(THIS_MODULE))
		goto clean_ctlr;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	sdev->scsi_dh_data = scsi_dh_data;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s)\n",
		    RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);

	return 0;

clean_ctlr:
	kref_put(&h->ctlr->kref, release_controller);

failed:
	kfree(scsi_dh_data);
	sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
		    RDAC_NAME);
	return -EINVAL;
}

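/*
 * Detach the handler: unpublish sdev->scsi_dh_data under the queue lock,
 * drop the controller reference (which frees it and flushes kmpath_rdacd
 * when the last user goes away) and release the per-device data.
 */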
static void rdac_bus_detach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	scsi_dh_data = sdev->scsi_dh_data;
	sdev->scsi_dh_data = NULL;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_controller);
	kfree(scsi_dh_data);
	module_put(THIS_MODULE);
	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
}

static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.\n");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");
		r = -ENOMEM;
	}
done:
	return r;
}

static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");