xref: /openbmc/linux/drivers/scsi/aacraid/commsup.c (revision 643d1f7f)
1 /*
2  *	Adaptec AAC series RAID controller driver
3  *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
4  *
5  * based on the old aacraid driver that is..
6  * Adaptec aacraid device driver for Linux.
7  *
8  * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2, or (at your option)
13  * any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; see the file COPYING.  If not, write to
22  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23  *
24  * Module Name:
25  *  commsup.c
26  *
27  * Abstract: Contains all routines that are required for FSA host/adapter
28  *    communication.
29  *
30  */
31 
32 #include <linux/kernel.h>
33 #include <linux/init.h>
34 #include <linux/types.h>
35 #include <linux/sched.h>
36 #include <linux/pci.h>
37 #include <linux/spinlock.h>
38 #include <linux/slab.h>
39 #include <linux/completion.h>
40 #include <linux/blkdev.h>
41 #include <linux/delay.h>
42 #include <linux/kthread.h>
43 #include <linux/interrupt.h>
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_device.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <asm/semaphore.h>
49 
50 #include "aacraid.h"
51 
52 /**
53  *	fib_map_alloc		-	allocate the fib objects
54  *	@dev: Adapter to allocate for
55  *
56  *	Allocate and map the shared PCI space for the FIB blocks used to
57  *	talk to the Adaptec firmware.
58  */
59 
60 static int fib_map_alloc(struct aac_dev *dev)
61 {
62 	dprintk((KERN_INFO
63 	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
64 	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
65 	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
66 	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
67 	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
68 	  &dev->hw_fib_pa))==NULL)
69 		return -ENOMEM;
70 	return 0;
71 }
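/*
 * Note that this is a single contiguous DMA-coherent region sized to hold
 * (can_queue + AAC_NUM_MGT_FIB) hardware fibs of dev->max_fib_size bytes
 * each; every fib later handed out by aac_fib_alloc() is carved from it.
 */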
72 
73 /**
74  *	aac_fib_map_free		-	free the fib objects
75  *	@dev: Adapter to free
76  *
77  *	Free the PCI mappings and the memory allocated for FIB blocks
78  *	on this adapter.
79  */
80 
81 void aac_fib_map_free(struct aac_dev *dev)
82 {
83 	pci_free_consistent(dev->pdev,
84 	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
85 	  dev->hw_fib_va, dev->hw_fib_pa);
86 	dev->hw_fib_va = NULL;
87 	dev->hw_fib_pa = 0;
88 }
89 
90 /**
91  *	aac_fib_setup	-	setup the fibs
92  *	@dev: Adapter to set up
93  *
94  *	Allocate the PCI space for the fibs, map it and then initialise the
95  *	fib area, the unmapped fib data and also the free list
96  */
97 
98 int aac_fib_setup(struct aac_dev * dev)
99 {
100 	struct fib *fibptr;
101 	struct hw_fib *hw_fib;
102 	dma_addr_t hw_fib_pa;
103 	int i;
104 
105 	while (((i = fib_map_alloc(dev)) == -ENOMEM)
106 	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
107 		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
108 		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
109 	}
110 	if (i<0)
111 		return -ENOMEM;
112 
113 	hw_fib = dev->hw_fib_va;
114 	hw_fib_pa = dev->hw_fib_pa;
115 	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
116 	/*
117 	 *	Initialise the fibs
118 	 */
119 	for (i = 0, fibptr = &dev->fibs[i];
120 		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
121 		i++, fibptr++)
122 	{
123 		fibptr->dev = dev;
124 		fibptr->hw_fib_va = hw_fib;
125 		fibptr->data = (void *) fibptr->hw_fib_va->data;
126 		fibptr->next = fibptr+1;	/* Forward chain the fibs */
127 		init_MUTEX_LOCKED(&fibptr->event_wait);
128 		spin_lock_init(&fibptr->event_lock);
129 		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
130 		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
131 		fibptr->hw_fib_pa = hw_fib_pa;
132 		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
133 		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
134 	}
135 	/*
136 	 *	Add the fib chain to the free list
137 	 */
138 	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
139 	/*
140 	 *	Enable this to debug out of queue space
141 	 */
142 	dev->free_fib = &dev->fibs[0];
143 	return 0;
144 }
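/*
 * Typical life cycle of a fib from this pool, as a minimal sketch (error
 * handling omitted, and "cmd" stands for whatever request structure the
 * caller builds in the fib data area):
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	aac_fib_init(fibptr);
 *	cmd = fib_data(fibptr);
 *	... fill in *cmd ...
 *	status = aac_fib_send(ContainerCommand, fibptr, sizeof(*cmd),
 *			      FsaNormal, 1, 1, NULL, NULL);
 *	aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */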
145 
146 /**
147  *	aac_fib_alloc	-	allocate a fib
148  *	@dev: Adapter to allocate the fib for
149  *
150  *	Allocate a fib from the adapter fib pool. If the pool is empty we
151  *	return NULL.
152  */
153 
154 struct fib *aac_fib_alloc(struct aac_dev *dev)
155 {
156 	struct fib * fibptr;
157 	unsigned long flags;
158 	spin_lock_irqsave(&dev->fib_lock, flags);
159 	fibptr = dev->free_fib;
160 	if(!fibptr){
161 		spin_unlock_irqrestore(&dev->fib_lock, flags);
162 		return fibptr;
163 	}
164 	dev->free_fib = fibptr->next;
165 	spin_unlock_irqrestore(&dev->fib_lock, flags);
166 	/*
167 	 *	Set the proper node type code and node byte size
168 	 */
169 	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
170 	fibptr->size = sizeof(struct fib);
171 	/*
172 	 *	Null out fields that depend on being zero at the start of
173 	 *	each I/O
174 	 */
175 	fibptr->hw_fib_va->header.XferState = 0;
176 	fibptr->flags = 0;
177 	fibptr->callback = NULL;
178 	fibptr->callback_data = NULL;
179 
180 	return fibptr;
181 }
182 
183 /**
184  *	aac_fib_free	-	free a fib
185  *	@fibptr: fib to free up
186  *
187  *	Frees up a fib and places it on the appropriate queue
188  */
189 
190 void aac_fib_free(struct fib *fibptr)
191 {
192 	unsigned long flags;
193 
194 	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
195 	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
196 		aac_config.fib_timeouts++;
197 	if (fibptr->hw_fib_va->header.XferState != 0) {
198 		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
199 			 (void*)fibptr,
200 			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
201 	}
202 	fibptr->next = fibptr->dev->free_fib;
203 	fibptr->dev->free_fib = fibptr;
204 	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
205 }
206 
207 /**
208  *	aac_fib_init	-	initialise a fib
209  *	@fibptr: The fib to initialize
210  *
211  *	Set up the generic fib fields ready for use
212  */
213 
214 void aac_fib_init(struct fib *fibptr)
215 {
216 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
217 
218 	hw_fib->header.StructType = FIB_MAGIC;
219 	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
220 	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
221 	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
222 	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
223 	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
224 }
225 
226 /**
227  *	fib_dealloc		-	deallocate a fib
228  *	@fibptr: fib to deallocate
229  *
230  *	Will deallocate and return to the free pool the FIB pointed to by the
231  *	caller.
232  */
233 
234 static void fib_dealloc(struct fib * fibptr)
235 {
236 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
237 	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
238 	hw_fib->header.XferState = 0;
239 }
240 
241 /*
242  *	Communication primitives define and support the queuing method we use to
243  *	support host to adapter communication. All queue accesses happen through
244  *	these routines, which are the only routines with knowledge of how these
245  *	queues are implemented.
246  */
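/*
 * Concretely, each queue is a ring shared with the firmware: the host and
 * the adapter each advance one of a pair of little-endian 32-bit indices
 * held in shared memory (q->headers.producer and q->headers.consumer).
 * A ring is empty when producer == consumer, and is treated as full when
 * advancing the producer would run into the consumer, as checked in
 * aac_get_entry() below.
 */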
247 
248 /**
249  *	aac_get_entry		-	get a queue entry
250  *	@dev: Adapter
251  *	@qid: Queue Number
252  *	@entry: Entry return
253  *	@index: Index return
254  *	@nonotify: notification control
255  *
256  *	With a priority the routine returns a queue entry if the queue has free
257  *	entries. If the queue is full (no free entries) then no entry is returned
258  *	and the function returns 0, otherwise 1 is returned.
259  */
260 
261 static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
262 {
263 	struct aac_queue * q;
264 	unsigned long idx;
265 
266 	/*
267 	 *	All of the queues wrap when they reach the end, so we check
268 	 *	to see if they have reached the end and if they have we just
269 	 *	set the index back to zero. This is a wrap. You could or off
270 	 *	the high bits in all updates but this is a bit faster I think.
271 	 */
272 
273 	q = &dev->queues->queue[qid];
274 
275 	idx = *index = le32_to_cpu(*(q->headers.producer));
276 	/* Interrupt Moderation, only interrupt for first two entries */
277 	if (idx != le32_to_cpu(*(q->headers.consumer))) {
278 		if (--idx == 0) {
279 			if (qid == AdapNormCmdQueue)
280 				idx = ADAP_NORM_CMD_ENTRIES;
281 			else
282 				idx = ADAP_NORM_RESP_ENTRIES;
283 		}
284 		if (idx != le32_to_cpu(*(q->headers.consumer)))
285 			*nonotify = 1;
286 	}
287 
288 	if (qid == AdapNormCmdQueue) {
289 		if (*index >= ADAP_NORM_CMD_ENTRIES)
290 			*index = 0; /* Wrap to front of the Producer Queue. */
291 	} else {
292 		if (*index >= ADAP_NORM_RESP_ENTRIES)
293 			*index = 0; /* Wrap to front of the Producer Queue. */
294 	}
295 
296 	/* Queue is full */
297 	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
298 		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
299 				qid, q->numpending);
300 		return 0;
301 	} else {
302 		*entry = q->base + *index;
303 		return 1;
304 	}
305 }
306 
307 /**
308  *	aac_queue_get		-	get the next free QE
309  *	@dev: Adapter
310  *	@index: Returned index
311  *	@qid: Queue number
312  *	@hw_fib: Fib to associate with the queue entry
313  *	@wait: Wait if queue full
314  *	@fibptr: Driver fib object to go with fib
315  *	@nonotify: Don't notify the adapter
316  *
317  *	Gets the next free QE off the requested priority adapter command
318  *	queue and associates the Fib with the QE. The QE represented by
319  *	index is ready to insert on the queue when this routine returns
320  *	success.
321  */
322 
323 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
324 {
325 	struct aac_entry * entry = NULL;
326 	int map = 0;
327 
328 	if (qid == AdapNormCmdQueue) {
329 		/* if there are no entries, wait for some if the caller wants to */
330 		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
331 			printk(KERN_ERR "GetEntries failed\n");
332 		}
333 		/*
334 		 *	Setup queue entry with a command, status and fib mapped
335 		 */
336 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
337 		map = 1;
338 	} else {
339 		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
340 			/* if there are no entries, wait for some if the caller wants to */
341 		}
342 		/*
343 		 *	Setup queue entry with command, status and fib mapped
344 		 */
345 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
346 		entry->addr = hw_fib->header.SenderFibAddress;
347 		/* Restore the adapter's pointer to the FIB */
348 		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
349 		map = 0;
350 	}
351 	/*
352 	 *	If MapFib is true then we need to map the Fib and put pointers
353 	 *	in the queue entry.
354 	 */
355 	if (map)
356 		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
357 	return 0;
358 }
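/*
 * The two branches above differ only in where the queue entry's address
 * comes from: host-originated commands (AdapNormCmdQueue) store the DMA
 * address of our hw_fib, while responses echo back the SenderFibAddress
 * the adapter supplied so it can locate its own fib again.
 */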
359 
360 /*
361  *	Define the highest level of host to adapter communication routines.
362  *	These routines will support host to adapter FS communication. These
363  *	routines have no knowledge of the communication method used. This level
364  *	sends and receives FIBs. This level has no knowledge of how these FIBs
365  *	get passed back and forth.
366  */
367 
368 /**
369  *	aac_fib_send	-	send a fib to the adapter
370  *	@command: Command to send
371  *	@fibptr: The fib
372  *	@size: Size of fib data area
373  *	@priority: Priority of Fib
374  *	@wait: Async/sync select
375  *	@reply: True if a reply is wanted
376  *	@callback: Called with reply
377  *	@callback_data: Passed to callback
378  *
379  *	Sends the requested FIB to the adapter and optionally will wait for a
380  *	response FIB. If the caller does not wish to wait for a response then
381  *	an event to wait on must be supplied. This event will be set when a
382  *	response FIB is received from the adapter.
383  */
384 
385 int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
386 		int priority, int wait, int reply, fib_callback callback,
387 		void *callback_data)
388 {
389 	struct aac_dev * dev = fibptr->dev;
390 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
391 	unsigned long flags = 0;
392 	unsigned long qflags;
393 
394 	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
395 		return -EBUSY;
396 	/*
397 	 *	There are 5 cases with the wait and response requested flags.
398 	 *	The only invalid cases are if the caller requests to wait and
399 	 *	does not request a response and if the caller does not want a
400 	 *	response and the Fib is not allocated from the pool. If a response
401 	 *	is not requested the Fib will just be deallocated by the DPC
402 	 *	routine when the response comes back from the adapter. No
403 	 *	further processing will be done besides deleting the Fib. We
404 	 *	will have a debug mode where the adapter can notify the host
405 	 *	it had a problem and the host can log that fact.
406 	 */
407 	fibptr->flags = 0;
408 	if (wait && !reply) {
409 		return -EINVAL;
410 	} else if (!wait && reply) {
411 		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
412 		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
413 	} else if (!wait && !reply) {
414 		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
415 		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
416 	} else if (wait && reply) {
417 		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
418 		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
419 	}
420 	/*
421 	 *	Map the fib into 32bits by using the fib number
422 	 */
423 
424 	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
425 	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
426 	/*
427 	 *	Set FIB state to indicate where it came from and if we want a
428 	 *	response from the adapter. Also load the command from the
429 	 *	caller.
430 	 *
431 	 *	Map the hw fib pointer as a 32bit value
432 	 */
433 	hw_fib->header.Command = cpu_to_le16(command);
434 	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
435 	fibptr->hw_fib_va->header.Flags = 0;	/* 0 the flags field - internal only*/
436 	/*
437 	 *	Set the size of the Fib we want to send to the adapter
438 	 */
439 	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
440 	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
441 		return -EMSGSIZE;
442 	}
443 	/*
444 	 *	Get a queue entry, connect the FIB to it, and send a notify to
445 	 *	tell the adapter a command is ready.
446 	 */
447 	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
448 
449 	/*
450 	 *	Fill in the Callback and CallbackContext if we are not
451 	 *	going to wait.
452 	 */
453 	if (!wait) {
454 		fibptr->callback = callback;
455 		fibptr->callback_data = callback_data;
456 		fibptr->flags = FIB_CONTEXT_FLAG;
457 	}
458 
459 	fibptr->done = 0;
460 
461 	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
462 
463 	dprintk((KERN_DEBUG "Fib contents:.\n"));
464 	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
465 	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
466 	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
467 	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
468 	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
469 	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
470 
471 	if (!dev->queues)
472 		return -EBUSY;
473 
474 	if(wait)
475 		spin_lock_irqsave(&fibptr->event_lock, flags);
476 	aac_adapter_deliver(fibptr);
477 
478 	/*
479 	 *	If the caller wanted us to wait for response wait now.
480 	 */
481 
482 	if (wait) {
483 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
484 		/* Only set for the first known interruptible command */
485 		if (wait < 0) {
486 			/*
487 			 * *VERY* Dangerous to time out a command, the
488 			 * assumption is made that we have no hope of
489 			 * functioning because an interrupt routing or other
490 			 * hardware failure has occurred.
491 			 */
492 			unsigned long count = 36000000L; /* 3 minutes */
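			/*
			 * 36,000,000 iterations of the udelay(5) poll
			 * below works out to roughly 180 seconds.
			 */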
493 			while (down_trylock(&fibptr->event_wait)) {
494 				int blink;
495 				if (--count == 0) {
496 					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
497 					spin_lock_irqsave(q->lock, qflags);
498 					q->numpending--;
499 					spin_unlock_irqrestore(q->lock, qflags);
500 					if (wait == -1) {
501 	        				printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
502 						  "Usually a result of a PCI interrupt routing problem;\n"
503 						  "update mother board BIOS or consider utilizing one of\n"
504 						  "the SAFE mode kernel options (acpi, apic etc)\n");
505 					}
506 					return -ETIMEDOUT;
507 				}
508 				if ((blink = aac_adapter_check_health(dev)) > 0) {
509 					if (wait == -1) {
510 	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
511 						  "Usually a result of a serious unrecoverable hardware problem\n",
512 						  blink);
513 					}
514 					return -EFAULT;
515 				}
516 				udelay(5);
517 			}
518 		} else
519 			(void)down_interruptible(&fibptr->event_wait);
520 		spin_lock_irqsave(&fibptr->event_lock, flags);
521 		if (fibptr->done == 0) {
522 			fibptr->done = 2; /* Tell interrupt we aborted */
523 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
524 			return -EINTR;
525 		}
526 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
527 		BUG_ON(fibptr->done == 0);
528 
529 		if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
530 			return -ETIMEDOUT;
531 		return 0;
532 	}
533 	/*
534 	 *	If the user does not want a response then return success, otherwise
535 	 *	return pending
536 	 */
537 	if (reply)
538 		return -EINPROGRESS;
539 	else
540 		return 0;
541 }
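/*
 * Example invocations of aac_fib_send(), as a sketch of the two common
 * modes: a synchronous caller passes wait=1 and blocks inside
 * aac_fib_send() until the adapter responds, while an asynchronous caller
 * passes wait=0 and a completion callback ("datasize", "my_callback" and
 * "my_ctx" are placeholder names, not driver symbols):
 *
 *	status = aac_fib_send(ContainerCommand, fibptr, datasize,
 *			      FsaNormal, 1, 1, NULL, NULL);
 *	status = aac_fib_send(ContainerCommand, fibptr, datasize,
 *			      FsaNormal, 0, 1, my_callback, my_ctx);
 */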
542 
543 /**
544  *	aac_consumer_get	-	get the top of the queue
545  *	@dev: Adapter
546  *	@q: Queue
547  *	@entry: Return entry
548  *
549  *	Will return a pointer to the entry on the top of the requested queue
550  *	that we are a consumer of, returning the address of the queue entry.
551  *	It does not change the state of the queue.
552  */
553 
554 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
555 {
556 	u32 index;
557 	int status;
558 	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
559 		status = 0;
560 	} else {
561 		/*
562 		 *	The consumer index must be wrapped if we have reached
563 		 *	the end of the queue, else we just use the entry
564 		 *	pointed to by the header index
565 		 */
566 		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
567 			index = 0;
568 		else
569 			index = le32_to_cpu(*q->headers.consumer);
570 		*entry = q->base + index;
571 		status = 1;
572 	}
573 	return(status);
574 }
575 
576 /**
577  *	aac_consumer_free	-	free consumer entry
578  *	@dev: Adapter
579  *	@q: Queue
580  *	@qid: Queue ident
581  *
582  *	Frees up the current top of the queue we are a consumer of. If the
583  *	queue was full notify the producer that the queue is no longer full.
584  */
585 
586 void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
587 {
588 	int wasfull = 0;
589 	u32 notify;
590 
591 	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
592 		wasfull = 1;
593 
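	/*
	 * Note the wrap below goes to 1, not 0: a stored consumer index of
	 * q->entries means aac_consumer_get() just handed out slot 0, so
	 * the next slot to consume is 1.
	 */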
594 	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
595 		*q->headers.consumer = cpu_to_le32(1);
596 	else
597 		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
598 
599 	if (wasfull) {
600 		switch (qid) {
601 
602 		case HostNormCmdQueue:
603 			notify = HostNormCmdNotFull;
604 			break;
605 		case HostNormRespQueue:
606 			notify = HostNormRespNotFull;
607 			break;
608 		default:
609 			BUG();
610 			return;
611 		}
612 		aac_adapter_notify(dev, notify);
613 	}
614 }
615 
616 /**
617  *	aac_fib_adapter_complete	-	complete adapter issued fib
618  *	@fibptr: fib to complete
619  *	@size: size of fib
620  *
621  *	Will do all necessary work to complete a FIB that was sent from
622  *	the adapter.
623  */
624 
625 int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
626 {
627 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
628 	struct aac_dev * dev = fibptr->dev;
629 	struct aac_queue * q;
630 	unsigned long nointr = 0;
631 	unsigned long qflags;
632 
633 	if (hw_fib->header.XferState == 0) {
634 		if (dev->comm_interface == AAC_COMM_MESSAGE)
635 			kfree (hw_fib);
636 		return 0;
637 	}
638 	/*
639 	 *	If we plan to do anything check the structure type first.
640 	 */
641 	if (hw_fib->header.StructType != FIB_MAGIC) {
642 		if (dev->comm_interface == AAC_COMM_MESSAGE)
643 			kfree (hw_fib);
644 		return -EINVAL;
645 	}
646 	/*
647 	 *	This block handles the case where the adapter had sent us a
648 	 *	command and we have finished processing the command. We
649 	 *	call completeFib when we are done processing the command
650 	 *	and want to send a response back to the adapter. This will
651 	 *	send the completed cdb to the adapter.
652 	 */
653 	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
654 		if (dev->comm_interface == AAC_COMM_MESSAGE) {
655 			kfree (hw_fib);
656 		} else {
657 			u32 index;
658 			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
659 			if (size) {
660 				size += sizeof(struct aac_fibhdr);
661 				if (size > le16_to_cpu(hw_fib->header.SenderSize))
662 					return -EMSGSIZE;
663 				hw_fib->header.Size = cpu_to_le16(size);
664 			}
665 			q = &dev->queues->queue[AdapNormRespQueue];
666 			spin_lock_irqsave(q->lock, qflags);
667 			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
668 			*(q->headers.producer) = cpu_to_le32(index + 1);
669 			spin_unlock_irqrestore(q->lock, qflags);
670 			if (!(nointr & (int)aac_config.irq_mod))
671 				aac_adapter_notify(dev, AdapNormRespQueue);
672 		}
673 	} else {
674 		printk(KERN_WARNING "aac_fib_adapter_complete: "
675 			"Unknown xferstate detected.\n");
676 		BUG();
677 	}
678 	return 0;
679 }
680 
681 /**
682  *	aac_fib_complete	-	fib completion handler
683  *	@fib: FIB to complete
684  *
685  *	Will do all necessary work to complete a FIB.
686  */
687 
688 int aac_fib_complete(struct fib *fibptr)
689 {
690 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
691 
692 	/*
693 	 *	Check for a fib which has already been completed
694 	 */
695 
696 	if (hw_fib->header.XferState == 0)
697 		return 0;
698 	/*
699 	 *	If we plan to do anything check the structure type first.
700 	 */
701 
702 	if (hw_fib->header.StructType != FIB_MAGIC)
703 		return -EINVAL;
704 	/*
705 	 *	This block completes a cdb which originated on the host and we
706 	 *	just need to deallocate the cdb or reinit it. At this point the
707 	 *	command is complete that we had sent to the adapter and this
708 	 *	cdb could be reused.
709 	 */
710 	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
711 		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
712 	{
713 		fib_dealloc(fibptr);
714 	}
715 	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
716 	{
717 		/*
718 		 *	This handles the case when the host has aborted the I/O
719 		 *	to the adapter because the adapter is not responding
720 		 */
721 		fib_dealloc(fibptr);
722 	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
723 		fib_dealloc(fibptr);
724 	} else {
725 		BUG();
726 	}
727 	return 0;
728 }
729 
730 /**
731  *	aac_printf	-	handle printf from firmware
732  *	@dev: Adapter
733  *	@val: Message info
734  *
735  *	Print a message passed to us by the controller firmware on the
736  *	Adaptec board
737  */
738 
739 void aac_printf(struct aac_dev *dev, u32 val)
740 {
741 	char *cp = dev->printfbuf;
742 	if (dev->printf_enabled)
743 	{
744 		int length = val & 0xffff;
745 		int level = (val >> 16) & 0xffff;
746 
747 		/*
748 		 *	The size of the printfbuf is set in port.c
749 		 *	There is no variable or define for it
750 		 */
751 		if (length > 255)
752 			length = 255;
753 		if (cp[length] != 0)
754 			cp[length] = 0;
755 		if (level == LOG_AAC_HIGH_ERROR)
756 			printk(KERN_WARNING "%s:%s", dev->name, cp);
757 		else
758 			printk(KERN_INFO "%s:%s", dev->name, cp);
759 	}
760 	memset(cp, 0, 256);
761 }
762 
763 
764 /**
765  *	aac_handle_aif		-	Handle a message from the firmware
766  *	@dev: Which adapter this fib is from
767  *	@fibptr: Pointer to fibptr from adapter
768  *
769  *	This routine handles a driver notify fib from the adapter and
770  *	dispatches it to the appropriate routine for handling.
771  */
772 
773 #define AIF_SNIFF_TIMEOUT	(30*HZ)
774 static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
775 {
776 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
777 	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
778 	u32 channel, id, lun, container;
779 	struct scsi_device *device;
780 	enum {
781 		NOTHING,
782 		DELETE,
783 		ADD,
784 		CHANGE
785 	} device_config_needed = NOTHING;
786 
787 	/* Sniff for container changes */
788 
789 	if (!dev || !dev->fsa_dev)
790 		return;
791 	container = channel = id = lun = (u32)-1;
792 
793 	/*
794 	 *	We have set this up to try and minimize the number of
795 	 * re-configures that take place. As a result of this when
796 	 * certain AIF's come in we will set a flag waiting for another
797 	 * type of AIF before setting the re-config flag.
798 	 */
799 	switch (le32_to_cpu(aifcmd->command)) {
800 	case AifCmdDriverNotify:
801 		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
802 		/*
803 		 *	Morph or Expand complete
804 		 */
805 		case AifDenMorphComplete:
806 		case AifDenVolumeExtendComplete:
807 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
808 			if (container >= dev->maximum_num_containers)
809 				break;
810 
811 			/*
812 			 *	Find the scsi_device associated with the SCSI
813 			 * address. Make sure we have the right array, and if
814 			 * so set the flag to initiate a new re-config once we
815 			 * see an AifEnConfigChange AIF come through.
816 			 */
817 
818 			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
819 				device = scsi_device_lookup(dev->scsi_host_ptr,
820 					CONTAINER_TO_CHANNEL(container),
821 					CONTAINER_TO_ID(container),
822 					CONTAINER_TO_LUN(container));
823 				if (device) {
824 					dev->fsa_dev[container].config_needed = CHANGE;
825 					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
826 					dev->fsa_dev[container].config_waiting_stamp = jiffies;
827 					scsi_device_put(device);
828 				}
829 			}
830 		}
831 
832 		/*
833 		 *	If we are waiting on something and this happens to be
834 		 * that thing then set the re-configure flag.
835 		 */
836 		if (container != (u32)-1) {
837 			if (container >= dev->maximum_num_containers)
838 				break;
839 			if ((dev->fsa_dev[container].config_waiting_on ==
840 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
841 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
842 				dev->fsa_dev[container].config_waiting_on = 0;
843 		} else for (container = 0;
844 		    container < dev->maximum_num_containers; ++container) {
845 			if ((dev->fsa_dev[container].config_waiting_on ==
846 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
847 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
848 				dev->fsa_dev[container].config_waiting_on = 0;
849 		}
850 		break;
851 
852 	case AifCmdEventNotify:
853 		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
854 		case AifEnBatteryEvent:
855 			dev->cache_protected =
856 				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
857 			break;
858 		/*
859 		 *	Add an Array.
860 		 */
861 		case AifEnAddContainer:
862 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
863 			if (container >= dev->maximum_num_containers)
864 				break;
865 			dev->fsa_dev[container].config_needed = ADD;
866 			dev->fsa_dev[container].config_waiting_on =
867 				AifEnConfigChange;
868 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
869 			break;
870 
871 		/*
872 		 *	Delete an Array.
873 		 */
874 		case AifEnDeleteContainer:
875 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
876 			if (container >= dev->maximum_num_containers)
877 				break;
878 			dev->fsa_dev[container].config_needed = DELETE;
879 			dev->fsa_dev[container].config_waiting_on =
880 				AifEnConfigChange;
881 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
882 			break;
883 
884 		/*
885 		 *	Container change detected. If we currently are not
886 		 * waiting on something else, setup to wait on a Config Change.
887 		 */
888 		case AifEnContainerChange:
889 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
890 			if (container >= dev->maximum_num_containers)
891 				break;
892 			if (dev->fsa_dev[container].config_waiting_on &&
893 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
894 				break;
895 			dev->fsa_dev[container].config_needed = CHANGE;
896 			dev->fsa_dev[container].config_waiting_on =
897 				AifEnConfigChange;
898 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
899 			break;
900 
901 		case AifEnConfigChange:
902 			break;
903 
904 		case AifEnAddJBOD:
905 		case AifEnDeleteJBOD:
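			/*
			 * The JBOD address below is packed into a single
			 * 32-bit word: bits 31-28 must be zero, bits 27-24
			 * hold the physical channel, bits 23-16 the lun and
			 * bits 15-0 the target id.
			 */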
906 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
907 			if ((container >> 28))
908 				break;
909 			channel = (container >> 24) & 0xF;
910 			if (channel >= dev->maximum_num_channels)
911 				break;
912 			id = container & 0xFFFF;
913 			if (id >= dev->maximum_num_physicals)
914 				break;
915 			lun = (container >> 16) & 0xFF;
916 			channel = aac_phys_to_logical(channel);
917 			device_config_needed =
918 			  (((__le32 *)aifcmd->data)[0] ==
919 			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
920 			break;
921 
922 		case AifEnEnclosureManagement:
923 			/*
924 			 * If in JBOD mode, automatic exposure of a new
925 			 * physical target is suppressed until it is configured.
926 			 */
927 			if (dev->jbod)
928 				break;
929 			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
930 			case EM_DRIVE_INSERTION:
931 			case EM_DRIVE_REMOVAL:
932 				container = le32_to_cpu(
933 					((__le32 *)aifcmd->data)[2]);
934 				if ((container >> 28))
935 					break;
936 				channel = (container >> 24) & 0xF;
937 				if (channel >= dev->maximum_num_channels)
938 					break;
939 				id = container & 0xFFFF;
940 				lun = (container >> 16) & 0xFF;
941 				if (id >= dev->maximum_num_physicals) {
942 					/* legacy dev_t ? */
943 					if ((0x2000 <= id) || lun || channel ||
944 					  ((channel = (id >> 7) & 0x3F) >=
945 					  dev->maximum_num_channels))
946 						break;
947 					lun = (id >> 4) & 7;
948 					id &= 0xF;
949 				}
950 				channel = aac_phys_to_logical(channel);
951 				device_config_needed =
952 				  (((__le32 *)aifcmd->data)[3]
953 				    == cpu_to_le32(EM_DRIVE_INSERTION)) ?
954 				  ADD : DELETE;
955 				break;
956 			}
957 			break;
958 		}
959 
960 		/*
961 		 *	If we are waiting on something and this happens to be
962 		 * that thing then set the re-configure flag.
963 		 */
964 		if (container != (u32)-1) {
965 			if (container >= dev->maximum_num_containers)
966 				break;
967 			if ((dev->fsa_dev[container].config_waiting_on ==
968 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
969 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
970 				dev->fsa_dev[container].config_waiting_on = 0;
971 		} else for (container = 0;
972 		    container < dev->maximum_num_containers; ++container) {
973 			if ((dev->fsa_dev[container].config_waiting_on ==
974 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
975 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
976 				dev->fsa_dev[container].config_waiting_on = 0;
977 		}
978 		break;
979 
980 	case AifCmdJobProgress:
981 		/*
982 		 *	These are job progress AIF's. When a Clear is being
983 		 * done on a container it is initially created then hidden from
984 		 * the OS. When the clear completes we don't get a config
985 		 * change so we monitor the job status complete on a clear then
986 		 * wait for a container change.
987 		 */
988 
989 		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
990 		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
991 		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
992 			for (container = 0;
993 			    container < dev->maximum_num_containers;
994 			    ++container) {
995 				/*
996 				 * Stomp on all config sequencing for all
997 				 * containers?
998 				 */
999 				dev->fsa_dev[container].config_waiting_on =
1000 					AifEnContainerChange;
1001 				dev->fsa_dev[container].config_needed = ADD;
1002 				dev->fsa_dev[container].config_waiting_stamp =
1003 					jiffies;
1004 			}
1005 		}
1006 		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1007 		    ((__le32 *)aifcmd->data)[6] == 0 &&
1008 		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
1009 			for (container = 0;
1010 			    container < dev->maximum_num_containers;
1011 			    ++container) {
1012 				/*
1013 				 * Stomp on all config sequencing for all
1014 				 * containers?
1015 				 */
1016 				dev->fsa_dev[container].config_waiting_on =
1017 					AifEnContainerChange;
1018 				dev->fsa_dev[container].config_needed = DELETE;
1019 				dev->fsa_dev[container].config_waiting_stamp =
1020 					jiffies;
1021 			}
1022 		}
1023 		break;
1024 	}
1025 
1026 	if (device_config_needed == NOTHING)
1027 	for (container = 0; container < dev->maximum_num_containers;
1028 	    ++container) {
1029 		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1030 			(dev->fsa_dev[container].config_needed != NOTHING) &&
1031 			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1032 			device_config_needed =
1033 				dev->fsa_dev[container].config_needed;
1034 			dev->fsa_dev[container].config_needed = NOTHING;
1035 			channel = CONTAINER_TO_CHANNEL(container);
1036 			id = CONTAINER_TO_ID(container);
1037 			lun = CONTAINER_TO_LUN(container);
1038 			break;
1039 		}
1040 	}
1041 	if (device_config_needed == NOTHING)
1042 		return;
1043 
1044 	/*
1045 	 *	If we decided that a re-configuration needs to be done,
1046 	 * schedule it here on the way out the door, please close the door
1047 	 * behind you.
1048 	 */
1049 
1050 	/*
1051 	 *	Find the scsi_device associated with the SCSI address,
1052 	 * and mark it as changed, invalidating the cache. This deals
1053 	 * with changes to existing device IDs.
1054 	 */
1055 
1056 	if (!dev || !dev->scsi_host_ptr)
1057 		return;
1058 	/*
1059 	 * force reload of disk info via aac_probe_container
1060 	 */
1061 	if ((channel == CONTAINER_CHANNEL) &&
1062 	  (device_config_needed != NOTHING)) {
1063 		if (dev->fsa_dev[container].valid == 1)
1064 			dev->fsa_dev[container].valid = 2;
1065 		aac_probe_container(dev, container);
1066 	}
1067 	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
1068 	if (device) {
1069 		switch (device_config_needed) {
1070 		case DELETE:
1071 			if (scsi_device_online(device)) {
1072 				scsi_device_set_state(device, SDEV_OFFLINE);
1073 				sdev_printk(KERN_INFO, device,
1074 					"Device offlined - %s\n",
1075 					(channel == CONTAINER_CHANNEL) ?
1076 						"array deleted" :
1077 						"enclosure services event");
1078 			}
1079 			break;
1080 		case ADD:
1081 			if (!scsi_device_online(device)) {
1082 				sdev_printk(KERN_INFO, device,
1083 					"Device online - %s\n",
1084 					(channel == CONTAINER_CHANNEL) ?
1085 						"array created" :
1086 						"enclosure services event");
1087 				scsi_device_set_state(device, SDEV_RUNNING);
1088 			}
1089 			/* FALLTHRU */
1090 		case CHANGE:
1091 			if ((channel == CONTAINER_CHANNEL)
1092 			 && (!dev->fsa_dev[container].valid)) {
1093 				if (!scsi_device_online(device))
1094 					break;
1095 				scsi_device_set_state(device, SDEV_OFFLINE);
1096 				sdev_printk(KERN_INFO, device,
1097 					"Device offlined - %s\n",
1098 					"array failed");
1099 				break;
1100 			}
1101 			scsi_rescan_device(&device->sdev_gendev);
1102 
1103 		default:
1104 			break;
1105 		}
1106 		scsi_device_put(device);
1107 		device_config_needed = NOTHING;
1108 	}
1109 	if (device_config_needed == ADD)
1110 		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
1111 }
1112 
1113 static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1114 {
1115 	int index, quirks;
1116 	int retval;
1117 	struct Scsi_Host *host;
1118 	struct scsi_device *dev;
1119 	struct scsi_cmnd *command;
1120 	struct scsi_cmnd *command_list;
1121 	int jafo = 0;
1122 
1123 	/*
1124 	 * Assumptions:
1125 	 *	- host is locked, unless called by the aacraid thread.
1126 	 *	  (a matter of convenience, due to legacy issues surrounding
1127 	 *	  eh_host_adapter_reset).
1128 	 *	- in_reset is asserted, so no new i/o is getting to the
1129 	 *	  card.
1130 	 *	- The card is dead, or will be very shortly ;-/ so no new
1131 	 *	  commands are completing in the interrupt service.
1132 	 */
1133 	host = aac->scsi_host_ptr;
1134 	scsi_block_requests(host);
1135 	aac_adapter_disable_int(aac);
1136 	if (aac->thread->pid != current->pid) {
1137 		spin_unlock_irq(host->host_lock);
1138 		kthread_stop(aac->thread);
1139 		jafo = 1;
1140 	}
1141 
1142 	/*
1143 	 *	A positive health value means the adapter is in a known
1144 	 * DEAD/PANIC state and could be reset to `try again'.
1145 	 */
1146 	retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
1147 
1148 	if (retval)
1149 		goto out;
1150 
1151 	/*
1152 	 *	Loop through the fibs, close the synchronous FIBS
1153 	 */
1154 	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1155 		struct fib *fib = &aac->fibs[index];
1156 		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1157 		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
1158 			unsigned long flagv;
1159 			spin_lock_irqsave(&fib->event_lock, flagv);
1160 			up(&fib->event_wait);
1161 			spin_unlock_irqrestore(&fib->event_lock, flagv);
1162 			schedule();
1163 			retval = 0;
1164 		}
1165 	}
1166 	/* Give some extra time for ioctls to complete. */
1167 	if (retval == 0)
1168 		ssleep(2);
1169 	index = aac->cardtype;
1170 
1171 	/*
1172 	 * Re-initialize the adapter, first free resources, then carefully
1173 	 * apply the initialization sequence to come back again. The only risk
1174 	 * is a change in firmware dropping the cache; it is assumed the caller
1175 	 * will ensure that i/o is quiesced and the card is flushed in that
1176 	 * case.
1177 	 */
1178 	aac_fib_map_free(aac);
1179 	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1180 	aac->comm_addr = NULL;
1181 	aac->comm_phys = 0;
1182 	kfree(aac->queues);
1183 	aac->queues = NULL;
1184 	free_irq(aac->pdev->irq, aac);
1185 	kfree(aac->fsa_dev);
1186 	aac->fsa_dev = NULL;
1187 	quirks = aac_get_driver_ident(index)->quirks;
1188 	if (quirks & AAC_QUIRK_31BIT) {
1189 		if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
1190 		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
1191 			goto out;
1192 	} else {
1193 		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
1194 		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
1195 			goto out;
1196 	}
1197 	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1198 		goto out;
1199 	if (quirks & AAC_QUIRK_31BIT)
1200 		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
1201 			goto out;
1202 	if (jafo) {
1203 		aac->thread = kthread_run(aac_command_thread, aac, aac->name);
1204 		if (IS_ERR(aac->thread)) {
1205 			retval = PTR_ERR(aac->thread);
1206 			goto out;
1207 		}
1208 	}
1209 	(void)aac_get_adapter_info(aac);
1210 	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1211 		host->sg_tablesize = 34;
1212 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1213 	}
1214 	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1215 		host->sg_tablesize = 17;
1216 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1217 	}
1218 	aac_get_config_status(aac, 1);
1219 	aac_get_containers(aac);
1220 	/*
1221 	 * This is where the assumption that the Adapter is quiesced
1222 	 * is important.
1223 	 */
1224 	command_list = NULL;
1225 	__shost_for_each_device(dev, host) {
1226 		unsigned long flags;
1227 		spin_lock_irqsave(&dev->list_lock, flags);
1228 		list_for_each_entry(command, &dev->cmd_list, list)
1229 			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1230 				command->SCp.buffer = (struct scatterlist *)command_list;
1231 				command_list = command;
1232 			}
1233 		spin_unlock_irqrestore(&dev->list_lock, flags);
1234 	}
1235 	while ((command = command_list)) {
1236 		command_list = (struct scsi_cmnd *)command->SCp.buffer;
1237 		command->SCp.buffer = NULL;
1238 		command->result = DID_OK << 16
1239 		  | COMMAND_COMPLETE << 8
1240 		  | SAM_STAT_TASK_SET_FULL;
1241 		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1242 		command->scsi_done(command);
1243 	}
1244 	retval = 0;
1245 
1246 out:
1247 	aac->in_reset = 0;
1248 	scsi_unblock_requests(host);
1249 	if (jafo) {
1250 		spin_lock_irq(host->host_lock);
1251 	}
1252 	return retval;
1253 }
1254 
1255 int aac_reset_adapter(struct aac_dev * aac, int forced)
1256 {
1257 	unsigned long flagv = 0;
1258 	int retval;
1259 	struct Scsi_Host * host;
1260 
1261 	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1262 		return -EBUSY;
1263 
1264 	if (aac->in_reset) {
1265 		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1266 		return -EBUSY;
1267 	}
1268 	aac->in_reset = 1;
1269 	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1270 
1271 	/*
1272 	 * Wait for all commands to complete to this specific
1273 	 * target (block maximum 60 seconds). Although not necessary,
1274 	 * it does make us a good storage citizen.
1275 	 */
1276 	host = aac->scsi_host_ptr;
1277 	scsi_block_requests(host);
1278 	if (forced < 2) for (retval = 60; retval; --retval) {
1279 		struct scsi_device * dev;
1280 		struct scsi_cmnd * command;
1281 		int active = 0;
1282 
1283 		__shost_for_each_device(dev, host) {
1284 			spin_lock_irqsave(&dev->list_lock, flagv);
1285 			list_for_each_entry(command, &dev->cmd_list, list) {
1286 				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1287 					active++;
1288 					break;
1289 				}
1290 			}
1291 			spin_unlock_irqrestore(&dev->list_lock, flagv);
1292 			if (active)
1293 				break;
1294 
1295 		}
1296 		/*
1297 		 * We can exit if all the commands are complete
1298 		 */
1299 		if (active == 0)
1300 			break;
1301 		ssleep(1);
1302 	}
1303 
1304 	/* Quiesce build, flush cache, write through mode */
1305 	if (forced < 2)
1306 		aac_send_shutdown(aac);
1307 	spin_lock_irqsave(host->host_lock, flagv);
1308 	retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
1309 	spin_unlock_irqrestore(host->host_lock, flagv);
1310 
1311 	if ((forced < 2) && (retval == -ENODEV)) {
1312 		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1313 		struct fib * fibctx = aac_fib_alloc(aac);
1314 		if (fibctx) {
1315 			struct aac_pause *cmd;
1316 			int status;
1317 
1318 			aac_fib_init(fibctx);
1319 
1320 			cmd = (struct aac_pause *) fib_data(fibctx);
1321 
1322 			cmd->command = cpu_to_le32(VM_ContainerConfig);
1323 			cmd->type = cpu_to_le32(CT_PAUSE_IO);
1324 			cmd->timeout = cpu_to_le32(1);
1325 			cmd->min = cpu_to_le32(1);
1326 			cmd->noRescan = cpu_to_le32(1);
1327 			cmd->count = cpu_to_le32(0);
1328 
1329 			status = aac_fib_send(ContainerCommand,
1330 			  fibctx,
1331 			  sizeof(struct aac_pause),
1332 			  FsaNormal,
1333 			  -2 /* Timeout silently */, 1,
1334 			  NULL, NULL);
1335 
1336 			if (status >= 0)
1337 				aac_fib_complete(fibctx);
1338 			aac_fib_free(fibctx);
1339 		}
1340 	}
1341 
1342 	return retval;
1343 }
1344 
1345 int aac_check_health(struct aac_dev * aac)
1346 {
1347 	int BlinkLED;
1348 	unsigned long time_now, flagv = 0;
1349 	struct list_head * entry;
1350 	struct Scsi_Host * host;
1351 
1352 	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
1353 	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1354 		return 0;
1355 
1356 	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1357 		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1358 		return 0; /* OK */
1359 	}
1360 
1361 	aac->in_reset = 1;
1362 
1363 	/* Fake up an AIF:
1364 	 *	aac_aifcmd.command = AifCmdEventNotify = 1
1365 	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
1366 	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
1367 	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1368 	 *	aac.aifcmd.data[2] = AifHighPriority = 3
1369 	 *	aac.aifcmd.data[3] = BlinkLED
1370 	 */
1371 
1372 	time_now = jiffies/HZ;
1373 	entry = aac->fib_list.next;
1374 
1375 	/*
1376 	 * For each Context that is on the
1377 	 * fibctxList, make a copy of the
1378 	 * fib, and then set the event to wake up the
1379 	 * thread that is waiting for it.
1380 	 */
1381 	while (entry != &aac->fib_list) {
1382 		/*
1383 		 * Extract the fibctx
1384 		 */
1385 		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1386 		struct hw_fib * hw_fib;
1387 		struct fib * fib;
1388 		/*
1389 		 * Check if the queue is getting
1390 		 * backlogged
1391 		 */
1392 		if (fibctx->count > 20) {
1393 			/*
1394 			 * It's *not* jiffies folks,
1395 			 * but jiffies / HZ, so do not
1396 			 * panic ...
1397 			 */
1398 			u32 time_last = fibctx->jiffies;
1399 			/*
1400 			 * Has it been > 2 minutes
1401 			 * since the last read off
1402 			 * the queue?
1403 			 */
1404 			if ((time_now - time_last) > aif_timeout) {
1405 				entry = entry->next;
1406 				aac_close_fib_context(aac, fibctx);
1407 				continue;
1408 			}
1409 		}
1410 		/*
1411 		 * Warning: no sleep allowed while
1412 		 * holding spinlock
1413 		 */
1414 		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1415 		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1416 		if (fib && hw_fib) {
1417 			struct aac_aifcmd * aif;
1418 
1419 			fib->hw_fib_va = hw_fib;
1420 			fib->dev = aac;
1421 			aac_fib_init(fib);
1422 			fib->type = FSAFS_NTC_FIB_CONTEXT;
1423 			fib->size = sizeof (struct fib);
1424 			fib->data = hw_fib->data;
1425 			aif = (struct aac_aifcmd *)hw_fib->data;
1426 			aif->command = cpu_to_le32(AifCmdEventNotify);
1427 			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1428 			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1429 			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1430 			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1431 			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1432 
1433 			/*
1434 			 * Put the FIB onto the
1435 			 * fibctx's fibs
1436 			 */
1437 			list_add_tail(&fib->fiblink, &fibctx->fib_list);
1438 			fibctx->count++;
1439 			/*
1440 			 * Set the event to wake up the
1441 			 * thread that is waiting.
1442 			 */
1443 			up(&fibctx->wait_sem);
1444 		} else {
1445 			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1446 			kfree(fib);
1447 			kfree(hw_fib);
1448 		}
1449 		entry = entry->next;
1450 	}
1451 
1452 	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1453 
1454 	if (BlinkLED < 0) {
1455 		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1456 		goto out;
1457 	}
1458 
1459 	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1460 
1461 	if (!aac_check_reset || ((aac_check_reset != 1) &&
1462 		(aac->supplement_adapter_info.SupportedOptions2 &
1463 			AAC_OPTION_IGNORE_RESET)))
1464 		goto out;
1465 	host = aac->scsi_host_ptr;
1466 	if (aac->thread->pid != current->pid)
1467 		spin_lock_irqsave(host->host_lock, flagv);
1468 	BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
1469 	if (aac->thread->pid != current->pid)
1470 		spin_unlock_irqrestore(host->host_lock, flagv);
1471 	return BlinkLED;
1472 
1473 out:
1474 	aac->in_reset = 0;
1475 	return BlinkLED;
1476 }
1477 
1478 
1479 /**
1480  *	aac_command_thread	-	command processing thread
1481  *	@data: Adapter to monitor (a struct aac_dev pointer)
1482  *
1483  *	Waits on the commandready event in its queue. When the event gets set
1484  *	it will pull FIBs off its queue. It will continue to pull FIBs off
1485  *	until the queue is empty. When the queue is empty it will wait for
1486  *	more FIBs.
1487  */
1488 
1489 int aac_command_thread(void *data)
1490 {
1491 	struct aac_dev *dev = data;
1492 	struct hw_fib *hw_fib, *hw_newfib;
1493 	struct fib *fib, *newfib;
1494 	struct aac_fib_context *fibctx;
1495 	unsigned long flags;
1496 	DECLARE_WAITQUEUE(wait, current);
1497 	unsigned long next_jiffies = jiffies + HZ;
1498 	unsigned long next_check_jiffies = next_jiffies;
1499 	long difference = HZ;
1500 
1501 	/*
1502 	 *	We can only have one thread per adapter for AIF's.
1503 	 */
1504 	if (dev->aif_thread)
1505 		return -EINVAL;
1506 
1507 	/*
1508 	 *	Let the DPC know it has a place to send the AIF's to.
1509 	 */
1510 	dev->aif_thread = 1;
1511 	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1512 	set_current_state(TASK_INTERRUPTIBLE);
1513 	dprintk ((KERN_INFO "aac_command_thread start\n"));
1514 	while (1) {
1515 		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1516 		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1517 			struct list_head *entry;
1518 			struct aac_aifcmd * aifcmd;
1519 
1520 			set_current_state(TASK_RUNNING);
1521 
1522 			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
1523 			list_del(entry);
1524 
1525 			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1526 			fib = list_entry(entry, struct fib, fiblink);
1527 			/*
1528 			 *	We will process the FIB here or pass it to a
1529 			 *	worker thread that is TBD. We really can't
1530 			 *	do anything at this point since we don't have
1531 			 *	anything defined for this thread to do.
1532 			 */
1533 			hw_fib = fib->hw_fib_va;
1534 			memset(fib, 0, sizeof(struct fib));
1535 			fib->type = FSAFS_NTC_FIB_CONTEXT;
1536 			fib->size = sizeof(struct fib);
1537 			fib->hw_fib_va = hw_fib;
1538 			fib->data = hw_fib->data;
1539 			fib->dev = dev;
1540 			/*
1541 			 *	We only handle AifRequest fibs from the adapter.
1542 			 */
1543 			aifcmd = (struct aac_aifcmd *) hw_fib->data;
1544 			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1545 				/* Handle Driver Notify Events */
1546 				aac_handle_aif(dev, fib);
1547 				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1548 				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1549 			} else {
1550 				/* The u32 here is important and intended. We are using
1551 				   32bit wrapping time to fit the adapter field */
1552 
1553 				u32 time_now, time_last;
1554 				unsigned long flagv;
1555 				unsigned num;
1556 				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1557 				struct fib ** fib_pool, ** fib_p;
1558 
1559 				/* Sniff events */
1560 				if ((aifcmd->command ==
1561 				     cpu_to_le32(AifCmdEventNotify)) ||
1562 				    (aifcmd->command ==
1563 				     cpu_to_le32(AifCmdJobProgress))) {
1564 					aac_handle_aif(dev, fib);
1565 				}
1566 
1567 				time_now = jiffies/HZ;
1568 
1569 				/*
1570 				 * Warning: no sleep allowed while
1571 				 * holding spinlock. We take the estimate
1572 				 * and pre-allocate a set of fibs outside the
1573 				 * lock.
1574 				 */
1575 				num = le32_to_cpu(dev->init->AdapterFibsSize)
1576 				    / sizeof(struct hw_fib); /* some extra */
1577 				spin_lock_irqsave(&dev->fib_lock, flagv);
1578 				entry = dev->fib_list.next;
1579 				while (entry != &dev->fib_list) {
1580 					entry = entry->next;
1581 					++num;
1582 				}
1583 				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1584 				hw_fib_pool = NULL;
1585 				fib_pool = NULL;
1586 				if (num
1587 				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1588 				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1589 					hw_fib_p = hw_fib_pool;
1590 					fib_p = fib_pool;
1591 					while (hw_fib_p < &hw_fib_pool[num]) {
1592 						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1593 							--hw_fib_p;
1594 							break;
1595 						}
1596 						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1597 							kfree(*(--hw_fib_p));
1598 							break;
1599 						}
1600 					}
1601 					if ((num = hw_fib_p - hw_fib_pool) == 0) {
1602 						kfree(fib_pool);
1603 						fib_pool = NULL;
1604 						kfree(hw_fib_pool);
1605 						hw_fib_pool = NULL;
1606 					}
1607 				} else {
1608 					kfree(hw_fib_pool);
1609 					hw_fib_pool = NULL;
1610 				}
1611 				spin_lock_irqsave(&dev->fib_lock, flagv);
1612 				entry = dev->fib_list.next;
1613 				/*
1614 				 * For each Context that is on the
1615 				 * fibctxList, make a copy of the
1616 				 * fib, and then set the event to wake up the
1617 				 * thread that is waiting for it.
1618 				 */
1619 				hw_fib_p = hw_fib_pool;
1620 				fib_p = fib_pool;
1621 				while (entry != &dev->fib_list) {
1622 					/*
1623 					 * Extract the fibctx
1624 					 */
1625 					fibctx = list_entry(entry, struct aac_fib_context, next);
1626 					/*
1627 					 * Check if the queue is getting
1628 					 * backlogged
1629 					 */
1630 					if (fibctx->count > 20)
1631 					{
1632 						/*
1633 						 * It's *not* jiffies folks,
1634 						 * but jiffies / HZ so do not
1635 						 * panic ...
1636 						 */
1637 						time_last = fibctx->jiffies;
1638 						/*
1639 						 * Has it been > 2 minutes
1640 						 * since the last read off
1641 						 * the queue?
1642 						 */
1643 						if ((time_now - time_last) > aif_timeout) {
1644 							entry = entry->next;
1645 							aac_close_fib_context(dev, fibctx);
1646 							continue;
1647 						}
1648 					}
1649 					/*
1650 					 * Warning: no sleep allowed while
1651 					 * holding spinlock
1652 					 */
1653 					if (hw_fib_p < &hw_fib_pool[num]) {
1654 						hw_newfib = *hw_fib_p;
1655 						*(hw_fib_p++) = NULL;
1656 						newfib = *fib_p;
1657 						*(fib_p++) = NULL;
1658 						/*
1659 						 * Make the copy of the FIB
1660 						 */
1661 						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1662 						memcpy(newfib, fib, sizeof(struct fib));
1663 						newfib->hw_fib_va = hw_newfib;
1664 						/*
1665 						 * Put the FIB onto the
1666 						 * fibctx's fibs
1667 						 */
1668 						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1669 						fibctx->count++;
1670 						/*
1671 						 * Set the event to wake up the
1672 						 * thread that is waiting.
1673 						 */
1674 						up(&fibctx->wait_sem);
1675 					} else {
1676 						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1677 					}
1678 					entry = entry->next;
1679 				}
1680 				/*
1681 				 *	Set the status of this FIB
1682 				 */
1683 				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1684 				aac_fib_adapter_complete(fib, sizeof(u32));
1685 				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1686 				/* Free up the remaining resources */
1687 				hw_fib_p = hw_fib_pool;
1688 				fib_p = fib_pool;
1689 				while (hw_fib_p < &hw_fib_pool[num]) {
1690 					kfree(*hw_fib_p);
1691 					kfree(*fib_p);
1692 					++fib_p;
1693 					++hw_fib_p;
1694 				}
1695 				kfree(hw_fib_pool);
1696 				kfree(fib_pool);
1697 			}
1698 			kfree(fib);
1699 			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1700 		}
1701 		/*
1702 		 *	There are no more AIF's
1703 		 */
1704 		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1705 
1706 		/*
1707 		 *	Background activity
1708 		 */
1709 		if ((time_before(next_check_jiffies,next_jiffies))
1710 		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
1711 			next_check_jiffies = next_jiffies;
1712 			if (aac_check_health(dev) == 0) {
1713 				difference = ((long)(unsigned)check_interval)
1714 					   * HZ;
1715 				next_check_jiffies = jiffies + difference;
1716 			} else if (!dev->queues)
1717 				break;
1718 		}
1719 		if (!time_before(next_check_jiffies,next_jiffies)
1720 		 && ((difference = next_jiffies - jiffies) <= 0)) {
1721 			struct timeval now;
1722 			int ret;
1723 
1724 			/* Don't even try to talk to the adapter if it's sick */
1725 			ret = aac_check_health(dev);
1726 			if (!ret && !dev->queues)
1727 				break;
1728 			next_check_jiffies = jiffies
1729 					   + ((long)(unsigned)check_interval)
1730 					   * HZ;
1731 			do_gettimeofday(&now);
1732 
1733 			/* Synchronize our watches */
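			/*
			 * If we are off a second boundary, convert the
			 * remaining microseconds to jiffies (rounded) so
			 * the next wakeup lands on the boundary and the
			 * time sent to the adapter stays accurate.
			 */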
1734 			if (((1000000 - (1000000 / HZ)) > now.tv_usec)
1735 			 && (now.tv_usec > (1000000 / HZ)))
1736 				difference = (((1000000 - now.tv_usec) * HZ)
1737 				  + 500000) / 1000000;
1738 			else if (ret == 0) {
1739 				struct fib *fibptr;
1740 
1741 				if ((fibptr = aac_fib_alloc(dev))) {
1742 					__le32 *info;
1743 
1744 					aac_fib_init(fibptr);
1745 
1746 					info = (__le32 *) fib_data(fibptr);
1747 					if (now.tv_usec > 500000)
1748 						++now.tv_sec;
1749 
1750 					*info = cpu_to_le32(now.tv_sec);
1751 
1752 					(void)aac_fib_send(SendHostTime,
1753 						fibptr,
1754 						sizeof(*info),
1755 						FsaNormal,
1756 						1, 1,
1757 						NULL,
1758 						NULL);
1759 					aac_fib_complete(fibptr);
1760 					aac_fib_free(fibptr);
1761 				}
1762 				difference = (long)(unsigned)update_interval*HZ;
1763 			} else {
1764 				/* retry shortly */
1765 				difference = 10 * HZ;
1766 			}
1767 			next_jiffies = jiffies + difference;
1768 			if (time_before(next_check_jiffies,next_jiffies))
1769 				difference = next_check_jiffies - jiffies;
1770 		}
1771 		if (difference <= 0)
1772 			difference = 1;
1773 		set_current_state(TASK_INTERRUPTIBLE);
1774 		schedule_timeout(difference);
1775 
1776 		if (kthread_should_stop())
1777 			break;
1778 	}
1779 	if (dev->queues)
1780 		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1781 	dev->aif_thread = 0;
1782 	return 0;
1783 }
1784