/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if ((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa)) == NULL)
		return -ENOMEM;
	return 0;
}
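
/*
 *	Note: the single region allocated above must hold one hardware fib of
 *	max_fib_size bytes for every outstanding I/O (can_queue) plus the
 *	AAC_NUM_MGT_FIB management fibs; aac_fib_setup() below carves it up.
 */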

/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}

/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	dev->free_fib = &dev->fibs[0];
	return 0;
}
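
/*
 *	Illustrative sketch of the layout built above (comment only, byte
 *	arithmetic shown for clarity):
 *
 *		dev->fibs[i].hw_fib_va = dev->hw_fib_va + i * max_fib_size
 *		dev->fibs[i].hw_fib_pa = dev->hw_fib_pa + i * max_fib_size
 *		dev->fibs[i].next      = &dev->fibs[i + 1]   (last entry NULL)
 *
 *	so dev->free_fib threads the whole array as a singly linked free list.
 */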

/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;
	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			 (void*)fibptr,
			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
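
/*
 *	Typical fib life cycle for a synchronous command, as used by callers
 *	of these routines (illustrative sketch only; Command stands for any
 *	FIB command code):
 *
 *		fibptr = aac_fib_alloc(dev);
 *		aac_fib_init(fibptr);
 *		... build the request in fib_data(fibptr) ...
 *		status = aac_fib_send(Command, fibptr, size, FsaNormal,
 *				      1, 1, NULL, NULL);
 *		aac_fib_complete(fibptr);
 *		aac_fib_free(fibptr);
 */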

/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and these are the only routines which have
 *	knowledge of how these queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has
 *	free entries. If the queue is full (no free entries) then no entry is
 *	returned and the function returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
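
/*
 *	Worked example for the full test above (illustrative): with a ring of
 *	N slots the queue is treated as full when advancing the producer
 *	would make it catch up with the consumer, i.e. (*index + 1) equals
 *	the consumer index, so one slot always stays unused to distinguish
 *	a full ring from an empty one.
 */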

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore the adapter's pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
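
/*
 *	Note for callers: aac_queue_get() only fills in the queue entry; the
 *	caller is expected to hold q->lock, advance *(q->headers.producer)
 *	past the returned index and then notify the adapter, as done in
 *	aac_fib_adapter_complete() below.
 */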

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib_va->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and notify
	 *	the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", le16_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	aac_adapter_deliver(fibptr);

	/*
	 *	If the caller wanted us to wait for a response, wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (--count == 0) {
					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update motherboard BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				udelay(5);
			}
		} else
			(void)down_interruptible(&fibptr->event_wait);
		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -EINTR;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
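
/*
 *	Return convention of aac_fib_send(), summarized from the paths above:
 *	0 on success, -EINPROGRESS for an asynchronous send that expects a
 *	reply, -EBUSY if the fib or the queues are not usable, -EINVAL and
 *	-EMSGSIZE for malformed requests, and -ETIMEDOUT, -EINTR or -EFAULT
 *	when a waited-for response never arrives.
 */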

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Returns a pointer to the entry on the top of the queue requested that
 *	we are a consumer of. It does not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}

/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree (hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree (hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree (hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	}
	else
	{
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0,  256);
}
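
/*
 *	Example of the 'val' encoding decoded above (illustrative only):
 *
 *		val = (LOG_AAC_HIGH_ERROR << 16) | length;
 *
 *	i.e. the low 16 bits carry the message length and the high 16 bits
 *	the severity level.
 */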

/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
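
/*
 *	The sniffing below drives a small per-container state machine: an AIF
 *	of interest records what it wants (config_needed), which follow-up
 *	AIF it is waiting for (config_waiting_on) and when the wait started
 *	(config_waiting_stamp); the re-configuration is only acted upon once
 *	the awaited AIF arrives within AIF_SNIFF_TIMEOUT.
 */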
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 * waiting on something else, set up to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */

		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
		case CHANGE:
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
		  CONTAINER_TO_CHANNEL(container),
		  CONTAINER_TO_ID(container),
		  CONTAINER_TO_LUN(container));
	}

}

static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;
	int jafo = 0;

	/*
	 * Assumptions:
	 *	- host is locked, unless called by the aacraid thread.
	 *	  (a matter of convenience, due to legacy issues surrounding
	 *	  eh_host_adapter_reset).
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead, or will be very shortly ;-/ so no new
	 *	  commands are completing in the interrupt service.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	if (aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		jafo = 1;
	}

	/*
	 *	A positive health value means the adapter is in a known
	 * DEAD PANIC state and could be reset to `try again'.
	 */
	retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));

	if (retval)
		goto out;

	/*
	 *	Loop through the fibs, close the synchronous FIBS
	 */
	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
		struct fib *fib = &aac->fibs[index];
		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	free_irq(aac->pdev->irq, aac);
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;
	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
			goto out;
	} else {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
			goto out;
	}
	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;
	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
			goto out;
	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	quirks = aac_get_driver_ident(index)->quirks;
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
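	/*
	 * The SCp.buffer field is reused below as a temporary 'next' pointer
	 * to chain up every command still owned by the firmware, so they can
	 * be completed after dev->list_lock has been dropped.
	 */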
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);
	if (jafo) {
		spin_lock_irq(host->host_lock);
	}
	return retval;
}

int aac_reset_adapter(struct aac_dev * aac, int forced)
{
	unsigned long flagv = 0;
	int retval;
	struct Scsi_Host * host;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	if (forced < 2) for (retval = 60; retval; --retval) {
		struct scsi_device * dev;
		struct scsi_cmnd * command;
		int active = 0;

		__shost_for_each_device(dev, host) {
			spin_lock_irqsave(&dev->list_lock, flagv);
			list_for_each_entry(command, &dev->cmd_list, list) {
				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
					active++;
					break;
				}
			}
			spin_unlock_irqrestore(&dev->list_lock, flagv);
			if (active)
				break;

		}
		/*
		 * We can exit if all the commands are complete
		 */
		if (active == 0)
			break;
		ssleep(1);
	}

	/* Quiesce build, flush cache, write through mode */
	aac_send_shutdown(aac);
	spin_lock_irqsave(host->host_lock, flagv);
	retval = _aac_reset_adapter(aac, forced);
	spin_unlock_irqrestore(host->host_lock, flagv);

	if (retval == -ENODEV) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib * fibctx = aac_fib_alloc(aac);
		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *) fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
			  fibctx,
			  sizeof(struct aac_pause),
			  FsaNormal,
			  -2 /* Timeout silently */, 1,
			  NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			aac_fib_free(fibctx);
		}
	}

	return retval;
}

int aac_check_health(struct aac_dev * aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head * entry;
	struct Scsi_Host * host;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac_aifcmd.data[2] = AifHighPriority = 3
	 *	aac_aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib * hw_fib;
		struct fib * fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd * aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			aif->data[0] = cpu_to_le32(AifEnExpEvent);
			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
			aif->data[2] = cpu_to_le32(AifHighPriority);
			aif->data[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

	if (!check_reset || (aac->supplement_adapter_info.SupportedOptions2 &
	  le32_to_cpu(AAC_OPTION_IGNORE_RESET)))
		goto out;
	host = aac->scsi_host_ptr;
	if (aac->thread->pid != current->pid)
		spin_lock_irqsave(host->host_lock, flagv);
	BlinkLED = _aac_reset_adapter(aac, 0);
	if (aac->thread->pid != current->pid)
		spin_unlock_irqrestore(host->host_lock, flagv);
	return BlinkLED;

out:
	aac->in_reset = 0;
	return BlinkLED;
}


/**
 *	aac_command_thread	-	command processing thread
 *	@data: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;
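
	/*
	 * Two periodic jobs are paced below: next_check_jiffies schedules the
	 * adapter health checks and next_jiffies the periodic host-time
	 * synchronization fib; 'difference' holds the shorter of the two
	 * sleep intervals.
	 */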

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1)
	{
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib_va;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib_va = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib_va = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);

		/*
		 *	Background activity
		 */
		if ((time_before(next_check_jiffies,next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies,next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timeval now;
			int ret;

			/* Don't even try to talk to the adapter if it's sick */
			ret = aac_check_health(dev);
			if (!ret && !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			do_gettimeofday(&now);

			/* Synchronize our watches */
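			/*
			 * Illustrative arithmetic: the expression below
			 * converts the microseconds remaining until the next
			 * second boundary into jiffies, rounded to the
			 * nearest tick, so the time fib is sent just after a
			 * whole second.
			 */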
			if (((1000000 - (1000000 / HZ)) > now.tv_usec)
			 && (now.tv_usec > (1000000 / HZ)))
				difference = (((1000000 - now.tv_usec) * HZ)
				  + 500000) / 1000000;
			else if (ret == 0) {
				struct fib *fibptr;

				if ((fibptr = aac_fib_alloc(dev))) {
					u32 * info;

					aac_fib_init(fibptr);

					info = (u32 *) fib_data(fibptr);
					if (now.tv_usec > 500000)
						++now.tv_sec;

					*info = cpu_to_le32(now.tv_sec);

					(void)aac_fib_send(SendHostTime,
						fibptr,
						sizeof(*info),
						FsaNormal,
						1, 1,
						NULL,
						NULL);
					aac_fib_complete(fibptr);
					aac_fib_free(fibptr);
				}
				difference = (long)(unsigned)update_interval*HZ;
			} else {
				/* retry shortly */
				difference = 10 * HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}
1699