xref: /openbmc/linux/drivers/scsi/aacraid/commsup.c (revision 565d76cb)
1 /*
2  *	Adaptec AAC series RAID controller driver
3  *	(c) Copyright 2001 Red Hat Inc.
4  *
5  * based on the old aacraid driver that is..
6  * Adaptec aacraid device driver for Linux.
7  *
8  * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2, or (at your option)
13  * any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; see the file COPYING.  If not, write to
22  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23  *
24  * Module Name:
25  *  commsup.c
26  *
27  * Abstract: Contains all routines that are required for FSA host/adapter
28  *    communication.
29  *
30  */
31 
32 #include <linux/kernel.h>
33 #include <linux/init.h>
34 #include <linux/types.h>
35 #include <linux/sched.h>
36 #include <linux/pci.h>
37 #include <linux/spinlock.h>
38 #include <linux/slab.h>
39 #include <linux/completion.h>
40 #include <linux/blkdev.h>
41 #include <linux/delay.h>
42 #include <linux/kthread.h>
43 #include <linux/interrupt.h>
44 #include <linux/semaphore.h>
45 #include <scsi/scsi.h>
46 #include <scsi/scsi_host.h>
47 #include <scsi/scsi_device.h>
48 #include <scsi/scsi_cmnd.h>
49 
50 #include "aacraid.h"
51 
52 /**
53  *	fib_map_alloc		-	allocate the fib objects
54  *	@dev: Adapter to allocate for
55  *
56  *	Allocate and map the shared PCI space for the FIB blocks used to
57  *	talk to the Adaptec firmware.
58  */
59 
60 static int fib_map_alloc(struct aac_dev *dev)
61 {
62 	dprintk((KERN_INFO
63 	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
64 	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
65 	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
66 	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
67 	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
68 	  &dev->hw_fib_pa))==NULL)
69 		return -ENOMEM;
70 	return 0;
71 }
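/*
 * Note: the FIB pool is one DMA-coherent region of
 * dev->max_fib_size * (can_queue + AAC_NUM_MGT_FIB) bytes; aac_fib_map_free()
 * below releases it using the same size expression.
 */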
72 
73 /**
74  *	aac_fib_map_free		-	free the fib objects
75  *	@dev: Adapter to free
76  *
77  *	Free the PCI mappings and the memory allocated for FIB blocks
78  *	on this adapter.
79  */
80 
81 void aac_fib_map_free(struct aac_dev *dev)
82 {
83 	pci_free_consistent(dev->pdev,
84 	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
85 	  dev->hw_fib_va, dev->hw_fib_pa);
86 	dev->hw_fib_va = NULL;
87 	dev->hw_fib_pa = 0;
88 }
89 
90 /**
91  *	aac_fib_setup	-	setup the fibs
92  *	@dev: Adapter to set up
93  *
94  *	Allocate the PCI space for the fibs, map it and then initialise the
95  *	fib area, the unmapped fib data and also the free list
96  */
97 
98 int aac_fib_setup(struct aac_dev * dev)
99 {
100 	struct fib *fibptr;
101 	struct hw_fib *hw_fib;
102 	dma_addr_t hw_fib_pa;
103 	int i;
104 
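	/*
	 * If the coherent allocation fails, keep halving the total number of
	 * FIBs (I/O commands plus the AAC_NUM_MGT_FIB management FIBs) and
	 * retry for as long as can_queue stays above 64 - AAC_NUM_MGT_FIB.
	 */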
105 	while (((i = fib_map_alloc(dev)) == -ENOMEM)
106 	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
107 		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
108 		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
109 	}
110 	if (i<0)
111 		return -ENOMEM;
112 
113 	hw_fib = dev->hw_fib_va;
114 	hw_fib_pa = dev->hw_fib_pa;
115 	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
116 	/*
117 	 *	Initialise the fibs
118 	 */
119 	for (i = 0, fibptr = &dev->fibs[i];
120 		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
121 		i++, fibptr++)
122 	{
123 		fibptr->dev = dev;
124 		fibptr->hw_fib_va = hw_fib;
125 		fibptr->data = (void *) fibptr->hw_fib_va->data;
126 		fibptr->next = fibptr+1;	/* Forward chain the fibs */
127 		sema_init(&fibptr->event_wait, 0);
128 		spin_lock_init(&fibptr->event_lock);
129 		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
130 		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
131 		fibptr->hw_fib_pa = hw_fib_pa;
132 		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
133 		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
134 	}
135 	/*
136 	 *	Add the fib chain to the free list
137 	 */
138 	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
139 	/*
140 	 *	Enable this to debug out of queue space
141 	 */
142 	dev->free_fib = &dev->fibs[0];
143 	return 0;
144 }
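/*
 * The net effect of aac_fib_setup() is that dev->fibs[i] describes the
 * hardware FIB at dev->hw_fib_va + i * dev->max_fib_size (physical address
 * dev->hw_fib_pa + i * dev->max_fib_size), with every fib forward-chained
 * through ->next into the dev->free_fib list that aac_fib_alloc() hands out.
 */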
145 
146 /**
147  *	aac_fib_alloc	-	allocate a fib
148  *	@dev: Adapter to allocate the fib for
149  *
150  *	Allocate a fib from the adapter fib pool. If the pool is empty we
151  *	return NULL.
152  */
153 
154 struct fib *aac_fib_alloc(struct aac_dev *dev)
155 {
156 	struct fib * fibptr;
157 	unsigned long flags;
158 	spin_lock_irqsave(&dev->fib_lock, flags);
159 	fibptr = dev->free_fib;
160 	if(!fibptr){
161 		spin_unlock_irqrestore(&dev->fib_lock, flags);
162 		return fibptr;
163 	}
164 	dev->free_fib = fibptr->next;
165 	spin_unlock_irqrestore(&dev->fib_lock, flags);
166 	/*
167 	 *	Set the proper node type code and node byte size
168 	 */
169 	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
170 	fibptr->size = sizeof(struct fib);
171 	/*
172 	 *	Null out fields that depend on being zero at the start of
173 	 *	each I/O
174 	 */
175 	fibptr->hw_fib_va->header.XferState = 0;
176 	fibptr->flags = 0;
177 	fibptr->callback = NULL;
178 	fibptr->callback_data = NULL;
179 
180 	return fibptr;
181 }
182 
183 /**
184  *	aac_fib_free	-	free a fib
185  *	@fibptr: fib to free up
186  *
187  *	Frees up a fib and places it on the appropriate queue
188  */
189 
190 void aac_fib_free(struct fib *fibptr)
191 {
192 	unsigned long flags, flagsv;
193 
194 	spin_lock_irqsave(&fibptr->event_lock, flagsv);
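	/*
	 * done == 2 marks a fib whose waiter gave up (see the -ERESTARTSYS
	 * path in aac_fib_send()); the adapter may still respond to it, so
	 * leave it out of the free pool rather than recycling it here.
	 */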
195 	if (fibptr->done == 2) {
196 		spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
197 		return;
198 	}
199 	spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
200 
201 	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
202 	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
203 		aac_config.fib_timeouts++;
204 	if (fibptr->hw_fib_va->header.XferState != 0) {
205 		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
206 			 (void*)fibptr,
207 			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
208 	}
209 	fibptr->next = fibptr->dev->free_fib;
210 	fibptr->dev->free_fib = fibptr;
211 	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
212 }
213 
214 /**
215  *	aac_fib_init	-	initialise a fib
216  *	@fibptr: The fib to initialize
217  *
218  *	Set up the generic fib fields ready for use
219  */
220 
221 void aac_fib_init(struct fib *fibptr)
222 {
223 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
224 
225 	hw_fib->header.StructType = FIB_MAGIC;
226 	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
227 	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
228 	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
229 	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
230 	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
231 }
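/*
 * Illustrative only: a typical host-originated, synchronous request pairs
 * the routines above roughly as follows (error handling trimmed; see
 * aac_reset_adapter() and aac_command_thread() below for real callers):
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	... fill in fib_data(fibptr) ...
 *	status = aac_fib_send(ContainerCommand, fibptr, datasize,
 *			      FsaNormal, 1, 1, NULL, NULL);
 *	if (status >= 0)
 *		aac_fib_complete(fibptr);
 *	if (status != -ERESTARTSYS)
 *		aac_fib_free(fibptr);
 */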
232 
233 /**
234  *	fib_dealloc		-	deallocate a fib
235  *	@fibptr: fib to deallocate
236  *
237  *	Will deallocate and return to the free pool the FIB pointed to by the
238  *	caller.
239  */
240 
241 static void fib_dealloc(struct fib * fibptr)
242 {
243 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
244 	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
245 	hw_fib->header.XferState = 0;
246 }
247 
248 /*
249  *	Communication primitives define and support the queuing method we use to
250  *	support host to adapter communication. All queue accesses happen through
251  *	these routines, which are the only routines that have knowledge of how
252  *	these queues are implemented.
253  */
254 
255 /**
256  *	aac_get_entry		-	get a queue entry
257  *	@dev: Adapter
258  *	@qid: Queue Number
259  *	@entry: Entry return
260  *	@index: Index return
261  *	@nonotify: notification control
262  *
263  *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
264  *	is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
265  *	returned.
266  */
267 
268 static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
269 {
270 	struct aac_queue * q;
271 	unsigned long idx;
272 
273 	/*
274 	 *	All of the queues wrap when they reach the end, so we check
275 	 *	to see if they have reached the end and if they have we just
276 	 *	set the index back to zero. This is a wrap. You could or off
277 	 *	the high bits in all updates but this is a bit faster I think.
278 	 */
279 
280 	q = &dev->queues->queue[qid];
281 
282 	idx = *index = le32_to_cpu(*(q->headers.producer));
283 	/* Interrupt Moderation, only interrupt for first two entries */
284 	if (idx != le32_to_cpu(*(q->headers.consumer))) {
285 		if (--idx == 0) {
286 			if (qid == AdapNormCmdQueue)
287 				idx = ADAP_NORM_CMD_ENTRIES;
288 			else
289 				idx = ADAP_NORM_RESP_ENTRIES;
290 		}
291 		if (idx != le32_to_cpu(*(q->headers.consumer)))
292 			*nonotify = 1;
293 	}
294 
295 	if (qid == AdapNormCmdQueue) {
296 		if (*index >= ADAP_NORM_CMD_ENTRIES)
297 			*index = 0; /* Wrap to front of the Producer Queue. */
298 	} else {
299 		if (*index >= ADAP_NORM_RESP_ENTRIES)
300 			*index = 0; /* Wrap to front of the Producer Queue. */
301 	}
302 
303 	/* Queue is full */
304 	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
305 		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
306 				qid, q->numpending);
307 		return 0;
308 	} else {
309 		*entry = q->base + *index;
310 		return 1;
311 	}
312 }
313 
314 /**
315  *	aac_queue_get		-	get the next free QE
316  *	@dev: Adapter
317  *	@index: Returned index
318  *	@qid: Queue number
319  *	@hw_fib: Fib to associate with the queue entry
320  *	@wait: Wait if queue full
321  *	@fibptr: Driver fib object to go with fib
322  *	@nonotify: Don't notify the adapter
323  *
324  *	Gets the next free QE off the requested priority adapter command
325  *	queue and associates the Fib with the QE. The QE represented by
326  *	index is ready to insert on the queue when this routine returns
327  *	success.
328  */
329 
330 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
331 {
332 	struct aac_entry * entry = NULL;
333 	int map = 0;
334 
335 	if (qid == AdapNormCmdQueue) {
336 		/*  if no entries wait for some if caller wants to */
337 		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
338 			printk(KERN_ERR "GetEntries failed\n");
339 		}
340 		/*
341 		 *	Setup queue entry with a command, status and fib mapped
342 		 */
343 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
344 		map = 1;
345 	} else {
346 		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
347 			/* if no entries wait for some if caller wants to */
348 		}
349 		/*
350 		 *	Setup queue entry with command, status and fib mapped
351 		 */
352 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
353 		entry->addr = hw_fib->header.SenderFibAddress;
354 			/* Restore adapter's pointer to the FIB */
355 		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
356 		map = 0;
357 	}
358 	/*
359 	 *	If MapFib is true then we need to map the Fib and put pointers
360 	 *	in the queue entry.
361 	 */
362 	if (map)
363 		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
364 	return 0;
365 }
366 
367 /*
368  *	Define the highest level of host to adapter communication routines.
369  *	These routines will support host to adapter FS communication. These
370  *	routines have no knowledge of the communication method used. This level
371  *	sends and receives FIBs. This level has no knowledge of how these FIBs
372  *	get passed back and forth.
373  */
374 
375 /**
376  *	aac_fib_send	-	send a fib to the adapter
377  *	@command: Command to send
378  *	@fibptr: The fib
379  *	@size: Size of fib data area
380  *	@priority: Priority of Fib
381  *	@wait: Async/sync select
382  *	@reply: True if a reply is wanted
383  *	@callback: Called with reply
384  *	@callback_data: Passed to callback
385  *
386  *	Sends the requested FIB to the adapter and optionally will wait for a
387  *	response FIB. If the caller does not wish to wait for a response then
388  *	an event to wait on must be supplied. This event will be set when a
389  *	response FIB is received from the adapter.
390  */
391 
392 int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
393 		int priority, int wait, int reply, fib_callback callback,
394 		void *callback_data)
395 {
396 	struct aac_dev * dev = fibptr->dev;
397 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
398 	unsigned long flags = 0;
399 	unsigned long qflags;
400 	unsigned long mflags = 0;
401 
402 
403 	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
404 		return -EBUSY;
405 	/*
406 	 *	There are 5 cases with the wait and response requested flags.
407 	 *	The only invalid cases are if the caller requests to wait and
408 	 *	does not request a response and if the caller does not want a
409 	 *	response and the Fib is not allocated from pool. If a response
410 	 *	is not requested the Fib will just be deallocated by the DPC
411 	 *	routine when the response comes back from the adapter. No
412 	 *	further processing will be done besides deleting the Fib. We
413 	 *	will have a debug mode where the adapter can notify the host
414 	 *	it had a problem and the host can log that fact.
415 	 */
416 	fibptr->flags = 0;
417 	if (wait && !reply) {
418 		return -EINVAL;
419 	} else if (!wait && reply) {
420 		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
421 		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
422 	} else if (!wait && !reply) {
423 		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
424 		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
425 	} else if (wait && reply) {
426 		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
427 		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
428 	}
429 	/*
430 	 *	Map the fib into 32bits by using the fib number
431 	 */
432 
433 	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
434 	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
435 	/*
436 	 *	Set FIB state to indicate where it came from and if we want a
437 	 *	response from the adapter. Also load the command from the
438 	 *	caller.
439 	 *
440 	 *	Map the hw fib pointer as a 32bit value
441 	 */
442 	hw_fib->header.Command = cpu_to_le16(command);
443 	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
444 	fibptr->hw_fib_va->header.Flags = 0;	/* 0 the flags field - internal only*/
445 	/*
446 	 *	Set the size of the Fib we want to send to the adapter
447 	 */
448 	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
449 	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
450 		return -EMSGSIZE;
451 	}
452 	/*
453 	 *	Get a queue entry, connect the FIB to it and send a notify to
454 	 *	the adapter that a command is ready.
455 	 */
456 	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
457 
458 	/*
459 	 *	Fill in the Callback and CallbackContext if we are not
460 	 *	going to wait.
461 	 */
462 	if (!wait) {
463 		fibptr->callback = callback;
464 		fibptr->callback_data = callback_data;
465 		fibptr->flags = FIB_CONTEXT_FLAG;
466 	}
467 
468 	fibptr->done = 0;
469 
470 	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
471 
472 	dprintk((KERN_DEBUG "Fib contents:.\n"));
473 	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
474 	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
475 	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
476 	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
477 	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
478 	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
479 
480 	if (!dev->queues)
481 		return -EBUSY;
482 
483 	if (wait) {
484 
485 		spin_lock_irqsave(&dev->manage_lock, mflags);
486 		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
487 			printk(KERN_INFO "No management Fibs Available:%d\n",
488 						dev->management_fib_count);
489 			spin_unlock_irqrestore(&dev->manage_lock, mflags);
490 			return -EBUSY;
491 		}
492 		dev->management_fib_count++;
493 		spin_unlock_irqrestore(&dev->manage_lock, mflags);
494 		spin_lock_irqsave(&fibptr->event_lock, flags);
495 	}
496 
497 	if (aac_adapter_deliver(fibptr) != 0) {
498 		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
499 		if (wait) {
500 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
501 			spin_lock_irqsave(&dev->manage_lock, mflags);
502 			dev->management_fib_count--;
503 			spin_unlock_irqrestore(&dev->manage_lock, mflags);
504 		}
505 		return -EBUSY;
506 	}
507 
508 
509 	/*
510 	 *	If the caller wanted us to wait for response wait now.
511 	 */
512 
513 	if (wait) {
514 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
515 		/* Only set for first known interruptible command */
516 		if (wait < 0) {
517 			/*
518 			 * *VERY* Dangerous to time out a command, the
519 			 * assumption is made that we have no hope of
520 			 * functioning because an interrupt routing or other
521 			 * hardware failure has occurred.
522 			 */
523 			unsigned long count = 36000000L; /* 3 minutes */
524 			while (down_trylock(&fibptr->event_wait)) {
525 				int blink;
526 				if (--count == 0) {
527 					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
528 					spin_lock_irqsave(q->lock, qflags);
529 					q->numpending--;
530 					spin_unlock_irqrestore(q->lock, qflags);
531 					if (wait == -1) {
532 	        				printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
533 						  "Usually a result of a PCI interrupt routing problem;\n"
534 						  "update mother board BIOS or consider utilizing one of\n"
535 						  "the SAFE mode kernel options (acpi, apic etc)\n");
536 					}
537 					return -ETIMEDOUT;
538 				}
539 				if ((blink = aac_adapter_check_health(dev)) > 0) {
540 					if (wait == -1) {
541 	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
542 						  "Usually a result of a serious unrecoverable hardware problem\n",
543 						  blink);
544 					}
545 					return -EFAULT;
546 				}
547 				udelay(5);
548 			}
549 		} else if (down_interruptible(&fibptr->event_wait)) {
550 			/* Do nothing ... satisfy
551 			 * down_interruptible must_check */
552 		}
553 
554 		spin_lock_irqsave(&fibptr->event_lock, flags);
555 		if (fibptr->done == 0) {
556 			fibptr->done = 2; /* Tell interrupt we aborted */
557 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
558 			return -ERESTARTSYS;
559 		}
560 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
561 		BUG_ON(fibptr->done == 0);
562 
563 		if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
564 			return -ETIMEDOUT;
565 		return 0;
566 	}
567 	/*
568 	 *	If the user does not want a response then return success, otherwise
569 	 *	return pending
570 	 */
571 	if (reply)
572 		return -EINPROGRESS;
573 	else
574 		return 0;
575 }
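/*
 * Return value summary for aac_fib_send(): 0 for a completed synchronous
 * request or a fire-and-forget request, -EINPROGRESS for an asynchronous
 * request that expects a reply (the callback runs later), otherwise a
 * negative error (-EBUSY, -EINVAL, -EMSGSIZE, -ETIMEDOUT, -EFAULT or
 * -ERESTARTSYS).  On -ERESTARTSYS the fib is marked done == 2 and callers
 * must not aac_fib_free() it until the adapter's response arrives.
 */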
576 
577 /**
578  *	aac_consumer_get	-	get the top of the queue
579  *	@dev: Adapter
580  *	@q: Queue
581  *	@entry: Return entry
582  *
583  *	Returns a pointer to the entry on the top of the requested queue that
584  *	we are a consumer of, placing its address in @entry. It does
585  *	not change the state of the queue.
586  */
587 
588 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
589 {
590 	u32 index;
591 	int status;
592 	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
593 		status = 0;
594 	} else {
595 		/*
596 		 *	The consumer index must be wrapped if we have reached
597 		 *	the end of the queue, else we just use the entry
598 		 *	pointed to by the header index
599 		 */
600 		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
601 			index = 0;
602 		else
603 			index = le32_to_cpu(*q->headers.consumer);
604 		*entry = q->base + index;
605 		status = 1;
606 	}
607 	return(status);
608 }
609 
610 /**
611  *	aac_consumer_free	-	free consumer entry
612  *	@dev: Adapter
613  *	@q: Queue
614  *	@qid: Queue ident
615  *
616  *	Frees up the current top of the queue we are a consumer of. If the
617  *	queue was full notify the producer that the queue is no longer full.
618  */
619 
620 void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
621 {
622 	int wasfull = 0;
623 	u32 notify;
624 
625 	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
626 		wasfull = 1;
627 
628 	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
629 		*q->headers.consumer = cpu_to_le32(1);
630 	else
631 		le32_add_cpu(q->headers.consumer, 1);
632 
633 	if (wasfull) {
634 		switch (qid) {
635 
636 		case HostNormCmdQueue:
637 			notify = HostNormCmdNotFull;
638 			break;
639 		case HostNormRespQueue:
640 			notify = HostNormRespNotFull;
641 			break;
642 		default:
643 			BUG();
644 			return;
645 		}
646 		aac_adapter_notify(dev, notify);
647 	}
648 }
649 
650 /**
651  *	aac_fib_adapter_complete	-	complete adapter issued fib
652  *	@fibptr: fib to complete
653  *	@size: size of fib
654  *
655  *	Will do all necessary work to complete a FIB that was sent from
656  *	the adapter.
657  */
658 
659 int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
660 {
661 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
662 	struct aac_dev * dev = fibptr->dev;
663 	struct aac_queue * q;
664 	unsigned long nointr = 0;
665 	unsigned long qflags;
666 
667 	if (hw_fib->header.XferState == 0) {
668 		if (dev->comm_interface == AAC_COMM_MESSAGE)
669 			kfree (hw_fib);
670 		return 0;
671 	}
672 	/*
673 	 *	If we plan to do anything check the structure type first.
674 	 */
675 	if (hw_fib->header.StructType != FIB_MAGIC) {
676 		if (dev->comm_interface == AAC_COMM_MESSAGE)
677 			kfree (hw_fib);
678 		return -EINVAL;
679 	}
680 	/*
681 	 *	This block handles the case where the adapter had sent us a
682 	 *	command and we have finished processing the command. We
683 	 *	call completeFib when we are done processing the command
684 	 *	and want to send a response back to the adapter. This will
685 	 *	send the completed cdb to the adapter.
686 	 */
687 	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
688 		if (dev->comm_interface == AAC_COMM_MESSAGE) {
689 			kfree (hw_fib);
690 		} else {
691 			u32 index;
692 			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
693 			if (size) {
694 				size += sizeof(struct aac_fibhdr);
695 				if (size > le16_to_cpu(hw_fib->header.SenderSize))
696 					return -EMSGSIZE;
697 				hw_fib->header.Size = cpu_to_le16(size);
698 			}
699 			q = &dev->queues->queue[AdapNormRespQueue];
700 			spin_lock_irqsave(q->lock, qflags);
701 			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
702 			*(q->headers.producer) = cpu_to_le32(index + 1);
703 			spin_unlock_irqrestore(q->lock, qflags);
704 			if (!(nointr & (int)aac_config.irq_mod))
705 				aac_adapter_notify(dev, AdapNormRespQueue);
706 		}
707 	} else {
708 		printk(KERN_WARNING "aac_fib_adapter_complete: "
709 			"Unknown xferstate detected.\n");
710 		BUG();
711 	}
712 	return 0;
713 }
714 
715 /**
716  *	aac_fib_complete	-	fib completion handler
717  *	@fibptr: FIB to complete
718  *
719  *	Will do all necessary work to complete a FIB.
720  */
721 
722 int aac_fib_complete(struct fib *fibptr)
723 {
724 	unsigned long flags;
725 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
726 
727 	/*
728 	 *	Check for a fib which has already been completed
729 	 */
730 
731 	if (hw_fib->header.XferState == 0)
732 		return 0;
733 	/*
734 	 *	If we plan to do anything check the structure type first.
735 	 */
736 
737 	if (hw_fib->header.StructType != FIB_MAGIC)
738 		return -EINVAL;
739 	/*
740 	 *	This block completes a cdb which originated on the host and we
741 	 *	just need to deallocate the cdb or reinit it. At this point the
742 	 *	command is complete that we had sent to the adapter and this
743 	 *	cdb could be reused.
744 	 */
745 	spin_lock_irqsave(&fibptr->event_lock, flags);
746 	if (fibptr->done == 2) {
747 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
748 		return 0;
749 	}
750 	spin_unlock_irqrestore(&fibptr->event_lock, flags);
751 
752 	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
753 		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
754 	{
755 		fib_dealloc(fibptr);
756 	}
757 	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
758 	{
759 		/*
760 		 *	This handles the case when the host has aborted the I/O
761 		 *	to the adapter because the adapter is not responding
762 		 */
763 		fib_dealloc(fibptr);
764 	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
765 		fib_dealloc(fibptr);
766 	} else {
767 		BUG();
768 	}
769 	return 0;
770 }
771 
772 /**
773  *	aac_printf	-	handle printf from firmware
774  *	@dev: Adapter
775  *	@val: Message info
776  *
777  *	Print a message passed to us by the controller firmware on the
778  *	Adaptec board
779  */
780 
781 void aac_printf(struct aac_dev *dev, u32 val)
782 {
783 	char *cp = dev->printfbuf;
784 	if (dev->printf_enabled)
785 	{
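		/*
		 * val packs the message length in the low 16 bits and the log
		 * level in the high 16 bits; the message text itself is read
		 * from dev->printfbuf.
		 */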
786 		int length = val & 0xffff;
787 		int level = (val >> 16) & 0xffff;
788 
789 		/*
790 		 *	The size of the printfbuf is set in port.c
791 		 *	There is no variable or define for it
792 		 */
793 		if (length > 255)
794 			length = 255;
795 		if (cp[length] != 0)
796 			cp[length] = 0;
797 		if (level == LOG_AAC_HIGH_ERROR)
798 			printk(KERN_WARNING "%s:%s", dev->name, cp);
799 		else
800 			printk(KERN_INFO "%s:%s", dev->name, cp);
801 	}
802 	memset(cp, 0, 256);
803 }
804 
805 
806 /**
807  *	aac_handle_aif		-	Handle a message from the firmware
808  *	@dev: Which adapter this fib is from
809  *	@fibptr: Pointer to fibptr from adapter
810  *
811  *	This routine handles a driver notify fib from the adapter and
812  *	dispatches it to the appropriate routine for handling.
813  */
814 
815 #define AIF_SNIFF_TIMEOUT	(30*HZ)
816 static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
817 {
818 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
819 	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
820 	u32 channel, id, lun, container;
821 	struct scsi_device *device;
822 	enum {
823 		NOTHING,
824 		DELETE,
825 		ADD,
826 		CHANGE
827 	} device_config_needed = NOTHING;
828 
829 	/* Sniff for container changes */
830 
831 	if (!dev || !dev->fsa_dev)
832 		return;
833 	container = channel = id = lun = (u32)-1;
834 
835 	/*
836 	 *	We have set this up to try and minimize the number of
837 	 * re-configures that take place. As a result of this when
838 	 * certain AIF's come in we will set a flag waiting for another
839 	 * type of AIF before setting the re-config flag.
840 	 */
841 	switch (le32_to_cpu(aifcmd->command)) {
842 	case AifCmdDriverNotify:
843 		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
844 		/*
845 		 *	Morph or Expand complete
846 		 */
847 		case AifDenMorphComplete:
848 		case AifDenVolumeExtendComplete:
849 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
850 			if (container >= dev->maximum_num_containers)
851 				break;
852 
853 			/*
854 			 *	Find the scsi_device associated with the SCSI
855 			 * address. Make sure we have the right array, and if
856 			 * so set the flag to initiate a new re-config once we
857 			 * see an AifEnConfigChange AIF come through.
858 			 */
859 
860 			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
861 				device = scsi_device_lookup(dev->scsi_host_ptr,
862 					CONTAINER_TO_CHANNEL(container),
863 					CONTAINER_TO_ID(container),
864 					CONTAINER_TO_LUN(container));
865 				if (device) {
866 					dev->fsa_dev[container].config_needed = CHANGE;
867 					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
868 					dev->fsa_dev[container].config_waiting_stamp = jiffies;
869 					scsi_device_put(device);
870 				}
871 			}
872 		}
873 
874 		/*
875 		 *	If we are waiting on something and this happens to be
876 		 * that thing then set the re-configure flag.
877 		 */
878 		if (container != (u32)-1) {
879 			if (container >= dev->maximum_num_containers)
880 				break;
881 			if ((dev->fsa_dev[container].config_waiting_on ==
882 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
883 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
884 				dev->fsa_dev[container].config_waiting_on = 0;
885 		} else for (container = 0;
886 		    container < dev->maximum_num_containers; ++container) {
887 			if ((dev->fsa_dev[container].config_waiting_on ==
888 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
889 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
890 				dev->fsa_dev[container].config_waiting_on = 0;
891 		}
892 		break;
893 
894 	case AifCmdEventNotify:
895 		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
896 		case AifEnBatteryEvent:
897 			dev->cache_protected =
898 				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
899 			break;
900 		/*
901 		 *	Add an Array.
902 		 */
903 		case AifEnAddContainer:
904 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
905 			if (container >= dev->maximum_num_containers)
906 				break;
907 			dev->fsa_dev[container].config_needed = ADD;
908 			dev->fsa_dev[container].config_waiting_on =
909 				AifEnConfigChange;
910 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
911 			break;
912 
913 		/*
914 		 *	Delete an Array.
915 		 */
916 		case AifEnDeleteContainer:
917 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
918 			if (container >= dev->maximum_num_containers)
919 				break;
920 			dev->fsa_dev[container].config_needed = DELETE;
921 			dev->fsa_dev[container].config_waiting_on =
922 				AifEnConfigChange;
923 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
924 			break;
925 
926 		/*
927 		 *	Container change detected. If we currently are not
928 		 * waiting on something else, setup to wait on a Config Change.
929 		 */
930 		case AifEnContainerChange:
931 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
932 			if (container >= dev->maximum_num_containers)
933 				break;
934 			if (dev->fsa_dev[container].config_waiting_on &&
935 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
936 				break;
937 			dev->fsa_dev[container].config_needed = CHANGE;
938 			dev->fsa_dev[container].config_waiting_on =
939 				AifEnConfigChange;
940 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
941 			break;
942 
943 		case AifEnConfigChange:
944 			break;
945 
946 		case AifEnAddJBOD:
947 		case AifEnDeleteJBOD:
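			/*
			 * For JBOD events data[1] is a packed SCSI address,
			 * decoded below: bits 31-28 must be zero, bits 27-24
			 * are the channel, bits 23-16 the lun and bits 15-0
			 * the target id.
			 */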
948 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
949 			if ((container >> 28)) {
950 				container = (u32)-1;
951 				break;
952 			}
953 			channel = (container >> 24) & 0xF;
954 			if (channel >= dev->maximum_num_channels) {
955 				container = (u32)-1;
956 				break;
957 			}
958 			id = container & 0xFFFF;
959 			if (id >= dev->maximum_num_physicals) {
960 				container = (u32)-1;
961 				break;
962 			}
963 			lun = (container >> 16) & 0xFF;
964 			container = (u32)-1;
965 			channel = aac_phys_to_logical(channel);
966 			device_config_needed =
967 			  (((__le32 *)aifcmd->data)[0] ==
968 			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
969 			if (device_config_needed == ADD) {
970 				device = scsi_device_lookup(dev->scsi_host_ptr,
971 					channel,
972 					id,
973 					lun);
974 				if (device) {
975 					scsi_remove_device(device);
976 					scsi_device_put(device);
977 				}
978 			}
979 			break;
980 
981 		case AifEnEnclosureManagement:
982 			/*
983 			 * If in JBOD mode, automatic exposure of a new
984 			 * physical target is suppressed until it is configured.
985 			 */
986 			if (dev->jbod)
987 				break;
988 			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
989 			case EM_DRIVE_INSERTION:
990 			case EM_DRIVE_REMOVAL:
991 				container = le32_to_cpu(
992 					((__le32 *)aifcmd->data)[2]);
993 				if ((container >> 28)) {
994 					container = (u32)-1;
995 					break;
996 				}
997 				channel = (container >> 24) & 0xF;
998 				if (channel >= dev->maximum_num_channels) {
999 					container = (u32)-1;
1000 					break;
1001 				}
1002 				id = container & 0xFFFF;
1003 				lun = (container >> 16) & 0xFF;
1004 				container = (u32)-1;
1005 				if (id >= dev->maximum_num_physicals) {
1006 					/* legacy dev_t ? */
1007 					if ((0x2000 <= id) || lun || channel ||
1008 					  ((channel = (id >> 7) & 0x3F) >=
1009 					  dev->maximum_num_channels))
1010 						break;
1011 					lun = (id >> 4) & 7;
1012 					id &= 0xF;
1013 				}
1014 				channel = aac_phys_to_logical(channel);
1015 				device_config_needed =
1016 				  (((__le32 *)aifcmd->data)[3]
1017 				    == cpu_to_le32(EM_DRIVE_INSERTION)) ?
1018 				  ADD : DELETE;
1019 				break;
1020 			}
1021 			break;
1022 		}
1023 
1024 		/*
1025 		 *	If we are waiting on something and this happens to be
1026 		 * that thing then set the re-configure flag.
1027 		 */
1028 		if (container != (u32)-1) {
1029 			if (container >= dev->maximum_num_containers)
1030 				break;
1031 			if ((dev->fsa_dev[container].config_waiting_on ==
1032 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1033 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1034 				dev->fsa_dev[container].config_waiting_on = 0;
1035 		} else for (container = 0;
1036 		    container < dev->maximum_num_containers; ++container) {
1037 			if ((dev->fsa_dev[container].config_waiting_on ==
1038 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1039 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1040 				dev->fsa_dev[container].config_waiting_on = 0;
1041 		}
1042 		break;
1043 
1044 	case AifCmdJobProgress:
1045 		/*
1046 		 *	These are job progress AIF's. When a Clear is being
1047 		 * done on a container it is initially created then hidden from
1048 		 * the OS. When the clear completes we don't get a config
1049 		 * change so we monitor the job status complete on a clear then
1050 		 * wait for a container change.
1051 		 */
1052 
1053 		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1054 		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
1055 		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
1056 			for (container = 0;
1057 			    container < dev->maximum_num_containers;
1058 			    ++container) {
1059 				/*
1060 				 * Stomp on all config sequencing for all
1061 				 * containers?
1062 				 */
1063 				dev->fsa_dev[container].config_waiting_on =
1064 					AifEnContainerChange;
1065 				dev->fsa_dev[container].config_needed = ADD;
1066 				dev->fsa_dev[container].config_waiting_stamp =
1067 					jiffies;
1068 			}
1069 		}
1070 		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1071 		    ((__le32 *)aifcmd->data)[6] == 0 &&
1072 		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
1073 			for (container = 0;
1074 			    container < dev->maximum_num_containers;
1075 			    ++container) {
1076 				/*
1077 				 * Stomp on all config sequencing for all
1078 				 * containers?
1079 				 */
1080 				dev->fsa_dev[container].config_waiting_on =
1081 					AifEnContainerChange;
1082 				dev->fsa_dev[container].config_needed = DELETE;
1083 				dev->fsa_dev[container].config_waiting_stamp =
1084 					jiffies;
1085 			}
1086 		}
1087 		break;
1088 	}
1089 
1090 	container = 0;
1091 retry_next:
1092 	if (device_config_needed == NOTHING)
1093 	for (; container < dev->maximum_num_containers; ++container) {
1094 		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1095 			(dev->fsa_dev[container].config_needed != NOTHING) &&
1096 			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1097 			device_config_needed =
1098 				dev->fsa_dev[container].config_needed;
1099 			dev->fsa_dev[container].config_needed = NOTHING;
1100 			channel = CONTAINER_TO_CHANNEL(container);
1101 			id = CONTAINER_TO_ID(container);
1102 			lun = CONTAINER_TO_LUN(container);
1103 			break;
1104 		}
1105 	}
1106 	if (device_config_needed == NOTHING)
1107 		return;
1108 
1109 	/*
1110 	 *	If we decided that a re-configuration needs to be done,
1111 	 * schedule it here on the way out the door, please close the door
1112 	 * behind you.
1113 	 */
1114 
1115 	/*
1116 	 *	Find the scsi_device associated with the SCSI address,
1117 	 * and mark it as changed, invalidating the cache. This deals
1118 	 * with changes to existing device IDs.
1119 	 */
1120 
1121 	if (!dev || !dev->scsi_host_ptr)
1122 		return;
1123 	/*
1124 	 * force reload of disk info via aac_probe_container
1125 	 */
1126 	if ((channel == CONTAINER_CHANNEL) &&
1127 	  (device_config_needed != NOTHING)) {
1128 		if (dev->fsa_dev[container].valid == 1)
1129 			dev->fsa_dev[container].valid = 2;
1130 		aac_probe_container(dev, container);
1131 	}
1132 	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
1133 	if (device) {
1134 		switch (device_config_needed) {
1135 		case DELETE:
1136 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1137 			scsi_remove_device(device);
1138 #else
1139 			if (scsi_device_online(device)) {
1140 				scsi_device_set_state(device, SDEV_OFFLINE);
1141 				sdev_printk(KERN_INFO, device,
1142 					"Device offlined - %s\n",
1143 					(channel == CONTAINER_CHANNEL) ?
1144 						"array deleted" :
1145 						"enclosure services event");
1146 			}
1147 #endif
1148 			break;
1149 		case ADD:
1150 			if (!scsi_device_online(device)) {
1151 				sdev_printk(KERN_INFO, device,
1152 					"Device online - %s\n",
1153 					(channel == CONTAINER_CHANNEL) ?
1154 						"array created" :
1155 						"enclosure services event");
1156 				scsi_device_set_state(device, SDEV_RUNNING);
1157 			}
1158 			/* FALLTHRU */
1159 		case CHANGE:
1160 			if ((channel == CONTAINER_CHANNEL)
1161 			 && (!dev->fsa_dev[container].valid)) {
1162 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1163 				scsi_remove_device(device);
1164 #else
1165 				if (!scsi_device_online(device))
1166 					break;
1167 				scsi_device_set_state(device, SDEV_OFFLINE);
1168 				sdev_printk(KERN_INFO, device,
1169 					"Device offlined - %s\n",
1170 					"array failed");
1171 #endif
1172 				break;
1173 			}
1174 			scsi_rescan_device(&device->sdev_gendev);
1175 
1176 		default:
1177 			break;
1178 		}
1179 		scsi_device_put(device);
1180 		device_config_needed = NOTHING;
1181 	}
1182 	if (device_config_needed == ADD)
1183 		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
1184 	if (channel == CONTAINER_CHANNEL) {
1185 		container++;
1186 		device_config_needed = NOTHING;
1187 		goto retry_next;
1188 	}
1189 }
1190 
1191 static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1192 {
1193 	int index, quirks;
1194 	int retval;
1195 	struct Scsi_Host *host;
1196 	struct scsi_device *dev;
1197 	struct scsi_cmnd *command;
1198 	struct scsi_cmnd *command_list;
1199 	int jafo = 0;
1200 
1201 	/*
1202 	 * Assumptions:
1203 	 *	- host is locked, unless called by the aacraid thread.
1204 	 *	  (a matter of convenience, due to legacy issues surrounding
1205 	 *	  eh_host_adapter_reset).
1206 	 *	- in_reset is asserted, so no new i/o is getting to the
1207 	 *	  card.
1208 	 *	- The card is dead, or will be very shortly ;-/ so no new
1209 	 *	  commands are completing in the interrupt service.
1210 	 */
1211 	host = aac->scsi_host_ptr;
1212 	scsi_block_requests(host);
1213 	aac_adapter_disable_int(aac);
1214 	if (aac->thread->pid != current->pid) {
1215 		spin_unlock_irq(host->host_lock);
1216 		kthread_stop(aac->thread);
1217 		jafo = 1;
1218 	}
1219 
1220 	/*
1221 	 *	A positive health value means the adapter is in a known
1222 	 * DEAD/PANIC state and could be reset to `try again'.
1223 	 */
1224 	retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
1225 
1226 	if (retval)
1227 		goto out;
1228 
1229 	/*
1230 	 *	Loop through the fibs, close the synchronous FIBS
1231 	 */
1232 	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1233 		struct fib *fib = &aac->fibs[index];
1234 		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1235 		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
1236 			unsigned long flagv;
1237 			spin_lock_irqsave(&fib->event_lock, flagv);
1238 			up(&fib->event_wait);
1239 			spin_unlock_irqrestore(&fib->event_lock, flagv);
1240 			schedule();
1241 			retval = 0;
1242 		}
1243 	}
1244 	/* Give some extra time for ioctls to complete. */
1245 	if (retval == 0)
1246 		ssleep(2);
1247 	index = aac->cardtype;
1248 
1249 	/*
1250 	 * Re-initialize the adapter, first free resources, then carefully
1251 	 * apply the initialization sequence to come back again. The only risk
1252 	 * is a change in Firmware dropping cache; it is assumed the caller
1253 	 * will ensure that i/o is quiesced and the card is flushed in that
1254 	 * case.
1255 	 */
1256 	aac_fib_map_free(aac);
1257 	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1258 	aac->comm_addr = NULL;
1259 	aac->comm_phys = 0;
1260 	kfree(aac->queues);
1261 	aac->queues = NULL;
1262 	free_irq(aac->pdev->irq, aac);
1263 	kfree(aac->fsa_dev);
1264 	aac->fsa_dev = NULL;
1265 	quirks = aac_get_driver_ident(index)->quirks;
1266 	if (quirks & AAC_QUIRK_31BIT) {
1267 		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
1268 		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
1269 			goto out;
1270 	} else {
1271 		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
1272 		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
1273 			goto out;
1274 	}
1275 	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1276 		goto out;
1277 	if (quirks & AAC_QUIRK_31BIT)
1278 		if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
1279 			goto out;
1280 	if (jafo) {
1281 		aac->thread = kthread_run(aac_command_thread, aac, aac->name);
1282 		if (IS_ERR(aac->thread)) {
1283 			retval = PTR_ERR(aac->thread);
1284 			goto out;
1285 		}
1286 	}
1287 	(void)aac_get_adapter_info(aac);
1288 	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1289 		host->sg_tablesize = 34;
1290 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1291 	}
1292 	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1293 		host->sg_tablesize = 17;
1294 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1295 	}
1296 	aac_get_config_status(aac, 1);
1297 	aac_get_containers(aac);
1298 	/*
1299 	 * This is where the assumption that the Adapter is quiesced
1300 	 * is important.
1301 	 */
1302 	command_list = NULL;
1303 	__shost_for_each_device(dev, host) {
1304 		unsigned long flags;
1305 		spin_lock_irqsave(&dev->list_lock, flags);
1306 		list_for_each_entry(command, &dev->cmd_list, list)
1307 			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1308 				command->SCp.buffer = (struct scatterlist *)command_list;
1309 				command_list = command;
1310 			}
1311 		spin_unlock_irqrestore(&dev->list_lock, flags);
1312 	}
1313 	while ((command = command_list)) {
1314 		command_list = (struct scsi_cmnd *)command->SCp.buffer;
1315 		command->SCp.buffer = NULL;
1316 		command->result = DID_OK << 16
1317 		  | COMMAND_COMPLETE << 8
1318 		  | SAM_STAT_TASK_SET_FULL;
1319 		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1320 		command->scsi_done(command);
1321 	}
1322 	retval = 0;
1323 
1324 out:
1325 	aac->in_reset = 0;
1326 	scsi_unblock_requests(host);
1327 	if (jafo) {
1328 		spin_lock_irq(host->host_lock);
1329 	}
1330 	return retval;
1331 }
1332 
1333 int aac_reset_adapter(struct aac_dev * aac, int forced)
1334 {
1335 	unsigned long flagv = 0;
1336 	int retval;
1337 	struct Scsi_Host * host;
1338 
1339 	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1340 		return -EBUSY;
1341 
1342 	if (aac->in_reset) {
1343 		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1344 		return -EBUSY;
1345 	}
1346 	aac->in_reset = 1;
1347 	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1348 
1349 	/*
1350 	 * Wait for all commands to complete to this specific
1351 	 * target (block maximum 60 seconds). Although not necessary,
1352 	 * it does make us a good storage citizen.
1353 	 */
1354 	host = aac->scsi_host_ptr;
1355 	scsi_block_requests(host);
1356 	if (forced < 2) for (retval = 60; retval; --retval) {
1357 		struct scsi_device * dev;
1358 		struct scsi_cmnd * command;
1359 		int active = 0;
1360 
1361 		__shost_for_each_device(dev, host) {
1362 			spin_lock_irqsave(&dev->list_lock, flagv);
1363 			list_for_each_entry(command, &dev->cmd_list, list) {
1364 				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1365 					active++;
1366 					break;
1367 				}
1368 			}
1369 			spin_unlock_irqrestore(&dev->list_lock, flagv);
1370 			if (active)
1371 				break;
1372 
1373 		}
1374 		/*
1375 		 * We can exit if all the commands are complete
1376 		 */
1377 		if (active == 0)
1378 			break;
1379 		ssleep(1);
1380 	}
1381 
1382 	/* Quiesce build, flush cache, write through mode */
1383 	if (forced < 2)
1384 		aac_send_shutdown(aac);
1385 	spin_lock_irqsave(host->host_lock, flagv);
1386 	retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
1387 	spin_unlock_irqrestore(host->host_lock, flagv);
1388 
1389 	if ((forced < 2) && (retval == -ENODEV)) {
1390 		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1391 		struct fib * fibctx = aac_fib_alloc(aac);
1392 		if (fibctx) {
1393 			struct aac_pause *cmd;
1394 			int status;
1395 
1396 			aac_fib_init(fibctx);
1397 
1398 			cmd = (struct aac_pause *) fib_data(fibctx);
1399 
1400 			cmd->command = cpu_to_le32(VM_ContainerConfig);
1401 			cmd->type = cpu_to_le32(CT_PAUSE_IO);
1402 			cmd->timeout = cpu_to_le32(1);
1403 			cmd->min = cpu_to_le32(1);
1404 			cmd->noRescan = cpu_to_le32(1);
1405 			cmd->count = cpu_to_le32(0);
1406 
1407 			status = aac_fib_send(ContainerCommand,
1408 			  fibctx,
1409 			  sizeof(struct aac_pause),
1410 			  FsaNormal,
1411 			  -2 /* Timeout silently */, 1,
1412 			  NULL, NULL);
1413 
1414 			if (status >= 0)
1415 				aac_fib_complete(fibctx);
1416 			/* FIB should be freed only after getting
1417 			 * the response from the F/W */
1418 			if (status != -ERESTARTSYS)
1419 				aac_fib_free(fibctx);
1420 		}
1421 	}
1422 
1423 	return retval;
1424 }
1425 
1426 int aac_check_health(struct aac_dev * aac)
1427 {
1428 	int BlinkLED;
1429 	unsigned long time_now, flagv = 0;
1430 	struct list_head * entry;
1431 	struct Scsi_Host * host;
1432 
1433 	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
1434 	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1435 		return 0;
1436 
1437 	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1438 		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1439 		return 0; /* OK */
1440 	}
1441 
1442 	aac->in_reset = 1;
1443 
1444 	/* Fake up an AIF:
1445 	 *	aac_aifcmd.command = AifCmdEventNotify = 1
1446 	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
1447 	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
1448 	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1449 	 *	aac.aifcmd.data[2] = AifHighPriority = 3
1450 	 *	aac.aifcmd.data[3] = BlinkLED
1451 	 */
1452 
1453 	time_now = jiffies/HZ;
1454 	entry = aac->fib_list.next;
1455 
1456 	/*
1457 	 * For each Context that is on the
1458 	 * fibctxList, make a copy of the
1459 	 * fib, and then set the event to wake up the
1460 	 * thread that is waiting for it.
1461 	 */
1462 	while (entry != &aac->fib_list) {
1463 		/*
1464 		 * Extract the fibctx
1465 		 */
1466 		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1467 		struct hw_fib * hw_fib;
1468 		struct fib * fib;
1469 		/*
1470 		 * Check if the queue is getting
1471 		 * backlogged
1472 		 */
1473 		if (fibctx->count > 20) {
1474 			/*
1475 			 * It's *not* jiffies folks,
1476 			 * but jiffies / HZ, so do not
1477 			 * panic ...
1478 			 */
1479 			u32 time_last = fibctx->jiffies;
1480 			/*
1481 			 * Has it been > 2 minutes
1482 			 * since the last read off
1483 			 * the queue?
1484 			 */
1485 			if ((time_now - time_last) > aif_timeout) {
1486 				entry = entry->next;
1487 				aac_close_fib_context(aac, fibctx);
1488 				continue;
1489 			}
1490 		}
1491 		/*
1492 		 * Warning: no sleep allowed while
1493 		 * holding spinlock
1494 		 */
1495 		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1496 		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1497 		if (fib && hw_fib) {
1498 			struct aac_aifcmd * aif;
1499 
1500 			fib->hw_fib_va = hw_fib;
1501 			fib->dev = aac;
1502 			aac_fib_init(fib);
1503 			fib->type = FSAFS_NTC_FIB_CONTEXT;
1504 			fib->size = sizeof (struct fib);
1505 			fib->data = hw_fib->data;
1506 			aif = (struct aac_aifcmd *)hw_fib->data;
1507 			aif->command = cpu_to_le32(AifCmdEventNotify);
1508 			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1509 			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1510 			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1511 			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1512 			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1513 
1514 			/*
1515 			 * Put the FIB onto the
1516 			 * fibctx's fibs
1517 			 */
1518 			list_add_tail(&fib->fiblink, &fibctx->fib_list);
1519 			fibctx->count++;
1520 			/*
1521 			 * Set the event to wake up the
1522 			 * thread that is waiting.
1523 			 */
1524 			up(&fibctx->wait_sem);
1525 		} else {
1526 			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1527 			kfree(fib);
1528 			kfree(hw_fib);
1529 		}
1530 		entry = entry->next;
1531 	}
1532 
1533 	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1534 
1535 	if (BlinkLED < 0) {
1536 		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1537 		goto out;
1538 	}
1539 
1540 	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1541 
1542 	if (!aac_check_reset || ((aac_check_reset == 1) &&
1543 		(aac->supplement_adapter_info.SupportedOptions2 &
1544 			AAC_OPTION_IGNORE_RESET)))
1545 		goto out;
1546 	host = aac->scsi_host_ptr;
1547 	if (aac->thread->pid != current->pid)
1548 		spin_lock_irqsave(host->host_lock, flagv);
1549 	BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
1550 	if (aac->thread->pid != current->pid)
1551 		spin_unlock_irqrestore(host->host_lock, flagv);
1552 	return BlinkLED;
1553 
1554 out:
1555 	aac->in_reset = 0;
1556 	return BlinkLED;
1557 }
1558 
1559 
1560 /**
1561  *	aac_command_thread	-	command processing thread
1562  *	@dev: Adapter to monitor
1563  *
1564  *	Waits on the commandready event in its queue. When the event gets set
1565  *	it will pull FIBs off its queue. It will continue to pull FIBs off
1566  *	until the queue is empty. When the queue is empty it will wait for
1567  *	more FIBs.
1568  */
1569 
1570 int aac_command_thread(void *data)
1571 {
1572 	struct aac_dev *dev = data;
1573 	struct hw_fib *hw_fib, *hw_newfib;
1574 	struct fib *fib, *newfib;
1575 	struct aac_fib_context *fibctx;
1576 	unsigned long flags;
1577 	DECLARE_WAITQUEUE(wait, current);
1578 	unsigned long next_jiffies = jiffies + HZ;
1579 	unsigned long next_check_jiffies = next_jiffies;
1580 	long difference = HZ;
1581 
1582 	/*
1583 	 *	We can only have one thread per adapter for AIF's.
1584 	 */
1585 	if (dev->aif_thread)
1586 		return -EINVAL;
1587 
1588 	/*
1589 	 *	Let the DPC know it has a place to send the AIF's to.
1590 	 */
1591 	dev->aif_thread = 1;
1592 	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1593 	set_current_state(TASK_INTERRUPTIBLE);
1594 	dprintk ((KERN_INFO "aac_command_thread start\n"));
1595 	while (1) {
1596 		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1597 		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1598 			struct list_head *entry;
1599 			struct aac_aifcmd * aifcmd;
1600 
1601 			set_current_state(TASK_RUNNING);
1602 
1603 			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
1604 			list_del(entry);
1605 
1606 			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1607 			fib = list_entry(entry, struct fib, fiblink);
1608 			/*
1609 			 *	We will process the FIB here or pass it to a
1610 			 *	worker thread that is TBD. We really can't
1611 			 *	do anything at this point since we don't have
1612 			 *	anything defined for this thread to do.
1613 			 */
1614 			hw_fib = fib->hw_fib_va;
1615 			memset(fib, 0, sizeof(struct fib));
1616 			fib->type = FSAFS_NTC_FIB_CONTEXT;
1617 			fib->size = sizeof(struct fib);
1618 			fib->hw_fib_va = hw_fib;
1619 			fib->data = hw_fib->data;
1620 			fib->dev = dev;
1621 			/*
1622 			 *	We only handle AifRequest fibs from the adapter.
1623 			 */
1624 			aifcmd = (struct aac_aifcmd *) hw_fib->data;
1625 			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1626 				/* Handle Driver Notify Events */
1627 				aac_handle_aif(dev, fib);
1628 				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1629 				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1630 			} else {
1631 				/* The u32 here is important and intended. We are using
1632 				   32bit wrapping time to fit the adapter field */
1633 
1634 				u32 time_now, time_last;
1635 				unsigned long flagv;
1636 				unsigned num;
1637 				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1638 				struct fib ** fib_pool, ** fib_p;
1639 
1640 				/* Sniff events */
1641 				if ((aifcmd->command ==
1642 				     cpu_to_le32(AifCmdEventNotify)) ||
1643 				    (aifcmd->command ==
1644 				     cpu_to_le32(AifCmdJobProgress))) {
1645 					aac_handle_aif(dev, fib);
1646 				}
1647 
1648 				time_now = jiffies/HZ;
1649 
1650 				/*
1651 				 * Warning: no sleep allowed while
1652 				 * holding spinlock. We take the estimate
1653 				 * and pre-allocate a set of fibs outside the
1654 				 * lock.
1655 				 */
1656 				num = le32_to_cpu(dev->init->AdapterFibsSize)
1657 				    / sizeof(struct hw_fib); /* some extra */
1658 				spin_lock_irqsave(&dev->fib_lock, flagv);
1659 				entry = dev->fib_list.next;
1660 				while (entry != &dev->fib_list) {
1661 					entry = entry->next;
1662 					++num;
1663 				}
1664 				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1665 				hw_fib_pool = NULL;
1666 				fib_pool = NULL;
1667 				if (num
1668 				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1669 				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1670 					hw_fib_p = hw_fib_pool;
1671 					fib_p = fib_pool;
1672 					while (hw_fib_p < &hw_fib_pool[num]) {
1673 						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1674 							--hw_fib_p;
1675 							break;
1676 						}
1677 						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1678 							kfree(*(--hw_fib_p));
1679 							break;
1680 						}
1681 					}
1682 					if ((num = hw_fib_p - hw_fib_pool) == 0) {
1683 						kfree(fib_pool);
1684 						fib_pool = NULL;
1685 						kfree(hw_fib_pool);
1686 						hw_fib_pool = NULL;
1687 					}
1688 				} else {
1689 					kfree(hw_fib_pool);
1690 					hw_fib_pool = NULL;
1691 				}
1692 				spin_lock_irqsave(&dev->fib_lock, flagv);
1693 				entry = dev->fib_list.next;
1694 				/*
1695 				 * For each Context that is on the
1696 				 * fibctxList, make a copy of the
1697 				 * fib, and then set the event to wake up the
1698 				 * thread that is waiting for it.
1699 				 */
1700 				hw_fib_p = hw_fib_pool;
1701 				fib_p = fib_pool;
1702 				while (entry != &dev->fib_list) {
1703 					/*
1704 					 * Extract the fibctx
1705 					 */
1706 					fibctx = list_entry(entry, struct aac_fib_context, next);
1707 					/*
1708 					 * Check if the queue is getting
1709 					 * backlogged
1710 					 */
1711 					if (fibctx->count > 20)
1712 					{
1713 						/*
1714 						 * It's *not* jiffies folks,
1715 						 * but jiffies / HZ so do not
1716 						 * panic ...
1717 						 */
1718 						time_last = fibctx->jiffies;
1719 						/*
1720 						 * Has it been > 2 minutes
1721 						 * since the last read off
1722 						 * the queue?
1723 						 */
1724 						if ((time_now - time_last) > aif_timeout) {
1725 							entry = entry->next;
1726 							aac_close_fib_context(dev, fibctx);
1727 							continue;
1728 						}
1729 					}
1730 					/*
1731 					 * Warning: no sleep allowed while
1732 					 * holding spinlock
1733 					 */
1734 					if (hw_fib_p < &hw_fib_pool[num]) {
1735 						hw_newfib = *hw_fib_p;
1736 						*(hw_fib_p++) = NULL;
1737 						newfib = *fib_p;
1738 						*(fib_p++) = NULL;
1739 						/*
1740 						 * Make the copy of the FIB
1741 						 */
1742 						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1743 						memcpy(newfib, fib, sizeof(struct fib));
1744 						newfib->hw_fib_va = hw_newfib;
1745 						/*
1746 						 * Put the FIB onto the
1747 						 * fibctx's fibs
1748 						 */
1749 						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1750 						fibctx->count++;
1751 						/*
1752 						 * Set the event to wake up the
1753 						 * thread that is waiting.
1754 						 */
1755 						up(&fibctx->wait_sem);
1756 					} else {
1757 						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1758 					}
1759 					entry = entry->next;
1760 				}
1761 				/*
1762 				 *	Set the status of this FIB
1763 				 */
1764 				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1765 				aac_fib_adapter_complete(fib, sizeof(u32));
1766 				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1767 				/* Free up the remaining resources */
1768 				hw_fib_p = hw_fib_pool;
1769 				fib_p = fib_pool;
1770 				while (hw_fib_p < &hw_fib_pool[num]) {
1771 					kfree(*hw_fib_p);
1772 					kfree(*fib_p);
1773 					++fib_p;
1774 					++hw_fib_p;
1775 				}
1776 				kfree(hw_fib_pool);
1777 				kfree(fib_pool);
1778 			}
1779 			kfree(fib);
1780 			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1781 		}
1782 		/*
1783 		 *	There are no more AIF's
1784 		 */
1785 		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1786 
1787 		/*
1788 		 *	Background activity
1789 		 */
1790 		if ((time_before(next_check_jiffies,next_jiffies))
1791 		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
1792 			next_check_jiffies = next_jiffies;
1793 			if (aac_check_health(dev) == 0) {
1794 				difference = ((long)(unsigned)check_interval)
1795 					   * HZ;
1796 				next_check_jiffies = jiffies + difference;
1797 			} else if (!dev->queues)
1798 				break;
1799 		}
1800 		if (!time_before(next_check_jiffies,next_jiffies)
1801 		 && ((difference = next_jiffies - jiffies) <= 0)) {
1802 			struct timeval now;
1803 			int ret;
1804 
1805 			/* Don't even try to talk to the adapter if it's sick */
1806 			ret = aac_check_health(dev);
1807 			if (!ret && !dev->queues)
1808 				break;
1809 			next_check_jiffies = jiffies
1810 					   + ((long)(unsigned)check_interval)
1811 					   * HZ;
1812 			do_gettimeofday(&now);
1813 
1814 			/* Synchronize our watches */
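			/*
			 * If we are more than one tick away from either second
			 * boundary, sleep until the next full second (rounded
			 * to jiffies) so the time update below lands on a
			 * second boundary; otherwise send the time right away.
			 */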
1815 			if (((1000000 - (1000000 / HZ)) > now.tv_usec)
1816 			 && (now.tv_usec > (1000000 / HZ)))
1817 				difference = (((1000000 - now.tv_usec) * HZ)
1818 				  + 500000) / 1000000;
1819 			else if (ret == 0) {
1820 				struct fib *fibptr;
1821 
1822 				if ((fibptr = aac_fib_alloc(dev))) {
1823 					int status;
1824 					__le32 *info;
1825 
1826 					aac_fib_init(fibptr);
1827 
1828 					info = (__le32 *) fib_data(fibptr);
1829 					if (now.tv_usec > 500000)
1830 						++now.tv_sec;
1831 
1832 					*info = cpu_to_le32(now.tv_sec);
1833 
1834 					status = aac_fib_send(SendHostTime,
1835 						fibptr,
1836 						sizeof(*info),
1837 						FsaNormal,
1838 						1, 1,
1839 						NULL,
1840 						NULL);
1841 					/* Do not set XferState to zero unless
1842 					 * we receive a response from the F/W */
1843 					if (status >= 0)
1844 						aac_fib_complete(fibptr);
1845 					/* FIB should be freed only after
1846 					 * getting the response from the F/W */
1847 					if (status != -ERESTARTSYS)
1848 						aac_fib_free(fibptr);
1849 				}
1850 				difference = (long)(unsigned)update_interval*HZ;
1851 			} else {
1852 				/* retry shortly */
1853 				difference = 10 * HZ;
1854 			}
1855 			next_jiffies = jiffies + difference;
1856 			if (time_before(next_check_jiffies,next_jiffies))
1857 				difference = next_check_jiffies - jiffies;
1858 		}
1859 		if (difference <= 0)
1860 			difference = 1;
1861 		set_current_state(TASK_INTERRUPTIBLE);
1862 		schedule_timeout(difference);
1863 
1864 		if (kthread_should_stop())
1865 			break;
1866 	}
1867 	if (dev->queues)
1868 		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1869 	dev->aif_thread = 0;
1870 	return 0;
1871 }
1872