/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is:
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  comminit.c
 *
 * Abstract: This supports the initialization of the host adapter communication interface.
 *    This is a platform-dependent module for the PCI Cyclone board.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

struct aac_common aac_config = {
	.irq_mod = 1
};

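/*
 * Check whether the adapter is currently delivering its interrupts in MSI-X
 * mode.  Only the PMC Series 6/7/8 boards report this state through the
 * MUnit.OMR register; for anything else the answer is "no".
 */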
static inline int aac_is_msix_mode(struct aac_dev *dev)
{
	u32 status = 0;

	if (dev->pdev->device == PMC_DEVICE_S6 ||
		dev->pdev->device == PMC_DEVICE_S7 ||
		dev->pdev->device == PMC_DEVICE_S8) {
		status = src_readl(dev, MUnit.OMR);
	}
	return (status & AAC_INT_MODE_MSIX);
}

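/*
 * Disable MSI-X in the firmware and re-enable legacy INTx interrupts.
 */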
static inline void aac_change_to_intx(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_DISABLE_MSIX);
	aac_src_access_devreg(dev, AAC_ENABLE_INTX);
}

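/**
 *	aac_alloc_comm		-	allocate the shared communication area
 *	@dev: Adapter being initialized
 *	@commaddr: Returned virtual address of the aligned comm area headers
 *	@commsize: Size of the comm area headers and queues in bytes
 *	@commalign: Required alignment of the comm area
 *
 *	Allocates one DMA-coherent region that holds the adapter FIBs, the
 *	host RRQ (for the message interfaces), the init structure, the comm
 *	area and the adapter printf buffer, and fills in the init structure
 *	that is later handed to the firmware.
 *
 *	Returns 1 on success and 0 if the allocation failed.
 */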
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
	unsigned char *base;
	unsigned long size, align;
	const unsigned long fibsize = dev->max_fib_size;
	const unsigned long printfbufsiz = 256;
	unsigned long host_rrq_size, aac_init_size;
	union aac_init *init;
	dma_addr_t phys;
	unsigned long aac_max_hostphysmempages;

	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
		(dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
		(dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
		!dev->sa_firmware)) {
		host_rrq_size =
			(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)
				* sizeof(u32);
		aac_init_size = sizeof(union aac_init);
	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
		dev->sa_firmware) {
		host_rrq_size = (dev->scsi_host_ptr->can_queue
			+ AAC_NUM_MGT_FIB) * sizeof(u32) * AAC_MAX_MSIX;
		aac_init_size = sizeof(union aac_init) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq);
	} else {
		host_rrq_size = 0;
		aac_init_size = sizeof(union aac_init);
	}
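
	/*
	 * Everything is carved out of a single coherent DMA allocation:
	 * the adapter FIBs, the host RRQ, the init structure, the aligned
	 * comm area and the printf buffer.
	 */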
	size = fibsize + aac_init_size + commsize + commalign +
			printfbufsiz + host_rrq_size;

	base = dma_alloc_coherent(&dev->pdev->dev, size, &phys, GFP_KERNEL);
	if (base == NULL) {
		printk(KERN_ERR "aacraid: unable to create mapping.\n");
		return 0;
	}

	dev->comm_addr = (void *)base;
	dev->comm_phys = phys;
	dev->comm_size = size;

	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
	    (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
	    (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) {
		dev->host_rrq = (u32 *)(base + fibsize);
		dev->host_rrq_pa = phys + fibsize;
		memset(dev->host_rrq, 0, host_rrq_size);
	}

	dev->init = (union aac_init *)(base + fibsize + host_rrq_size);
	dev->init_pa = phys + fibsize + host_rrq_size;

	init = dev->init;

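	/*
	 * Fill in the init structure that is handed to the firmware: the r8
	 * layout for the TYPE3 interface, the older r7 layout for everything
	 * else.
	 */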
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		int i;
		u64 addr;

		init->r8.init_struct_revision =
			cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8);
		init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
					INITFLAGS_DRIVER_USES_UTC_TIME |
					INITFLAGS_DRIVER_SUPPORTS_PM);
		init->r8.init_flags |=
				cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE);
		init->r8.rr_queue_count = cpu_to_le32(dev->max_msix);
		init->r8.max_io_size =
			cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
		init->r8.max_num_aif = init->r8.reserved1 =
			init->r8.reserved2 = 0;

		for (i = 0; i < dev->max_msix; i++) {
			addr = (u64)dev->host_rrq_pa + dev->vector_cap * i *
					sizeof(u32);
			init->r8.rrq[i].host_addr_high = cpu_to_le32(
						upper_32_bits(addr));
			init->r8.rrq[i].host_addr_low = cpu_to_le32(
						lower_32_bits(addr));
			init->r8.rrq[i].msix_id = i;
			init->r8.rrq[i].element_count = cpu_to_le16(
					(u16)dev->vector_cap);
			init->r8.rrq[i].comp_thresh =
					init->r8.rrq[i].unused = 0;
		}

		pr_warn("aacraid: Comm Interface type3 enabled\n");
	} else {
		init->r7.init_struct_revision =
			cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
		if (dev->max_fib_size != sizeof(struct hw_fib))
			init->r7.init_struct_revision =
				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
		init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION);
		init->r7.fsrev = cpu_to_le32(dev->fsrev);

		/*
		 *	Adapter Fibs are the first thing allocated so that they
		 *	start page aligned
		 */
		dev->aif_base_va = (struct hw_fib *)base;

		init->r7.adapter_fibs_virtual_address = 0;
		init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys);
		init->r7.adapter_fibs_size = cpu_to_le32(fibsize);
		init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib));

		/*
		 * Number of 4k pages of host physical memory.  The aacraid
		 * firmware needs this number to be less than 4GB worth of
		 * pages.  New firmware doesn't have any issues with the
		 * mapping system, but older firmware did and had trouble
		 * when the math overflowed past 32 bits, so we must limit
		 * this field.
		 */
		aac_max_hostphysmempages =
				dma_get_required_mask(&dev->pdev->dev) >> 12;
		if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
			init->r7.host_phys_mem_pages =
					cpu_to_le32(aac_max_hostphysmempages);
		else
			init->r7.host_phys_mem_pages =
					cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);

		init->r7.init_flags =
			cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
			INITFLAGS_DRIVER_SUPPORTS_PM);
		init->r7.max_io_commands =
			cpu_to_le32(dev->scsi_host_ptr->can_queue +
					AAC_NUM_MGT_FIB);
		init->r7.max_io_size =
			cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
		init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size);
		init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif);

		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			init->r7.init_flags |=
				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
			pr_warn("aacraid: Comm Interface enabled\n");
		} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
			init->r7.init_struct_revision =
				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
			init->r7.init_flags |=
				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
				INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
				INITFLAGS_FAST_JBOD_SUPPORTED);
			init->r7.host_rrq_addr_high =
				cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
			init->r7.host_rrq_addr_low =
				cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
			pr_warn("aacraid: Comm Interface type1 enabled\n");
		} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
			init->r7.init_struct_revision =
				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
			init->r7.init_flags |=
				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
				INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
				INITFLAGS_FAST_JBOD_SUPPORTED);
			init->r7.host_rrq_addr_high =
				cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
			init->r7.host_rrq_addr_low =
				cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
			init->r7.no_of_msix_vectors =
				cpu_to_le32(dev->max_msix);
			/* must be the COMM_PREFERRED_SETTINGS values */
			pr_warn("aacraid: Comm Interface type2 enabled\n");
		}
	}

	/*
	 *	Increment the base address by the amount already used
	 */
	base = base + fibsize + host_rrq_size + aac_init_size;
	phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
			aac_init_size);

	/*
	 *	Align the beginning of Headers to commalign
	 */
	align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
	base = base + align;
	phys = phys + align;
	/*
	 *	Fill in addresses of the Comm Area Headers and Queues
	 */
	*commaddr = base;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
		init->r7.comm_header_address = cpu_to_le32((u32)phys);
	/*
	 *	Increment the base address by the size of the CommArea
	 */
	base = base + commsize;
	phys = phys + commsize;
	/*
	 *	Place the Printf buffer area after the Fast I/O comm area.
	 */
	dev->printfbuf = (void *)base;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) {
		init->r7.printfbuf = cpu_to_le32(phys);
		init->r7.printfbufsiz = cpu_to_le32(printfbufsiz);
	}
	memset(base, 0, printfbufsiz);
	return 1;
}

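/**
 *	aac_queue_init		-	initialise a single comm queue
 *	@dev: Adapter that owns the queue
 *	@q: Queue to initialise
 *	@mem: Producer/consumer index pair for this queue
 *	@qsize: Number of entries the queue can hold
 *
 *	Sets up the wait queues, lock and producer/consumer headers for one
 *	of the shared communication queues.
 */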
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
	atomic_set(&q->numpending, 0);
	q->dev = dev;
	init_waitqueue_head(&q->cmdready);
	INIT_LIST_HEAD(&q->cmdq);
	init_waitqueue_head(&q->qfull);
	spin_lock_init(&q->lockdata);
	q->lock = &q->lockdata;
	q->headers.producer = (__le32 *)mem;
	q->headers.consumer = (__le32 *)(mem+1);
	*(q->headers.producer) = cpu_to_le32(qsize);
	*(q->headers.consumer) = cpu_to_le32(qsize);
	q->entries = qsize;
}

/**
 *	aac_send_shutdown		-	shutdown an adapter
 *	@dev: Adapter to shutdown
 *
 *	This routine will send a VM_CloseAll (shutdown) request to the adapter.
 */

int aac_send_shutdown(struct aac_dev * dev)
{
	struct fib * fibctx;
	struct aac_close *cmd;
	int status;

	fibctx = aac_fib_alloc(dev);
	if (!fibctx)
		return -ENOMEM;
	aac_fib_init(fibctx);

	mutex_lock(&dev->ioctl_mutex);
	dev->adapter_shutdown = 1;
	mutex_unlock(&dev->ioctl_mutex);

	cmd = (struct aac_close *) fib_data(fibctx);
	cmd->command = cpu_to_le32(VM_CloseAll);
	cmd->cid = cpu_to_le32(0xfffffffe);

	status = aac_fib_send(ContainerCommand,
			  fibctx,
			  sizeof(struct aac_close),
			  FsaNormal,
			  -2 /* Timeout silently */, 1,
			  NULL, NULL);

	if (status >= 0)
		aac_fib_complete(fibctx);
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibctx);
	if ((dev->pdev->device == PMC_DEVICE_S7 ||
	     dev->pdev->device == PMC_DEVICE_S8 ||
	     dev->pdev->device == PMC_DEVICE_S9) &&
	     dev->msi_enabled)
		aac_set_intx_mode(dev);
	return status;
}

/**
 *	aac_comm_init	-	Initialise FSA data structures
 *	@dev:	Adapter to initialise
 *
 *	Initializes the data structures that are required for the FSA
 *	communication interface to operate.
 *	Returns
 *		0 - if we were able to init the communication interface.
 *		-ENOMEM - if the communication area could not be allocated.
 *			  This is a fatal error.
 */

static int aac_comm_init(struct aac_dev * dev)
{
	unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
	unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
	u32 *headers;
	struct aac_entry * queues;
	unsigned long size;
	struct aac_queue_block * comm = dev->queues;
	/*
	 *	Initialize the spinlock protecting the pool of FIB context
	 *	records; the pool itself is allocated later by aac_fib_setup().
	 */
	spin_lock_init(&dev->fib_lock);

	/*
	 *	Allocate the physically contiguous space for the communication
	 *	queue headers.
	 */

	size = hdrsize + queuesize;

	if (!aac_alloc_comm(dev, (void **)&headers, size, QUEUE_ALIGNMENT))
		return -ENOMEM;

	queues = (struct aac_entry *)(((ulong)headers) + hdrsize);

	/* Adapter to Host normal priority Command queue */
	comm->queue[HostNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
	queues += HOST_NORM_CMD_ENTRIES;
	headers += 2;

	/* Adapter to Host high priority command queue */
	comm->queue[HostHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
	queues += HOST_HIGH_CMD_ENTRIES;
	headers += 2;

	/* Host to adapter normal priority command queue */
	comm->queue[AdapNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
	queues += ADAP_NORM_CMD_ENTRIES;
	headers += 2;

	/* Host to adapter high priority command queue */
	comm->queue[AdapHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
	queues += ADAP_HIGH_CMD_ENTRIES;
	headers += 2;

	/* Adapter to host normal priority response queue */
	comm->queue[HostNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
	queues += HOST_NORM_RESP_ENTRIES;
	headers += 2;

	/* Adapter to host high priority response queue */
	comm->queue[HostHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
	queues += HOST_HIGH_RESP_ENTRIES;
	headers += 2;

	/* Host to adapter normal priority response queue */
	comm->queue[AdapNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
	queues += ADAP_NORM_RESP_ENTRIES;
	headers += 2;

	/* Host to adapter high priority response queue */
	comm->queue[AdapHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);

	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;

	return 0;
}

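/**
 *	aac_define_int_mode	-	choose the interrupt mode for an adapter
 *	@dev: Adapter to configure
 *
 *	Decides between MSI-X and legacy INTx based on the maximum vector
 *	count advertised by the firmware, the number of online CPUs and what
 *	the PCI function actually supports, then derives vector_cap, the
 *	number of commands served per vector.
 */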
void aac_define_int_mode(struct aac_dev *dev)
{
	int i, msi_count, min_msix;

	msi_count = i = 0;
	/* max. vectors from GET_COMM_PREFERRED_SETTINGS */
	if (dev->max_msix == 0 ||
	    dev->pdev->device == PMC_DEVICE_S6 ||
	    dev->sync_mode) {
		dev->max_msix = 1;
		dev->vector_cap =
			dev->scsi_host_ptr->can_queue +
			AAC_NUM_MGT_FIB;
		return;
	}

	/* Don't bother allocating more MSI-X vectors than cpus */
	msi_count = min(dev->max_msix,
		(unsigned int)num_online_cpus());

	dev->max_msix = msi_count;

	if (msi_count > AAC_MAX_MSIX)
		msi_count = AAC_MAX_MSIX;

	if (msi_count > 1 &&
	    pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
		min_msix = 2;
		i = pci_alloc_irq_vectors(dev->pdev,
					  min_msix, msi_count,
					  PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (i > 0) {
			dev->msi_enabled = 1;
			msi_count = i;
		} else {
			dev->msi_enabled = 0;
			dev_err(&dev->pdev->dev,
				"MSI-X not supported, falling back to INTx: %d\n", i);
		}
	}

	if (!dev->msi_enabled)
		dev->max_msix = msi_count = 1;
	else {
		if (dev->max_msix > msi_count)
			dev->max_msix = msi_count;
	}
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware)
		dev->vector_cap = dev->scsi_host_ptr->can_queue +
				AAC_NUM_MGT_FIB;
	else
		dev->vector_cap = (dev->scsi_host_ptr->can_queue +
				AAC_NUM_MGT_FIB) / msi_count;
}
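
/**
 *	aac_init_adapter	-	initialise an adapter's comm interface
 *	@dev: Adapter to initialise
 *
 *	Negotiates the communication interface with the firmware, applies
 *	the firmware's preferred settings, selects the interrupt mode and
 *	sets up the communication queues and FIB pool.
 *
 *	Returns the adapter on success or NULL on a fatal error.
 */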
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
	u32 status[5];
	struct Scsi_Host * host = dev->scsi_host_ptr;
	extern int aac_sync_mode;

	/*
	 *	Check the preferred comm settings, defaults from template.
	 */
	dev->management_fib_count = 0;
	spin_lock_init(&dev->manage_lock);
	spin_lock_init(&dev->sync_lock);
	spin_lock_init(&dev->iq_lock);
	dev->max_fib_size = sizeof(struct hw_fib);
	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
		- sizeof(struct aac_fibhdr)
		- sizeof(struct aac_write) + sizeof(struct sgentry))
			/ sizeof(struct sgentry);
	dev->comm_interface = AAC_COMM_PRODUCER;
	dev->raw_io_interface = dev->raw_io_64 = 0;

	/*
	 * Switch the firmware back to INTx mode if it is still in MSI-X mode.
	 */
	if (aac_is_msix_mode(dev)) {
		aac_change_to_intx(dev);
		dev_info(&dev->pdev->dev, "Changed firmware to INTX mode");
	}

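	/*
	 * Ask the firmware which communication options it supports and pick
	 * the newest message interface (TYPE1/2/3) both sides understand.
	 */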
	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
		0, 0, 0, 0, 0, 0,
		status+0, status+1, status+2, status+3, status+4)) &&
		(status[0] == 0x00000001)) {
		dev->doorbell_mask = status[3];
		if (status[1] & AAC_OPT_NEW_COMM_64)
			dev->raw_io_64 = 1;
		dev->sync_mode = aac_sync_mode;
		if (dev->a_ops.adapter_comm &&
			(status[1] & AAC_OPT_NEW_COMM)) {
			dev->comm_interface = AAC_COMM_MESSAGE;
			dev->raw_io_interface = 1;
			if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) {
				/* driver supports TYPE1 (Tupelo) */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) {
				/* driver supports TYPE2 (Denali, Yosemite) */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) {
				/* driver supports TYPE3 (Yosemite, Thor) */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE3;
			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) {
				/* not supported TYPE - switch to sync. mode */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
				dev->sync_mode = 1;
			}
		}
		if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
			(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
			dev->sa_firmware = 1;
		else
			dev->sa_firmware = 0;

		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
		    (status[2] > dev->base_size)) {
			aac_adapter_ioremap(dev, 0);
			dev->base_size = status[2];
			if (aac_adapter_ioremap(dev, status[2])) {
				/* remap failed, go back ... */
				dev->comm_interface = AAC_COMM_PRODUCER;
				if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
					printk(KERN_WARNING
					  "aacraid: unable to map adapter.\n");
					return NULL;
				}
			}
		}
	}
	dev->max_msix = 0;
	dev->msi_enabled = 0;
	dev->adapter_shutdown = 0;
	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
	  0, 0, 0, 0, 0, 0,
	  status+0, status+1, status+2, status+3, status+4))
	 && (status[0] == 0x00000001)) {
		/*
		 *	status[1] >> 16		maximum command size in KB
		 *	status[1] & 0xFFFF	maximum FIB size
		 *	status[2] >> 16		maximum SG elements to driver
		 *	status[2] & 0xFFFF	maximum SG elements from driver
		 *	status[3] & 0xFFFF	maximum number FIBs outstanding
		 */
		host->max_sectors = (status[1] >> 16) << 1;
		/* Multiple of 32 for PMC */
		dev->max_fib_size = status[1] & 0xFFE0;
		host->sg_tablesize = status[2] >> 16;
		dev->sg_tablesize = status[2] & 0xFFFF;
		if (dev->pdev->device == PMC_DEVICE_S7 ||
		    dev->pdev->device == PMC_DEVICE_S8 ||
		    dev->pdev->device == PMC_DEVICE_S9) {
			if (host->can_queue > (status[3] >> 16) -
					AAC_NUM_MGT_FIB)
				host->can_queue = (status[3] >> 16) -
					AAC_NUM_MGT_FIB;
		} else if (host->can_queue > (status[3] & 0xFFFF) -
				AAC_NUM_MGT_FIB)
			host->can_queue = (status[3] & 0xFFFF) -
				AAC_NUM_MGT_FIB;

		dev->max_num_aif = status[4] & 0xFFFF;
	}
	if (numacb > 0) {
		if (numacb < host->can_queue)
			host->can_queue = numacb;
		else
			pr_warn("numacb=%d ignored\n", numacb);
	}

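	/*
	 * The interrupt mode (MSI-X vs. INTx) is only worked out for the
	 * PMC based controllers (Series 6/7/8/9).
	 */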
	if (dev->pdev->device == PMC_DEVICE_S6 ||
	    dev->pdev->device == PMC_DEVICE_S7 ||
	    dev->pdev->device == PMC_DEVICE_S8 ||
	    dev->pdev->device == PMC_DEVICE_S9)
		aac_define_int_mode(dev);
	/*
	 *	Ok now init the communication subsystem
	 */

	dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
	if (dev->queues == NULL) {
		printk(KERN_ERR "aacraid: could not allocate comm region.\n");
		return NULL;
	}

	if (aac_comm_init(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}
	/*
	 *	Initialize the list of fibs
	 */
	if (aac_fib_setup(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}

	INIT_LIST_HEAD(&dev->fib_list);
	INIT_LIST_HEAD(&dev->sync_fib_list);

	return dev;
}