xref: /openbmc/linux/drivers/scsi/smartpqi/smartpqi_init.c (revision 4ed91d48259d9ddd378424d008f2e6559f7e78f8)
1 /*
2  *    driver for Microsemi PQI-based storage controllers
3  *    Copyright (c) 2016 Microsemi Corporation
4  *    Copyright (c) 2016 PMC-Sierra, Inc.
5  *
6  *    This program is free software; you can redistribute it and/or modify
7  *    it under the terms of the GNU General Public License as published by
8  *    the Free Software Foundation; version 2 of the License.
9  *
10  *    This program is distributed in the hope that it will be useful,
11  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
14  *
15  *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16  *
17  */
18 
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/pci.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/rtc.h>
26 #include <linux/bcd.h>
27 #include <linux/cciss_ioctl.h>
28 #include <linux/blk-mq-pci.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_transport_sas.h>
34 #include <asm/unaligned.h>
35 #include "smartpqi.h"
36 #include "smartpqi_sis.h"
37 
38 #if !defined(BUILD_TIMESTAMP)
39 #define BUILD_TIMESTAMP
40 #endif
41 
42 #define DRIVER_VERSION		"0.9.13-370"
43 #define DRIVER_MAJOR		0
44 #define DRIVER_MINOR		9
45 #define DRIVER_RELEASE		13
46 #define DRIVER_REVISION		370
47 
48 #define DRIVER_NAME		"Microsemi PQI Driver (v" DRIVER_VERSION ")"
49 #define DRIVER_NAME_SHORT	"smartpqi"
50 
51 MODULE_AUTHOR("Microsemi");
52 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
53 	DRIVER_VERSION);
54 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
55 MODULE_VERSION(DRIVER_VERSION);
56 MODULE_LICENSE("GPL");
57 
58 #define PQI_ENABLE_MULTI_QUEUE_SUPPORT	0
59 
60 static char *hpe_branded_controller = "HPE Smart Array Controller";
61 static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
62 
63 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
64 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
65 static void pqi_scan_start(struct Scsi_Host *shost);
66 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
67 	struct pqi_queue_group *queue_group, enum pqi_io_path path,
68 	struct pqi_io_request *io_request);
69 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
70 	struct pqi_iu_header *request, unsigned int flags,
71 	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
72 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
73 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
74 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
75 	struct pqi_encryption_info *encryption_info);
76 
77 /* for flags argument to pqi_submit_raid_request_synchronous() */
78 #define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
79 
80 static struct scsi_transport_template *pqi_sas_transport_template;
81 
82 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
83 
84 static int pqi_disable_device_id_wildcards;
85 module_param_named(disable_device_id_wildcards,
86 	pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
87 MODULE_PARM_DESC(disable_device_id_wildcards,
88 	"Disable device ID wildcards.");
89 
90 static char *raid_levels[] = {
91 	"RAID-0",
92 	"RAID-4",
93 	"RAID-1(1+0)",
94 	"RAID-5",
95 	"RAID-5+1",
96 	"RAID-ADG",
97 	"RAID-1(ADM)",
98 };
99 
100 static char *pqi_raid_level_to_string(u8 raid_level)
101 {
102 	if (raid_level < ARRAY_SIZE(raid_levels))
103 		return raid_levels[raid_level];
104 
105 	return "";
106 }
107 
108 #define SA_RAID_0		0
109 #define SA_RAID_4		1
110 #define SA_RAID_1		2	/* also used for RAID 10 */
111 #define SA_RAID_5		3	/* also used for RAID 50 */
112 #define SA_RAID_51		4
113 #define SA_RAID_6		5	/* also used for RAID 60 */
114 #define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
115 #define SA_RAID_MAX		SA_RAID_ADM
116 #define SA_RAID_UNKNOWN		0xff
117 
118 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
119 {
120 	scmd->scsi_done(scmd);
121 }
122 
123 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
124 {
125 	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
126 }
127 
128 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
129 {
130 	void *hostdata = shost_priv(shost);
131 
132 	return *((struct pqi_ctrl_info **)hostdata);
133 }
134 
135 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
136 {
137 	return !device->is_physical_device;
138 }
139 
140 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
141 {
142 	return !ctrl_info->controller_online;
143 }
144 
145 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
146 {
147 	if (ctrl_info->controller_online)
148 		if (!sis_is_firmware_running(ctrl_info))
149 			pqi_take_ctrl_offline(ctrl_info);
150 }
151 
152 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
153 {
154 	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
155 }
156 
157 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
158 	struct pqi_ctrl_info *ctrl_info)
159 {
160 	return sis_read_driver_scratch(ctrl_info);
161 }
162 
163 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
164 	enum pqi_ctrl_mode mode)
165 {
166 	sis_write_driver_scratch(ctrl_info, mode);
167 }
168 
169 #define PQI_RESCAN_WORK_INTERVAL	(10 * HZ)
170 
171 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
172 {
173 	schedule_delayed_work(&ctrl_info->rescan_work,
174 		PQI_RESCAN_WORK_INTERVAL);
175 }
176 
177 static int pqi_map_single(struct pci_dev *pci_dev,
178 	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
179 	size_t buffer_length, int data_direction)
180 {
181 	dma_addr_t bus_address;
182 
183 	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
184 		return 0;
185 
186 	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
187 		data_direction);
188 	if (pci_dma_mapping_error(pci_dev, bus_address))
189 		return -ENOMEM;
190 
191 	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
192 	put_unaligned_le32(buffer_length, &sg_descriptor->length);
193 	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
194 
195 	return 0;
196 }
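/*
 * Note: pqi_map_single() fills exactly one SG descriptor (flagged
 * CISS_SG_LAST) with the DMA address and length of the buffer, or
 * returns -ENOMEM without touching the descriptor if the mapping
 * fails.  A buffer mapped here must later be released with
 * pqi_pci_unmap() using the same data direction, as the synchronous
 * request helpers below do.
 */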
197 
198 static void pqi_pci_unmap(struct pci_dev *pci_dev,
199 	struct pqi_sg_descriptor *descriptors, int num_descriptors,
200 	int data_direction)
201 {
202 	int i;
203 
204 	if (data_direction == PCI_DMA_NONE)
205 		return;
206 
207 	for (i = 0; i < num_descriptors; i++)
208 		pci_unmap_single(pci_dev,
209 			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
210 			get_unaligned_le32(&descriptors[i].length),
211 			data_direction);
212 }
213 
214 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
215 	struct pqi_raid_path_request *request, u8 cmd,
216 	u8 *scsi3addr, void *buffer, size_t buffer_length,
217 	u16 vpd_page, int *pci_direction)
218 {
219 	u8 *cdb;
220 	int pci_dir;
221 
222 	memset(request, 0, sizeof(*request));
223 
224 	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
225 	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
226 		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
227 		&request->header.iu_length);
228 	put_unaligned_le32(buffer_length, &request->buffer_length);
229 	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
230 	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
231 	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
232 
233 	cdb = request->cdb;
234 
235 	switch (cmd) {
236 	case INQUIRY:
237 		request->data_direction = SOP_READ_FLAG;
238 		cdb[0] = INQUIRY;
239 		if (vpd_page & VPD_PAGE) {
240 			cdb[1] = 0x1;
241 			cdb[2] = (u8)vpd_page;
242 		}
243 		cdb[4] = (u8)buffer_length;
244 		break;
245 	case CISS_REPORT_LOG:
246 	case CISS_REPORT_PHYS:
247 		request->data_direction = SOP_READ_FLAG;
248 		cdb[0] = cmd;
249 		if (cmd == CISS_REPORT_PHYS)
250 			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
251 		else
252 			cdb[1] = CISS_REPORT_LOG_EXTENDED;
253 		put_unaligned_be32(buffer_length, &cdb[6]);
254 		break;
255 	case CISS_GET_RAID_MAP:
256 		request->data_direction = SOP_READ_FLAG;
257 		cdb[0] = CISS_READ;
258 		cdb[1] = CISS_GET_RAID_MAP;
259 		put_unaligned_be32(buffer_length, &cdb[6]);
260 		break;
261 	case SA_CACHE_FLUSH:
262 		request->data_direction = SOP_WRITE_FLAG;
263 		cdb[0] = BMIC_WRITE;
264 		cdb[6] = BMIC_CACHE_FLUSH;
265 		put_unaligned_be16(buffer_length, &cdb[7]);
266 		break;
267 	case BMIC_IDENTIFY_CONTROLLER:
268 	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
269 		request->data_direction = SOP_READ_FLAG;
270 		cdb[0] = BMIC_READ;
271 		cdb[6] = cmd;
272 		put_unaligned_be16(buffer_length, &cdb[7]);
273 		break;
274 	case BMIC_WRITE_HOST_WELLNESS:
275 		request->data_direction = SOP_WRITE_FLAG;
276 		cdb[0] = BMIC_WRITE;
277 		cdb[6] = cmd;
278 		put_unaligned_be16(buffer_length, &cdb[7]);
279 		break;
280 	default:
281 		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
282 			cmd);
283 		WARN_ON(cmd);
284 		break;
285 	}
286 
287 	switch (request->data_direction) {
288 	case SOP_READ_FLAG:
289 		pci_dir = PCI_DMA_FROMDEVICE;
290 		break;
291 	case SOP_WRITE_FLAG:
292 		pci_dir = PCI_DMA_TODEVICE;
293 		break;
294 	case SOP_NO_DIRECTION_FLAG:
295 		pci_dir = PCI_DMA_NONE;
296 		break;
297 	default:
298 		pci_dir = PCI_DMA_BIDIRECTIONAL;
299 		break;
300 	}
301 
302 	*pci_direction = pci_dir;
303 
304 	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
305 		buffer, buffer_length, pci_dir);
306 }
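/*
 * Typical use of the helper above (illustrative sketch only; the real
 * callers follow directly below):
 *
 *	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd,
 *		scsi3addr, buffer, buffer_length, vpd_page, &pci_direction);
 *	if (rc)
 *		return rc;
 *	rc = pqi_submit_raid_request_synchronous(ctrl_info,
 *		&request.header, 0, NULL, NO_TIMEOUT);
 *	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 *		pci_direction);
 */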
307 
308 static struct pqi_io_request *pqi_alloc_io_request(
309 	struct pqi_ctrl_info *ctrl_info)
310 {
311 	struct pqi_io_request *io_request;
312 	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */
313 
314 	while (1) {
315 		io_request = &ctrl_info->io_request_pool[i];
316 		if (atomic_inc_return(&io_request->refcount) == 1)
317 			break;
318 		atomic_dec(&io_request->refcount);
319 		i = (i + 1) % ctrl_info->max_io_slots;
320 	}
321 
322 	/* benignly racy */
323 	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
324 
325 	io_request->scmd = NULL;
326 	io_request->status = 0;
327 	io_request->error_info = NULL;
328 
329 	return io_request;
330 }
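/*
 * The allocator above reserves a slot without taking a lock: the first
 * caller to bump a slot's refcount from 0 to 1 owns that io_request;
 * losers drop the count and probe the next slot.  next_io_request_slot
 * is only a starting hint, so the unsynchronized update of it is
 * harmless ("benignly racy").
 */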
331 
332 static void pqi_free_io_request(struct pqi_io_request *io_request)
333 {
334 	atomic_dec(&io_request->refcount);
335 }
336 
337 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
338 	struct bmic_identify_controller *buffer)
339 {
340 	int rc;
341 	int pci_direction;
342 	struct pqi_raid_path_request request;
343 
344 	rc = pqi_build_raid_path_request(ctrl_info, &request,
345 		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
346 		sizeof(*buffer), 0, &pci_direction);
347 	if (rc)
348 		return rc;
349 
350 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
351 		NULL, NO_TIMEOUT);
352 
353 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
354 		pci_direction);
355 
356 	return rc;
357 }
358 
359 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
360 	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
361 {
362 	int rc;
363 	int pci_direction;
364 	struct pqi_raid_path_request request;
365 
366 	rc = pqi_build_raid_path_request(ctrl_info, &request,
367 		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
368 		&pci_direction);
369 	if (rc)
370 		return rc;
371 
372 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
373 		NULL, NO_TIMEOUT);
374 
375 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
376 		pci_direction);
377 
378 	return rc;
379 }
380 
381 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
382 	struct pqi_scsi_dev *device,
383 	struct bmic_identify_physical_device *buffer,
384 	size_t buffer_length)
385 {
386 	int rc;
387 	int pci_direction;
388 	u16 bmic_device_index;
389 	struct pqi_raid_path_request request;
390 
391 	rc = pqi_build_raid_path_request(ctrl_info, &request,
392 		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
393 		buffer_length, 0, &pci_direction);
394 	if (rc)
395 		return rc;
396 
397 	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
398 	request.cdb[2] = (u8)bmic_device_index;
399 	request.cdb[9] = (u8)(bmic_device_index >> 8);
400 
401 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
402 		0, NULL, NO_TIMEOUT);
403 
404 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
405 		pci_direction);
406 
407 	return rc;
408 }
409 
410 #define SA_CACHE_FLUSH_BUFFER_LENGTH	4
411 
412 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
413 {
414 	int rc;
415 	struct pqi_raid_path_request request;
416 	int pci_direction;
417 	u8 *buffer;
418 
419 	/*
420 	 * Don't bother trying to flush the cache if the controller is
421 	 * locked up.
422 	 */
423 	if (pqi_ctrl_offline(ctrl_info))
424 		return -ENXIO;
425 
426 	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
427 	if (!buffer)
428 		return -ENOMEM;
429 
430 	rc = pqi_build_raid_path_request(ctrl_info, &request,
431 		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
432 		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
433 	if (rc)
434 		goto out;
435 
436 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
437 		0, NULL, NO_TIMEOUT);
438 
439 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
440 		pci_direction);
441 
442 out:
443 	kfree(buffer);
444 
445 	return rc;
446 }
447 
448 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
449 	void *buffer, size_t buffer_length)
450 {
451 	int rc;
452 	struct pqi_raid_path_request request;
453 	int pci_direction;
454 
455 	rc = pqi_build_raid_path_request(ctrl_info, &request,
456 		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
457 		buffer_length, 0, &pci_direction);
458 	if (rc)
459 		return rc;
460 
461 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
462 		0, NULL, NO_TIMEOUT);
463 
464 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
465 		pci_direction);
466 
467 	return rc;
468 }
469 
470 #pragma pack(1)
471 
472 struct bmic_host_wellness_driver_version {
473 	u8	start_tag[4];
474 	u8	driver_version_tag[2];
475 	__le16	driver_version_length;
476 	char	driver_version[32];
477 	u8	end_tag[2];
478 };
479 
480 #pragma pack()
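/*
 * On the wire this buffer is the "<HW>" start tag, the "DV" tag, a
 * little-endian length, a fixed 32-byte driver version string, and the
 * "ZZ" end tag, exactly as filled in by
 * pqi_write_driver_version_to_host_wellness() below.
 */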
481 
482 static int pqi_write_driver_version_to_host_wellness(
483 	struct pqi_ctrl_info *ctrl_info)
484 {
485 	int rc;
486 	struct bmic_host_wellness_driver_version *buffer;
487 	size_t buffer_length;
488 
489 	buffer_length = sizeof(*buffer);
490 
491 	buffer = kmalloc(buffer_length, GFP_KERNEL);
492 	if (!buffer)
493 		return -ENOMEM;
494 
495 	buffer->start_tag[0] = '<';
496 	buffer->start_tag[1] = 'H';
497 	buffer->start_tag[2] = 'W';
498 	buffer->start_tag[3] = '>';
499 	buffer->driver_version_tag[0] = 'D';
500 	buffer->driver_version_tag[1] = 'V';
501 	put_unaligned_le16(sizeof(buffer->driver_version),
502 		&buffer->driver_version_length);
503 	strncpy(buffer->driver_version, DRIVER_VERSION,
504 		sizeof(buffer->driver_version) - 1);
505 	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
506 	buffer->end_tag[0] = 'Z';
507 	buffer->end_tag[1] = 'Z';
508 
509 	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
510 
511 	kfree(buffer);
512 
513 	return rc;
514 }
515 
516 #pragma pack(1)
517 
518 struct bmic_host_wellness_time {
519 	u8	start_tag[4];
520 	u8	time_tag[2];
521 	__le16	time_length;
522 	u8	time[8];
523 	u8	dont_write_tag[2];
524 	u8	end_tag[2];
525 };
526 
527 #pragma pack()
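/*
 * The eight time bytes are BCD encoded: hour, minute, second, a zero
 * pad byte, month, day of month, century, and two-digit year, in that
 * order (see pqi_write_current_time_to_host_wellness() below).
 */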
528 
529 static int pqi_write_current_time_to_host_wellness(
530 	struct pqi_ctrl_info *ctrl_info)
531 {
532 	int rc;
533 	struct bmic_host_wellness_time *buffer;
534 	size_t buffer_length;
535 	time64_t local_time;
536 	unsigned int year;
537 	struct tm tm;
538 
539 	buffer_length = sizeof(*buffer);
540 
541 	buffer = kmalloc(buffer_length, GFP_KERNEL);
542 	if (!buffer)
543 		return -ENOMEM;
544 
545 	buffer->start_tag[0] = '<';
546 	buffer->start_tag[1] = 'H';
547 	buffer->start_tag[2] = 'W';
548 	buffer->start_tag[3] = '>';
549 	buffer->time_tag[0] = 'T';
550 	buffer->time_tag[1] = 'D';
551 	put_unaligned_le16(sizeof(buffer->time),
552 		&buffer->time_length);
553 
554 	local_time = ktime_get_real_seconds();
555 	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
556 	year = tm.tm_year + 1900;
557 
558 	buffer->time[0] = bin2bcd(tm.tm_hour);
559 	buffer->time[1] = bin2bcd(tm.tm_min);
560 	buffer->time[2] = bin2bcd(tm.tm_sec);
561 	buffer->time[3] = 0;
562 	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
563 	buffer->time[5] = bin2bcd(tm.tm_mday);
564 	buffer->time[6] = bin2bcd(year / 100);
565 	buffer->time[7] = bin2bcd(year % 100);
566 
567 	buffer->dont_write_tag[0] = 'D';
568 	buffer->dont_write_tag[1] = 'W';
569 	buffer->end_tag[0] = 'Z';
570 	buffer->end_tag[1] = 'Z';
571 
572 	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
573 
574 	kfree(buffer);
575 
576 	return rc;
577 }
578 
579 #define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)
580 
581 static void pqi_update_time_worker(struct work_struct *work)
582 {
583 	int rc;
584 	struct pqi_ctrl_info *ctrl_info;
585 
586 	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
587 		update_time_work);
588 
589 	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
590 	if (rc)
591 		dev_warn(&ctrl_info->pci_dev->dev,
592 			"error updating time on controller\n");
593 
594 	schedule_delayed_work(&ctrl_info->update_time_work,
595 		PQI_UPDATE_TIME_WORK_INTERVAL);
596 }
597 
598 static inline void pqi_schedule_update_time_worker(
599 	struct pqi_ctrl_info *ctrl_info)
600 {
601 	schedule_delayed_work(&ctrl_info->update_time_work, 0);
602 }
603 
604 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
605 	void *buffer, size_t buffer_length)
606 {
607 	int rc;
608 	int pci_direction;
609 	struct pqi_raid_path_request request;
610 
611 	rc = pqi_build_raid_path_request(ctrl_info, &request,
612 		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
613 	if (rc)
614 		return rc;
615 
616 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
617 		NULL, NO_TIMEOUT);
618 
619 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
620 		pci_direction);
621 
622 	return rc;
623 }
624 
625 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
626 	void **buffer)
627 {
628 	int rc;
629 	size_t lun_list_length;
630 	size_t lun_data_length;
631 	size_t new_lun_list_length;
632 	void *lun_data = NULL;
633 	struct report_lun_header *report_lun_header;
634 
635 	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
636 	if (!report_lun_header) {
637 		rc = -ENOMEM;
638 		goto out;
639 	}
640 
641 	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
642 		sizeof(*report_lun_header));
643 	if (rc)
644 		goto out;
645 
646 	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
647 
648 again:
649 	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
650 
651 	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
652 	if (!lun_data) {
653 		rc = -ENOMEM;
654 		goto out;
655 	}
656 
657 	if (lun_list_length == 0) {
658 		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
659 		goto out;
660 	}
661 
662 	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
663 	if (rc)
664 		goto out;
665 
666 	new_lun_list_length = get_unaligned_be32(
667 		&((struct report_lun_header *)lun_data)->list_length);
668 
669 	if (new_lun_list_length > lun_list_length) {
670 		lun_list_length = new_lun_list_length;
671 		kfree(lun_data);
672 		goto again;
673 	}
674 
675 out:
676 	kfree(report_lun_header);
677 
678 	if (rc) {
679 		kfree(lun_data);
680 		lun_data = NULL;
681 	}
682 
683 	*buffer = lun_data;
684 
685 	return rc;
686 }
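/*
 * The helper above issues the REPORT LUNS command twice: once with
 * just a report_lun_header to learn the list length, then again with a
 * buffer sized to hold the full list.  If the list grew between the
 * two calls, it reallocates and retries.
 */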
687 
688 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
689 	void **buffer)
690 {
691 	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
692 		buffer);
693 }
694 
695 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
696 	void **buffer)
697 {
698 	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
699 }
700 
701 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
702 	struct report_phys_lun_extended **physdev_list,
703 	struct report_log_lun_extended **logdev_list)
704 {
705 	int rc;
706 	size_t logdev_list_length;
707 	size_t logdev_data_length;
708 	struct report_log_lun_extended *internal_logdev_list;
709 	struct report_log_lun_extended *logdev_data;
710 	struct report_lun_header report_lun_header;
711 
712 	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
713 	if (rc)
714 		dev_err(&ctrl_info->pci_dev->dev,
715 			"report physical LUNs failed\n");
716 
717 	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
718 	if (rc)
719 		dev_err(&ctrl_info->pci_dev->dev,
720 			"report logical LUNs failed\n");
721 
722 	/*
723 	 * Tack the controller itself onto the end of the logical device list.
724 	 */
725 
726 	logdev_data = *logdev_list;
727 
728 	if (logdev_data) {
729 		logdev_list_length =
730 			get_unaligned_be32(&logdev_data->header.list_length);
731 	} else {
732 		memset(&report_lun_header, 0, sizeof(report_lun_header));
733 		logdev_data =
734 			(struct report_log_lun_extended *)&report_lun_header;
735 		logdev_list_length = 0;
736 	}
737 
738 	logdev_data_length = sizeof(struct report_lun_header) +
739 		logdev_list_length;
740 
741 	internal_logdev_list = kmalloc(logdev_data_length +
742 		sizeof(struct report_log_lun_extended), GFP_KERNEL);
743 	if (!internal_logdev_list) {
744 		kfree(*logdev_list);
745 		*logdev_list = NULL;
746 		return -ENOMEM;
747 	}
748 
749 	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
750 	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
751 		sizeof(struct report_log_lun_extended_entry));
752 	put_unaligned_be32(logdev_list_length +
753 		sizeof(struct report_log_lun_extended_entry),
754 		&internal_logdev_list->header.list_length);
755 
756 	kfree(*logdev_list);
757 	*logdev_list = internal_logdev_list;
758 
759 	return 0;
760 }
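/*
 * Note that pqi_get_device_lists() appends one zeroed
 * report_log_lun_extended_entry to the end of the logical device list
 * and bumps the reported list length accordingly, so the controller
 * itself is picked up by the scan as a logical device (see the comment
 * above about tacking the controller onto the list).
 */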
761 
762 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
763 	int bus, int target, int lun)
764 {
765 	device->bus = bus;
766 	device->target = target;
767 	device->lun = lun;
768 }
769 
770 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
771 {
772 	u8 *scsi3addr;
773 	u32 lunid;
774 
775 	scsi3addr = device->scsi3addr;
776 	lunid = get_unaligned_le32(scsi3addr);
777 
778 	if (pqi_is_hba_lunid(scsi3addr)) {
779 		/* The specified device is the controller. */
780 		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
781 		device->target_lun_valid = true;
782 		return;
783 	}
784 
785 	if (pqi_is_logical_device(device)) {
786 		pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
787 			lunid & 0x3fff);
788 		device->target_lun_valid = true;
789 		return;
790 	}
791 
792 	/*
793 	 * Defer target and LUN assignment for non-controller physical devices
794 	 * because the SAS transport layer will make these assignments later.
795 	 */
796 	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
797 }
798 
799 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
800 	struct pqi_scsi_dev *device)
801 {
802 	int rc;
803 	u8 raid_level;
804 	u8 *buffer;
805 
806 	raid_level = SA_RAID_UNKNOWN;
807 
808 	buffer = kmalloc(64, GFP_KERNEL);
809 	if (buffer) {
810 		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
811 			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
812 		if (rc == 0) {
813 			raid_level = buffer[8];
814 			if (raid_level > SA_RAID_MAX)
815 				raid_level = SA_RAID_UNKNOWN;
816 		}
817 		kfree(buffer);
818 	}
819 
820 	device->raid_level = raid_level;
821 }
822 
823 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
824 	struct pqi_scsi_dev *device, struct raid_map *raid_map)
825 {
826 	char *err_msg;
827 	u32 raid_map_size;
828 	u32 r5or6_blocks_per_row;
829 	unsigned int num_phys_disks;
830 	unsigned int num_raid_map_entries;
831 
832 	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
833 
834 	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
835 		err_msg = "RAID map too small";
836 		goto bad_raid_map;
837 	}
838 
839 	if (raid_map_size > sizeof(*raid_map)) {
840 		err_msg = "RAID map too large";
841 		goto bad_raid_map;
842 	}
843 
844 	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
845 		(get_unaligned_le16(&raid_map->data_disks_per_row) +
846 		get_unaligned_le16(&raid_map->metadata_disks_per_row));
847 	num_raid_map_entries = num_phys_disks *
848 		get_unaligned_le16(&raid_map->row_cnt);
849 
850 	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
851 		err_msg = "invalid number of map entries in RAID map";
852 		goto bad_raid_map;
853 	}
854 
855 	if (device->raid_level == SA_RAID_1) {
856 		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
857 			err_msg = "invalid RAID-1 map";
858 			goto bad_raid_map;
859 		}
860 	} else if (device->raid_level == SA_RAID_ADM) {
861 		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
862 			err_msg = "invalid RAID-1(ADM) map";
863 			goto bad_raid_map;
864 		}
865 	} else if ((device->raid_level == SA_RAID_5 ||
866 		device->raid_level == SA_RAID_6) &&
867 		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
868 		/* RAID 50/60 */
869 		r5or6_blocks_per_row =
870 			get_unaligned_le16(&raid_map->strip_size) *
871 			get_unaligned_le16(&raid_map->data_disks_per_row);
872 		if (r5or6_blocks_per_row == 0) {
873 			err_msg = "invalid RAID-5 or RAID-6 map";
874 			goto bad_raid_map;
875 		}
876 	}
877 
878 	return 0;
879 
880 bad_raid_map:
881 	dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
882 
883 	return -EINVAL;
884 }
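/*
 * Example of the entry count check above (illustrative numbers only):
 * a RAID-5 volume with layout_map_count = 1, data_disks_per_row = 3,
 * metadata_disks_per_row = 1 and row_cnt = 32 describes
 * 1 * (3 + 1) * 32 = 128 map entries, which must not exceed
 * RAID_MAP_MAX_ENTRIES.
 */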
885 
886 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
887 	struct pqi_scsi_dev *device)
888 {
889 	int rc;
890 	int pci_direction;
891 	struct pqi_raid_path_request request;
892 	struct raid_map *raid_map;
893 
894 	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
895 	if (!raid_map)
896 		return -ENOMEM;
897 
898 	rc = pqi_build_raid_path_request(ctrl_info, &request,
899 		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
900 		sizeof(*raid_map), 0, &pci_direction);
901 	if (rc)
902 		goto error;
903 
904 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
905 		NULL, NO_TIMEOUT);
906 
907 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
908 		pci_direction);
909 
910 	if (rc)
911 		goto error;
912 
913 	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
914 	if (rc)
915 		goto error;
916 
917 	device->raid_map = raid_map;
918 
919 	return 0;
920 
921 error:
922 	kfree(raid_map);
923 
924 	return rc;
925 }
926 
927 static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
928 	struct pqi_scsi_dev *device)
929 {
930 	int rc;
931 	u8 *buffer;
932 	u8 offload_status;
933 
934 	buffer = kmalloc(64, GFP_KERNEL);
935 	if (!buffer)
936 		return;
937 
938 	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
939 		VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
940 	if (rc)
941 		goto out;
942 
943 #define OFFLOAD_STATUS_BYTE	4
944 #define OFFLOAD_CONFIGURED_BIT	0x1
945 #define OFFLOAD_ENABLED_BIT	0x2
946 
947 	offload_status = buffer[OFFLOAD_STATUS_BYTE];
948 	device->offload_configured =
949 		!!(offload_status & OFFLOAD_CONFIGURED_BIT);
950 	if (device->offload_configured) {
951 		device->offload_enabled_pending =
952 			!!(offload_status & OFFLOAD_ENABLED_BIT);
953 		if (pqi_get_raid_map(ctrl_info, device))
954 			device->offload_enabled_pending = false;
955 	}
956 
957 out:
958 	kfree(buffer);
959 }
960 
961 /*
962  * Use vendor-specific VPD to determine online/offline status of a volume.
963  */
964 
965 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
966 	struct pqi_scsi_dev *device)
967 {
968 	int rc;
969 	size_t page_length;
970 	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
971 	bool volume_offline = true;
972 	u32 volume_flags;
973 	struct ciss_vpd_logical_volume_status *vpd;
974 
975 	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
976 	if (!vpd)
977 		goto no_buffer;
978 
979 	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
980 		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
981 	if (rc)
982 		goto out;
983 
984 	page_length = offsetof(struct ciss_vpd_logical_volume_status,
985 		volume_status) + vpd->page_length;
986 	if (page_length < sizeof(*vpd))
987 		goto out;
988 
989 	volume_status = vpd->volume_status;
990 	volume_flags = get_unaligned_be32(&vpd->flags);
991 	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
992 
993 out:
994 	kfree(vpd);
995 no_buffer:
996 	device->volume_status = volume_status;
997 	device->volume_offline = volume_offline;
998 }
999 
1000 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1001 	struct pqi_scsi_dev *device)
1002 {
1003 	int rc;
1004 	u8 *buffer;
1005 
1006 	buffer = kmalloc(64, GFP_KERNEL);
1007 	if (!buffer)
1008 		return -ENOMEM;
1009 
1010 	/* Send an inquiry to the device to see what it is. */
1011 	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1012 	if (rc)
1013 		goto out;
1014 
1015 	scsi_sanitize_inquiry_string(&buffer[8], 8);
1016 	scsi_sanitize_inquiry_string(&buffer[16], 16);
1017 
1018 	device->devtype = buffer[0] & 0x1f;
1019 	memcpy(device->vendor, &buffer[8],
1020 		sizeof(device->vendor));
1021 	memcpy(device->model, &buffer[16],
1022 		sizeof(device->model));
1023 
1024 	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1025 		pqi_get_raid_level(ctrl_info, device);
1026 		pqi_get_offload_status(ctrl_info, device);
1027 		pqi_get_volume_status(ctrl_info, device);
1028 	}
1029 
1030 out:
1031 	kfree(buffer);
1032 
1033 	return rc;
1034 }
1035 
1036 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1037 	struct pqi_scsi_dev *device,
1038 	struct bmic_identify_physical_device *id_phys)
1039 {
1040 	int rc;
1041 
1042 	memset(id_phys, 0, sizeof(*id_phys));
1043 
1044 	rc = pqi_identify_physical_device(ctrl_info, device,
1045 		id_phys, sizeof(*id_phys));
1046 	if (rc) {
1047 		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1048 		return;
1049 	}
1050 
1051 	device->queue_depth =
1052 		get_unaligned_le16(&id_phys->current_queue_depth_limit);
1053 	device->device_type = id_phys->device_type;
1054 	device->active_path_index = id_phys->active_path_number;
1055 	device->path_map = id_phys->redundant_path_present_map;
1056 	memcpy(&device->box,
1057 		&id_phys->alternate_paths_phys_box_on_port,
1058 		sizeof(device->box));
1059 	memcpy(&device->phys_connector,
1060 		&id_phys->alternate_paths_phys_connector,
1061 		sizeof(device->phys_connector));
1062 	device->bay = id_phys->phys_bay_in_box;
1063 }
1064 
1065 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1066 	struct pqi_scsi_dev *device)
1067 {
1068 	char *status;
1069 	static const char unknown_state_str[] =
1070 		"Volume is in an unknown state (%u)";
1071 	char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1072 
1073 	switch (device->volume_status) {
1074 	case CISS_LV_OK:
1075 		status = "Volume online";
1076 		break;
1077 	case CISS_LV_FAILED:
1078 		status = "Volume failed";
1079 		break;
1080 	case CISS_LV_NOT_CONFIGURED:
1081 		status = "Volume not configured";
1082 		break;
1083 	case CISS_LV_DEGRADED:
1084 		status = "Volume degraded";
1085 		break;
1086 	case CISS_LV_READY_FOR_RECOVERY:
1087 		status = "Volume ready for recovery operation";
1088 		break;
1089 	case CISS_LV_UNDERGOING_RECOVERY:
1090 		status = "Volume undergoing recovery";
1091 		break;
1092 	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1093 		status = "Wrong physical drive was replaced";
1094 		break;
1095 	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1096 		status = "A physical drive is not properly connected";
1097 		break;
1098 	case CISS_LV_HARDWARE_OVERHEATING:
1099 		status = "Hardware is overheating";
1100 		break;
1101 	case CISS_LV_HARDWARE_HAS_OVERHEATED:
1102 		status = "Hardware has overheated";
1103 		break;
1104 	case CISS_LV_UNDERGOING_EXPANSION:
1105 		status = "Volume undergoing expansion";
1106 		break;
1107 	case CISS_LV_NOT_AVAILABLE:
1108 		status = "Volume waiting for transforming volume";
1109 		break;
1110 	case CISS_LV_QUEUED_FOR_EXPANSION:
1111 		status = "Volume queued for expansion";
1112 		break;
1113 	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1114 		status = "Volume disabled due to SCSI ID conflict";
1115 		break;
1116 	case CISS_LV_EJECTED:
1117 		status = "Volume has been ejected";
1118 		break;
1119 	case CISS_LV_UNDERGOING_ERASE:
1120 		status = "Volume undergoing background erase";
1121 		break;
1122 	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1123 		status = "Volume ready for predictive spare rebuild";
1124 		break;
1125 	case CISS_LV_UNDERGOING_RPI:
1126 		status = "Volume undergoing rapid parity initialization";
1127 		break;
1128 	case CISS_LV_PENDING_RPI:
1129 		status = "Volume queued for rapid parity initialization";
1130 		break;
1131 	case CISS_LV_ENCRYPTED_NO_KEY:
1132 		status = "Encrypted volume inaccessible - key not present";
1133 		break;
1134 	case CISS_LV_UNDERGOING_ENCRYPTION:
1135 		status = "Volume undergoing encryption process";
1136 		break;
1137 	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1138 		status = "Volume undergoing encryption re-keying process";
1139 		break;
1140 	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1141 		status =
1142 			"Encrypted volume inaccessible - disabled on ctrl";
1143 		break;
1144 	case CISS_LV_PENDING_ENCRYPTION:
1145 		status = "Volume pending migration to encrypted state";
1146 		break;
1147 	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1148 		status = "Volume pending encryption rekeying";
1149 		break;
1150 	case CISS_LV_NOT_SUPPORTED:
1151 		status = "Volume not supported on this controller";
1152 		break;
1153 	case CISS_LV_STATUS_UNAVAILABLE:
1154 		status = "Volume status not available";
1155 		break;
1156 	default:
1157 		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1158 			unknown_state_str, device->volume_status);
1159 		status = unknown_state_buffer;
1160 		break;
1161 	}
1162 
1163 	dev_info(&ctrl_info->pci_dev->dev,
1164 		"scsi %d:%d:%d:%d %s\n",
1165 		ctrl_info->scsi_host->host_no,
1166 		device->bus, device->target, device->lun, status);
1167 }
1168 
1169 static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1170 	struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1171 {
1172 	struct pqi_scsi_dev *device;
1173 
1174 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
1175 		scsi_device_list_entry) {
1176 		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1177 			continue;
1178 		if (pqi_is_logical_device(device))
1179 			continue;
1180 		if (device->aio_handle == aio_handle)
1181 			return device;
1182 	}
1183 
1184 	return NULL;
1185 }
1186 
1187 static void pqi_update_logical_drive_queue_depth(
1188 	struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1189 {
1190 	unsigned int i;
1191 	struct raid_map *raid_map;
1192 	struct raid_map_disk_data *disk_data;
1193 	struct pqi_scsi_dev *phys_disk;
1194 	unsigned int num_phys_disks;
1195 	unsigned int num_raid_map_entries;
1196 	unsigned int queue_depth;
1197 
1198 	logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1199 
1200 	raid_map = logical_drive->raid_map;
1201 	if (!raid_map)
1202 		return;
1203 
1204 	disk_data = raid_map->disk_data;
1205 	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1206 		(get_unaligned_le16(&raid_map->data_disks_per_row) +
1207 		get_unaligned_le16(&raid_map->metadata_disks_per_row));
1208 	num_raid_map_entries = num_phys_disks *
1209 		get_unaligned_le16(&raid_map->row_cnt);
1210 
1211 	queue_depth = 0;
1212 	for (i = 0; i < num_raid_map_entries; i++) {
1213 		phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1214 			disk_data[i].aio_handle);
1215 
1216 		if (!phys_disk) {
1217 			dev_warn(&ctrl_info->pci_dev->dev,
1218 				"failed to find physical disk for logical drive %016llx\n",
1219 				get_unaligned_be64(logical_drive->scsi3addr));
1220 			logical_drive->offload_enabled = false;
1221 			logical_drive->offload_enabled_pending = false;
1222 			kfree(raid_map);
1223 			logical_drive->raid_map = NULL;
1224 			return;
1225 		}
1226 
1227 		queue_depth += phys_disk->queue_depth;
1228 	}
1229 
1230 	logical_drive->queue_depth = queue_depth;
1231 }
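/*
 * A logical drive's queue depth is therefore the sum of the queue
 * depths of its member physical disks, looked up by AIO handle from
 * the RAID map.  If any member cannot be found, the drive keeps the
 * default depth, its RAID map is dropped, and RAID bypass (offload)
 * is disabled for it.
 */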
1232 
1233 static void pqi_update_all_logical_drive_queue_depths(
1234 	struct pqi_ctrl_info *ctrl_info)
1235 {
1236 	struct pqi_scsi_dev *device;
1237 
1238 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
1239 		scsi_device_list_entry) {
1240 		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1241 			continue;
1242 		if (!pqi_is_logical_device(device))
1243 			continue;
1244 		pqi_update_logical_drive_queue_depth(ctrl_info, device);
1245 	}
1246 }
1247 
1248 static void pqi_rescan_worker(struct work_struct *work)
1249 {
1250 	struct pqi_ctrl_info *ctrl_info;
1251 
1252 	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1253 		rescan_work);
1254 
1255 	pqi_scan_scsi_devices(ctrl_info);
1256 }
1257 
1258 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1259 	struct pqi_scsi_dev *device)
1260 {
1261 	int rc;
1262 
1263 	if (pqi_is_logical_device(device))
1264 		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1265 			device->target, device->lun);
1266 	else
1267 		rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1268 
1269 	return rc;
1270 }
1271 
1272 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1273 	struct pqi_scsi_dev *device)
1274 {
1275 	if (pqi_is_logical_device(device))
1276 		scsi_remove_device(device->sdev);
1277 	else
1278 		pqi_remove_sas_device(device);
1279 }
1280 
1281 /* Assumes the SCSI device list lock is held. */
1282 
1283 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1284 	int bus, int target, int lun)
1285 {
1286 	struct pqi_scsi_dev *device;
1287 
1288 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
1289 		scsi_device_list_entry)
1290 		if (device->bus == bus && device->target == target &&
1291 			device->lun == lun)
1292 			return device;
1293 
1294 	return NULL;
1295 }
1296 
1297 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1298 	struct pqi_scsi_dev *dev2)
1299 {
1300 	if (dev1->is_physical_device != dev2->is_physical_device)
1301 		return false;
1302 
1303 	if (dev1->is_physical_device)
1304 		return dev1->wwid == dev2->wwid;
1305 
1306 	return memcmp(dev1->volume_id, dev2->volume_id,
1307 		sizeof(dev1->volume_id)) == 0;
1308 }
1309 
1310 enum pqi_find_result {
1311 	DEVICE_NOT_FOUND,
1312 	DEVICE_CHANGED,
1313 	DEVICE_SAME,
1314 };
1315 
1316 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1317 	struct pqi_scsi_dev *device_to_find,
1318 	struct pqi_scsi_dev **matching_device)
1319 {
1320 	struct pqi_scsi_dev *device;
1321 
1322 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
1323 		scsi_device_list_entry) {
1324 		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1325 			device->scsi3addr)) {
1326 			*matching_device = device;
1327 			if (pqi_device_equal(device_to_find, device)) {
1328 				if (device_to_find->volume_offline)
1329 					return DEVICE_CHANGED;
1330 				return DEVICE_SAME;
1331 			}
1332 			return DEVICE_CHANGED;
1333 		}
1334 	}
1335 
1336 	return DEVICE_NOT_FOUND;
1337 }
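/*
 * Matching above is keyed on the 8-byte SCSI address.  A device at the
 * same address only counts as DEVICE_SAME when its identity (WWID for
 * physical devices, volume ID for logical ones) matches and the new
 * copy is not reporting itself offline; anything else is treated as
 * DEVICE_CHANGED, so the old entry is removed and the new one added.
 */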
1338 
1339 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1340 	char *action, struct pqi_scsi_dev *device)
1341 {
1342 	dev_info(&ctrl_info->pci_dev->dev,
1343 		"%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1344 		action,
1345 		ctrl_info->scsi_host->host_no,
1346 		device->bus,
1347 		device->target,
1348 		device->lun,
1349 		scsi_device_type(device->devtype),
1350 		device->vendor,
1351 		device->model,
1352 		pqi_raid_level_to_string(device->raid_level),
1353 		device->offload_configured ? '+' : '-',
1354 		device->offload_enabled_pending ? '+' : '-',
1355 		device->expose_device ? '+' : '-',
1356 		device->queue_depth);
1357 }
1358 
1359 /* Assumes the SCSI device list lock is held. */
1360 
1361 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1362 	struct pqi_scsi_dev *new_device)
1363 {
1364 	existing_device->devtype = new_device->devtype;
1365 	existing_device->device_type = new_device->device_type;
1366 	existing_device->bus = new_device->bus;
1367 	if (new_device->target_lun_valid) {
1368 		existing_device->target = new_device->target;
1369 		existing_device->lun = new_device->lun;
1370 		existing_device->target_lun_valid = true;
1371 	}
1372 
1373 	/* By definition, the scsi3addr and wwid fields are already the same. */
1374 
1375 	existing_device->is_physical_device = new_device->is_physical_device;
1376 	existing_device->expose_device = new_device->expose_device;
1377 	existing_device->no_uld_attach = new_device->no_uld_attach;
1378 	existing_device->aio_enabled = new_device->aio_enabled;
1379 	memcpy(existing_device->vendor, new_device->vendor,
1380 		sizeof(existing_device->vendor));
1381 	memcpy(existing_device->model, new_device->model,
1382 		sizeof(existing_device->model));
1383 	existing_device->sas_address = new_device->sas_address;
1384 	existing_device->raid_level = new_device->raid_level;
1385 	existing_device->queue_depth = new_device->queue_depth;
1386 	existing_device->aio_handle = new_device->aio_handle;
1387 	existing_device->volume_status = new_device->volume_status;
1388 	existing_device->active_path_index = new_device->active_path_index;
1389 	existing_device->path_map = new_device->path_map;
1390 	existing_device->bay = new_device->bay;
1391 	memcpy(existing_device->box, new_device->box,
1392 		sizeof(existing_device->box));
1393 	memcpy(existing_device->phys_connector, new_device->phys_connector,
1394 		sizeof(existing_device->phys_connector));
1395 	existing_device->offload_configured = new_device->offload_configured;
1396 	existing_device->offload_enabled = false;
1397 	existing_device->offload_enabled_pending =
1398 		new_device->offload_enabled_pending;
1399 	existing_device->offload_to_mirror = 0;
1400 	kfree(existing_device->raid_map);
1401 	existing_device->raid_map = new_device->raid_map;
1402 
1403 	/* To prevent this from being freed later. */
1404 	new_device->raid_map = NULL;
1405 }
1406 
1407 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1408 {
1409 	if (device) {
1410 		kfree(device->raid_map);
1411 		kfree(device);
1412 	}
1413 }
1414 
1415 /*
1416  * Called when exposing a new device to the OS fails in order to re-adjust
1417  * our internal SCSI device list to match the SCSI ML's view.
1418  */
1419 
1420 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1421 	struct pqi_scsi_dev *device)
1422 {
1423 	unsigned long flags;
1424 
1425 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1426 	list_del(&device->scsi_device_list_entry);
1427 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1428 
1429 	/* Allow the device structure to be freed later. */
1430 	device->keep_device = false;
1431 }
1432 
1433 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1434 	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1435 {
1436 	int rc;
1437 	unsigned int i;
1438 	unsigned long flags;
1439 	enum pqi_find_result find_result;
1440 	struct pqi_scsi_dev *device;
1441 	struct pqi_scsi_dev *next;
1442 	struct pqi_scsi_dev *matching_device;
1443 	struct list_head add_list;
1444 	struct list_head delete_list;
1445 
1446 	INIT_LIST_HEAD(&add_list);
1447 	INIT_LIST_HEAD(&delete_list);
1448 
1449 	/*
1450 	 * The idea here is to do as little work as possible while holding the
1451 	 * spinlock.  That's why we go to great pains to defer anything other
1452 	 * than updating the internal device list until after we release the
1453 	 * spinlock.
1454 	 */
1455 
1456 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1457 
1458 	/* Assume that all devices in the existing list have gone away. */
1459 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
1460 		scsi_device_list_entry)
1461 		device->device_gone = true;
1462 
1463 	for (i = 0; i < num_new_devices; i++) {
1464 		device = new_device_list[i];
1465 
1466 		find_result = pqi_scsi_find_entry(ctrl_info, device,
1467 						&matching_device);
1468 
1469 		switch (find_result) {
1470 		case DEVICE_SAME:
1471 			/*
1472 			 * The newly found device is already in the existing
1473 			 * device list.
1474 			 */
1475 			device->new_device = false;
1476 			matching_device->device_gone = false;
1477 			pqi_scsi_update_device(matching_device, device);
1478 			break;
1479 		case DEVICE_NOT_FOUND:
1480 			/*
1481 			 * The newly found device is NOT in the existing device
1482 			 * list.
1483 			 */
1484 			device->new_device = true;
1485 			break;
1486 		case DEVICE_CHANGED:
1487 			/*
1488 			 * The original device has gone away and we need to add
1489 			 * the new device.
1490 			 */
1491 			device->new_device = true;
1492 			break;
1493 		default:
1494 			WARN_ON(find_result);
1495 			break;
1496 		}
1497 	}
1498 
1499 	/* Process all devices that have gone away. */
1500 	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1501 		scsi_device_list_entry) {
1502 		if (device->device_gone) {
1503 			list_del(&device->scsi_device_list_entry);
1504 			list_add_tail(&device->delete_list_entry, &delete_list);
1505 		}
1506 	}
1507 
1508 	/* Process all new devices. */
1509 	for (i = 0; i < num_new_devices; i++) {
1510 		device = new_device_list[i];
1511 		if (!device->new_device)
1512 			continue;
1513 		if (device->volume_offline)
1514 			continue;
1515 		list_add_tail(&device->scsi_device_list_entry,
1516 			&ctrl_info->scsi_device_list);
1517 		list_add_tail(&device->add_list_entry, &add_list);
1518 		/* To prevent this device structure from being freed later. */
1519 		device->keep_device = true;
1520 	}
1521 
1522 	pqi_update_all_logical_drive_queue_depths(ctrl_info);
1523 
1524 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
1525 		scsi_device_list_entry)
1526 		device->offload_enabled =
1527 			device->offload_enabled_pending;
1528 
1529 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1530 
1531 	/* Remove all devices that have gone away. */
1532 	list_for_each_entry_safe(device, next, &delete_list,
1533 		delete_list_entry) {
1534 		if (device->sdev)
1535 			pqi_remove_device(ctrl_info, device);
1536 		if (device->volume_offline) {
1537 			pqi_dev_info(ctrl_info, "offline", device);
1538 			pqi_show_volume_status(ctrl_info, device);
1539 		} else {
1540 			pqi_dev_info(ctrl_info, "removed", device);
1541 		}
1542 		list_del(&device->delete_list_entry);
1543 		pqi_free_device(device);
1544 	}
1545 
1546 	/*
1547 	 * Notify the SCSI ML if the queue depth of any existing device has
1548 	 * changed.
1549 	 */
1550 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
1551 		scsi_device_list_entry) {
1552 		if (device->sdev && device->queue_depth !=
1553 			device->advertised_queue_depth) {
1554 			device->advertised_queue_depth = device->queue_depth;
1555 			scsi_change_queue_depth(device->sdev,
1556 				device->advertised_queue_depth);
1557 		}
1558 	}
1559 
1560 	/* Expose any new devices. */
1561 	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1562 		if (device->expose_device && !device->sdev) {
1563 			rc = pqi_add_device(ctrl_info, device);
1564 			if (rc) {
1565 				dev_warn(&ctrl_info->pci_dev->dev,
1566 					"scsi %d:%d:%d:%d addition failed, device not added\n",
1567 					ctrl_info->scsi_host->host_no,
1568 					device->bus, device->target,
1569 					device->lun);
1570 				pqi_fixup_botched_add(ctrl_info, device);
1571 				continue;
1572 			}
1573 		}
1574 		pqi_dev_info(ctrl_info, "added", device);
1575 	}
1576 }
1577 
1578 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1579 {
1580 	bool is_supported = false;
1581 
1582 	switch (device->devtype) {
1583 	case TYPE_DISK:
1584 	case TYPE_ZBC:
1585 	case TYPE_TAPE:
1586 	case TYPE_MEDIUM_CHANGER:
1587 	case TYPE_ENCLOSURE:
1588 		is_supported = true;
1589 		break;
1590 	case TYPE_RAID:
1591 		/*
1592 		 * Only support the HBA controller itself as a RAID
1593 		 * controller.  If it's a RAID controller other than
1594 		 * the HBA itself (an external RAID controller, MSA500
1595 		 * or similar), we don't support it.
1596 		 */
1597 		if (pqi_is_hba_lunid(device->scsi3addr))
1598 			is_supported = true;
1599 		break;
1600 	}
1601 
1602 	return is_supported;
1603 }
1604 
1605 static inline bool pqi_skip_device(u8 *scsi3addr,
1606 	struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1607 {
1608 	u8 device_flags;
1609 
1610 	if (!MASKED_DEVICE(scsi3addr))
1611 		return false;
1612 
1613 	/* The device is masked. */
1614 
1615 	device_flags = phys_lun_ext_entry->device_flags;
1616 
1617 	if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1618 		/*
1619 		 * It's a non-disk device.  We ignore all devices of this type
1620 		 * when they're masked.
1621 		 */
1622 		return true;
1623 	}
1624 
1625 	return false;
1626 }
1627 
1628 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1629 {
1630 	/* Expose all devices except for physical devices that are masked. */
1631 	if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1632 		return false;
1633 
1634 	return true;
1635 }
1636 
1637 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1638 {
1639 	int i;
1640 	int rc;
1641 	struct list_head new_device_list_head;
1642 	struct report_phys_lun_extended *physdev_list = NULL;
1643 	struct report_log_lun_extended *logdev_list = NULL;
1644 	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1645 	struct report_log_lun_extended_entry *log_lun_ext_entry;
1646 	struct bmic_identify_physical_device *id_phys = NULL;
1647 	u32 num_physicals;
1648 	u32 num_logicals;
1649 	struct pqi_scsi_dev **new_device_list = NULL;
1650 	struct pqi_scsi_dev *device;
1651 	struct pqi_scsi_dev *next;
1652 	unsigned int num_new_devices;
1653 	unsigned int num_valid_devices;
1654 	bool is_physical_device;
1655 	u8 *scsi3addr;
1656 	static char *out_of_memory_msg =
1657 		"out of memory, device discovery stopped";
1658 
1659 	INIT_LIST_HEAD(&new_device_list_head);
1660 
1661 	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1662 	if (rc)
1663 		goto out;
1664 
1665 	if (physdev_list)
1666 		num_physicals =
1667 			get_unaligned_be32(&physdev_list->header.list_length)
1668 				/ sizeof(physdev_list->lun_entries[0]);
1669 	else
1670 		num_physicals = 0;
1671 
1672 	if (logdev_list)
1673 		num_logicals =
1674 			get_unaligned_be32(&logdev_list->header.list_length)
1675 				/ sizeof(logdev_list->lun_entries[0]);
1676 	else
1677 		num_logicals = 0;
1678 
1679 	if (num_physicals) {
1680 		/*
1681 		 * We need this buffer for calls to pqi_get_physical_disk_info()
1682 		 * below.  We allocate it here instead of inside
1683 		 * pqi_get_physical_disk_info() because it's a fairly large
1684 		 * buffer.
1685 		 */
1686 		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1687 		if (!id_phys) {
1688 			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1689 				out_of_memory_msg);
1690 			rc = -ENOMEM;
1691 			goto out;
1692 		}
1693 	}
1694 
1695 	num_new_devices = num_physicals + num_logicals;
1696 
1697 	new_device_list = kmalloc(sizeof(*new_device_list) *
1698 		num_new_devices, GFP_KERNEL);
1699 	if (!new_device_list) {
1700 		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1701 		rc = -ENOMEM;
1702 		goto out;
1703 	}
1704 
1705 	for (i = 0; i < num_new_devices; i++) {
1706 		device = kzalloc(sizeof(*device), GFP_KERNEL);
1707 		if (!device) {
1708 			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1709 				out_of_memory_msg);
1710 			rc = -ENOMEM;
1711 			goto out;
1712 		}
1713 		list_add_tail(&device->new_device_list_entry,
1714 			&new_device_list_head);
1715 	}
1716 
1717 	device = NULL;
1718 	num_valid_devices = 0;
1719 
1720 	for (i = 0; i < num_new_devices; i++) {
1721 
1722 		if (i < num_physicals) {
1723 			is_physical_device = true;
1724 			phys_lun_ext_entry = &physdev_list->lun_entries[i];
1725 			log_lun_ext_entry = NULL;
1726 			scsi3addr = phys_lun_ext_entry->lunid;
1727 		} else {
1728 			is_physical_device = false;
1729 			phys_lun_ext_entry = NULL;
1730 			log_lun_ext_entry =
1731 				&logdev_list->lun_entries[i - num_physicals];
1732 			scsi3addr = log_lun_ext_entry->lunid;
1733 		}
1734 
1735 		if (is_physical_device &&
1736 			pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1737 			continue;
1738 
1739 		if (device)
1740 			device = list_next_entry(device, new_device_list_entry);
1741 		else
1742 			device = list_first_entry(&new_device_list_head,
1743 				struct pqi_scsi_dev, new_device_list_entry);
1744 
1745 		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1746 		device->is_physical_device = is_physical_device;
1747 		device->raid_level = SA_RAID_UNKNOWN;
1748 
1749 		/* Gather information about the device. */
1750 		rc = pqi_get_device_info(ctrl_info, device);
1751 		if (rc == -ENOMEM) {
1752 			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1753 				out_of_memory_msg);
1754 			goto out;
1755 		}
1756 		if (rc) {
1757 			dev_warn(&ctrl_info->pci_dev->dev,
1758 				"obtaining device info failed, skipping device %016llx\n",
1759 				get_unaligned_be64(device->scsi3addr));
1760 			rc = 0;
1761 			continue;
1762 		}
1763 
1764 		if (!pqi_is_supported_device(device))
1765 			continue;
1766 
1767 		pqi_assign_bus_target_lun(device);
1768 
1769 		device->expose_device = pqi_expose_device(device);
1770 
1771 		if (device->is_physical_device) {
1772 			device->wwid = phys_lun_ext_entry->wwid;
1773 			if ((phys_lun_ext_entry->device_flags &
1774 				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1775 				phys_lun_ext_entry->aio_handle)
1776 				device->aio_enabled = true;
1777 		} else {
1778 			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1779 				sizeof(device->volume_id));
1780 		}
1781 
1782 		switch (device->devtype) {
1783 		case TYPE_DISK:
1784 		case TYPE_ZBC:
1785 		case TYPE_ENCLOSURE:
1786 			if (device->is_physical_device) {
1787 				device->sas_address =
1788 					get_unaligned_be64(&device->wwid);
1789 				if (device->devtype == TYPE_DISK ||
1790 					device->devtype == TYPE_ZBC) {
1791 					device->aio_handle =
1792 						phys_lun_ext_entry->aio_handle;
1793 					pqi_get_physical_disk_info(ctrl_info,
1794 						device, id_phys);
1795 				}
1796 			}
1797 			break;
1798 		}
1799 
1800 		new_device_list[num_valid_devices++] = device;
1801 	}
1802 
1803 	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1804 
1805 out:
1806 	list_for_each_entry_safe(device, next, &new_device_list_head,
1807 		new_device_list_entry) {
1808 		if (device->keep_device)
1809 			continue;
1810 		list_del(&device->new_device_list_entry);
1811 		pqi_free_device(device);
1812 	}
1813 
1814 	kfree(new_device_list);
1815 	kfree(physdev_list);
1816 	kfree(logdev_list);
1817 	kfree(id_phys);
1818 
1819 	return rc;
1820 }
1821 
1822 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1823 {
1824 	unsigned long flags;
1825 	struct pqi_scsi_dev *device;
1826 	struct pqi_scsi_dev *next;
1827 
1828 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1829 
1830 	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1831 		scsi_device_list_entry) {
1832 		if (device->sdev)
1833 			pqi_remove_device(ctrl_info, device);
1834 		list_del(&device->scsi_device_list_entry);
1835 		pqi_free_device(device);
1836 	}
1837 
1838 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1839 }
1840 
1841 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1842 {
1843 	int rc;
1844 
1845 	if (pqi_ctrl_offline(ctrl_info))
1846 		return -ENXIO;
1847 
1848 	mutex_lock(&ctrl_info->scan_mutex);
1849 
1850 	rc = pqi_update_scsi_devices(ctrl_info);
1851 	if (rc)
1852 		pqi_schedule_rescan_worker(ctrl_info);
1853 
1854 	mutex_unlock(&ctrl_info->scan_mutex);
1855 
1856 	return rc;
1857 }
1858 
1859 static void pqi_scan_start(struct Scsi_Host *shost)
1860 {
1861 	pqi_scan_scsi_devices(shost_to_hba(shost));
1862 }
1863 
1864 /* Returns TRUE if the scan is finished. */
1865 
1866 static int pqi_scan_finished(struct Scsi_Host *shost,
1867 	unsigned long elapsed_time)
1868 {
1869 	struct pqi_ctrl_info *ctrl_info;
1870 
1871 	ctrl_info = shost_priv(shost);
1872 
1873 	return !mutex_is_locked(&ctrl_info->scan_mutex);
1874 }
1875 
1876 static inline void pqi_set_encryption_info(
1877 	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1878 	u64 first_block)
1879 {
1880 	u32 volume_blk_size;
1881 
1882 	/*
1883 	 * Set the encryption tweak values based on logical block address.
1884 	 * If the block size is 512, the tweak value is equal to the LBA.
1885 	 * For other block sizes, tweak value is (LBA * block size) / 512.
1886 	 */
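	/* e.g. with 4096-byte blocks, tweak = (LBA * 4096) / 512 = LBA * 8. */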
1887 	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1888 	if (volume_blk_size != 512)
1889 		first_block = (first_block * volume_blk_size) / 512;
1890 
1891 	encryption_info->data_encryption_key_index =
1892 		get_unaligned_le16(&raid_map->data_encryption_key_index);
1893 	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1894 	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1895 }
1896 
1897 /*
1898  * Attempt to perform offload RAID mapping for a logical volume I/O.
1899  */
1900 
1901 #define PQI_RAID_BYPASS_INELIGIBLE	1
1902 
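/*
 * Returns PQI_RAID_BYPASS_INELIGIBLE when the request cannot be mapped onto
 * a single physical disk and must be submitted via the normal RAID path.
 */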
1903 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1904 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1905 	struct pqi_queue_group *queue_group)
1906 {
1907 	struct raid_map *raid_map;
1908 	bool is_write = false;
1909 	u32 map_index;
1910 	u64 first_block;
1911 	u64 last_block;
1912 	u32 block_cnt;
1913 	u32 blocks_per_row;
1914 	u64 first_row;
1915 	u64 last_row;
1916 	u32 first_row_offset;
1917 	u32 last_row_offset;
1918 	u32 first_column;
1919 	u32 last_column;
1920 	u64 r0_first_row;
1921 	u64 r0_last_row;
1922 	u32 r5or6_blocks_per_row;
1923 	u64 r5or6_first_row;
1924 	u64 r5or6_last_row;
1925 	u32 r5or6_first_row_offset;
1926 	u32 r5or6_last_row_offset;
1927 	u32 r5or6_first_column;
1928 	u32 r5or6_last_column;
1929 	u16 data_disks_per_row;
1930 	u32 total_disks_per_row;
1931 	u16 layout_map_count;
1932 	u32 stripesize;
1933 	u16 strip_size;
1934 	u32 first_group;
1935 	u32 last_group;
1936 	u32 current_group;
1937 	u32 map_row;
1938 	u32 aio_handle;
1939 	u64 disk_block;
1940 	u32 disk_block_cnt;
1941 	u8 cdb[16];
1942 	u8 cdb_length;
1943 	int offload_to_mirror;
1944 	struct pqi_encryption_info *encryption_info_ptr;
1945 	struct pqi_encryption_info encryption_info;
1946 #if BITS_PER_LONG == 32
1947 	u64 tmpdiv;
1948 #endif
1949 
1950 	/* Check for valid opcode, get LBA and block count. */
1951 	switch (scmd->cmnd[0]) {
1952 	case WRITE_6:
1953 		is_write = true;
1954 		/* fall through */
1955 	case READ_6:
1956 		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
1957 			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
1958 		block_cnt = (u32)scmd->cmnd[4];
1959 		if (block_cnt == 0)
1960 			block_cnt = 256;
1961 		break;
1962 	case WRITE_10:
1963 		is_write = true;
1964 		/* fall through */
1965 	case READ_10:
1966 		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1967 		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1968 		break;
1969 	case WRITE_12:
1970 		is_write = true;
1971 		/* fall through */
1972 	case READ_12:
1973 		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1974 		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1975 		break;
1976 	case WRITE_16:
1977 		is_write = true;
1978 		/* fall through */
1979 	case READ_16:
1980 		first_block = get_unaligned_be64(&scmd->cmnd[2]);
1981 		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1982 		break;
1983 	default:
1984 		/* Process via normal I/O path. */
1985 		return PQI_RAID_BYPASS_INELIGIBLE;
1986 	}
1987 
1988 	/* Check for write to non-RAID-0. */
1989 	if (is_write && device->raid_level != SA_RAID_0)
1990 		return PQI_RAID_BYPASS_INELIGIBLE;
1991 
1992 	if (unlikely(block_cnt == 0))
1993 		return PQI_RAID_BYPASS_INELIGIBLE;
1994 
1995 	last_block = first_block + block_cnt - 1;
1996 	raid_map = device->raid_map;
1997 
1998 	/* Check for invalid block or wraparound. */
1999 	if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2000 		last_block < first_block)
2001 		return PQI_RAID_BYPASS_INELIGIBLE;
2002 
2003 	data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2004 	strip_size = get_unaligned_le16(&raid_map->strip_size);
2005 	layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2006 
2007 	/* Calculate stripe information for the request. */
2008 	blocks_per_row = data_disks_per_row * strip_size;
2009 #if BITS_PER_LONG == 32
2010 	tmpdiv = first_block;
2011 	do_div(tmpdiv, blocks_per_row);
2012 	first_row = tmpdiv;
2013 	tmpdiv = last_block;
2014 	do_div(tmpdiv, blocks_per_row);
2015 	last_row = tmpdiv;
2016 	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2017 	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2018 	tmpdiv = first_row_offset;
2019 	do_div(tmpdiv, strip_size);
2020 	first_column = tmpdiv;
2021 	tmpdiv = last_row_offset;
2022 	do_div(tmpdiv, strip_size);
2023 	last_column = tmpdiv;
2024 #else
2025 	first_row = first_block / blocks_per_row;
2026 	last_row = last_block / blocks_per_row;
2027 	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2028 	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2029 	first_column = first_row_offset / strip_size;
2030 	last_column = last_row_offset / strip_size;
2031 #endif
2032 
2033 	/* If this isn't a single row/column, let the controller handle it. */
2034 	if (first_row != last_row || first_column != last_column)
2035 		return PQI_RAID_BYPASS_INELIGIBLE;
2036 
2037 	/* Proceeding with driver mapping. */
2038 	total_disks_per_row = data_disks_per_row +
2039 		get_unaligned_le16(&raid_map->metadata_disks_per_row);
2040 	map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2041 		get_unaligned_le16(&raid_map->row_cnt);
2042 	map_index = (map_row * total_disks_per_row) + first_column;
2043 
2044 	/* RAID 1 */
2045 	if (device->raid_level == SA_RAID_1) {
2046 		if (device->offload_to_mirror)
2047 			map_index += data_disks_per_row;
2048 		device->offload_to_mirror = !device->offload_to_mirror;
2049 	} else if (device->raid_level == SA_RAID_ADM) {
2050 		/* RAID ADM */
2051 		/*
2052 		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2053 		 * divisible by 3.
2054 		 */
2055 		offload_to_mirror = device->offload_to_mirror;
2056 		if (offload_to_mirror == 0)  {
2057 			/* Use a physical disk in the first mirrored group. */
2058 			map_index %= data_disks_per_row;
2059 		} else {
2060 			do {
2061 				/*
2062 				 * Determine the mirror group that map_index
2063 				 * indicates.
2064 				 */
2065 				current_group = map_index / data_disks_per_row;
2066 
2067 				if (offload_to_mirror != current_group) {
2068 					if (current_group <
2069 						layout_map_count - 1) {
2070 						/*
2071 						 * Select raid index from
2072 						 * next group.
2073 						 */
2074 						map_index += data_disks_per_row;
2075 						current_group++;
2076 					} else {
2077 						/*
2078 						 * Select raid index from first
2079 						 * group.
2080 						 */
2081 						map_index %= data_disks_per_row;
2082 						current_group = 0;
2083 					}
2084 				}
2085 			} while (offload_to_mirror != current_group);
2086 		}
2087 
2088 		/* Set mirror group to use next time. */
2089 		offload_to_mirror =
2090 			(offload_to_mirror >= layout_map_count - 1) ?
2091 				0 : offload_to_mirror + 1;
2092 		WARN_ON(offload_to_mirror >= layout_map_count);
2093 		device->offload_to_mirror = offload_to_mirror;
2094 		/*
2095 		 * Avoid direct use of device->offload_to_mirror within this
2096 		 * function since multiple threads might simultaneously
2097 		 * increment it beyond device->layout_map_count - 1.
2098 		 */
2099 	} else if ((device->raid_level == SA_RAID_5 ||
2100 		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2101 		/* RAID 50/60 */
2102 		/* Verify first and last block are in same RAID group */
2103 		r5or6_blocks_per_row = strip_size * data_disks_per_row;
2104 		stripesize = r5or6_blocks_per_row * layout_map_count;
2105 #if BITS_PER_LONG == 32
2106 		tmpdiv = first_block;
2107 		first_group = do_div(tmpdiv, stripesize);
2108 		tmpdiv = first_group;
2109 		do_div(tmpdiv, r5or6_blocks_per_row);
2110 		first_group = tmpdiv;
2111 		tmpdiv = last_block;
2112 		last_group = do_div(tmpdiv, stripesize);
2113 		tmpdiv = last_group;
2114 		do_div(tmpdiv, r5or6_blocks_per_row);
2115 		last_group = tmpdiv;
2116 #else
2117 		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2118 		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2119 #endif
2120 		if (first_group != last_group)
2121 			return PQI_RAID_BYPASS_INELIGIBLE;
2122 
2123 		/* Verify request is in a single row of RAID 5/6 */
2124 #if BITS_PER_LONG == 32
2125 		tmpdiv = first_block;
2126 		do_div(tmpdiv, stripesize);
2127 		first_row = r5or6_first_row = r0_first_row = tmpdiv;
2128 		tmpdiv = last_block;
2129 		do_div(tmpdiv, stripesize);
2130 		r5or6_last_row = r0_last_row = tmpdiv;
2131 #else
2132 		first_row = r5or6_first_row = r0_first_row =
2133 			first_block / stripesize;
2134 		r5or6_last_row = r0_last_row = last_block / stripesize;
2135 #endif
2136 		if (r5or6_first_row != r5or6_last_row)
2137 			return PQI_RAID_BYPASS_INELIGIBLE;
2138 
2139 		/* Verify request is in a single column */
2140 #if BITS_PER_LONG == 32
2141 		tmpdiv = first_block;
2142 		first_row_offset = do_div(tmpdiv, stripesize);
2143 		tmpdiv = first_row_offset;
2144 		first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2145 		r5or6_first_row_offset = first_row_offset;
2146 		tmpdiv = last_block;
2147 		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2148 		tmpdiv = r5or6_last_row_offset;
2149 		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2150 		tmpdiv = r5or6_first_row_offset;
2151 		do_div(tmpdiv, strip_size);
2152 		first_column = r5or6_first_column = tmpdiv;
2153 		tmpdiv = r5or6_last_row_offset;
2154 		do_div(tmpdiv, strip_size);
2155 		r5or6_last_column = tmpdiv;
2156 #else
2157 		first_row_offset = r5or6_first_row_offset =
2158 			(u32)((first_block % stripesize) %
2159 			r5or6_blocks_per_row);
2160 
2161 		r5or6_last_row_offset =
2162 			(u32)((last_block % stripesize) %
2163 			r5or6_blocks_per_row);
2164 
2165 		first_column = r5or6_first_row_offset / strip_size;
2166 		r5or6_first_column = first_column;
2167 		r5or6_last_column = r5or6_last_row_offset / strip_size;
2168 #endif
2169 		if (r5or6_first_column != r5or6_last_column)
2170 			return PQI_RAID_BYPASS_INELIGIBLE;
2171 
2172 		/* Request is eligible */
2173 		map_row =
2174 			((u32)(first_row >> raid_map->parity_rotation_shift)) %
2175 			get_unaligned_le16(&raid_map->row_cnt);
2176 
2177 		map_index = (first_group *
2178 			(get_unaligned_le16(&raid_map->row_cnt) *
2179 			total_disks_per_row)) +
2180 			(map_row * total_disks_per_row) + first_column;
2181 	}
2182 
2183 	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2184 		return PQI_RAID_BYPASS_INELIGIBLE;
2185 
2186 	aio_handle = raid_map->disk_data[map_index].aio_handle;
2187 	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2188 		first_row * strip_size +
2189 		(first_row_offset - first_column * strip_size);
2190 	disk_block_cnt = block_cnt;
2191 
2192 	/* Handle differing logical/physical block sizes. */
2193 	if (raid_map->phys_blk_shift) {
2194 		disk_block <<= raid_map->phys_blk_shift;
2195 		disk_block_cnt <<= raid_map->phys_blk_shift;
2196 	}
2197 
2198 	if (unlikely(disk_block_cnt > 0xffff))
2199 		return PQI_RAID_BYPASS_INELIGIBLE;
2200 
2201 	/* Build the new CDB for the physical disk I/O. */
2202 	if (disk_block > 0xffffffff) {
2203 		cdb[0] = is_write ? WRITE_16 : READ_16;
2204 		cdb[1] = 0;
2205 		put_unaligned_be64(disk_block, &cdb[2]);
2206 		put_unaligned_be32(disk_block_cnt, &cdb[10]);
2207 		cdb[14] = 0;
2208 		cdb[15] = 0;
2209 		cdb_length = 16;
2210 	} else {
2211 		cdb[0] = is_write ? WRITE_10 : READ_10;
2212 		cdb[1] = 0;
2213 		put_unaligned_be32((u32)disk_block, &cdb[2]);
2214 		cdb[6] = 0;
2215 		put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2216 		cdb[9] = 0;
2217 		cdb_length = 10;
2218 	}
2219 
2220 	if (get_unaligned_le16(&raid_map->flags) &
2221 		RAID_MAP_ENCRYPTION_ENABLED) {
2222 		pqi_set_encryption_info(&encryption_info, raid_map,
2223 			first_block);
2224 		encryption_info_ptr = &encryption_info;
2225 	} else {
2226 		encryption_info_ptr = NULL;
2227 	}
2228 
2229 	return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2230 		cdb, cdb_length, queue_group, encryption_info_ptr);
2231 }
2232 
2233 #define PQI_STATUS_IDLE		0x0
2234 
2235 #define PQI_CREATE_ADMIN_QUEUE_PAIR	1
2236 #define PQI_DELETE_ADMIN_QUEUE_PAIR	2
2237 
2238 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
2239 #define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
2240 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
2241 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY		0x3
2242 #define PQI_DEVICE_STATE_ERROR				0x4
2243 
2244 #define PQI_MODE_READY_TIMEOUT_SECS		30
2245 #define PQI_MODE_READY_POLL_INTERVAL_MSECS	1
2246 
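/*
 * Poll until the controller is ready in PQI mode: first the PQI signature,
 * then an idle function and status code, then the all-registers-ready device
 * state, all within a single PQI_MODE_READY_TIMEOUT_SECS deadline.
 */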
2247 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2248 {
2249 	struct pqi_device_registers __iomem *pqi_registers;
2250 	unsigned long timeout;
2251 	u64 signature;
2252 	u8 status;
2253 
2254 	pqi_registers = ctrl_info->pqi_registers;
2255 	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2256 
2257 	while (1) {
2258 		signature = readq(&pqi_registers->signature);
2259 		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2260 			sizeof(signature)) == 0)
2261 			break;
2262 		if (time_after(jiffies, timeout)) {
2263 			dev_err(&ctrl_info->pci_dev->dev,
2264 				"timed out waiting for PQI signature\n");
2265 			return -ETIMEDOUT;
2266 		}
2267 		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2268 	}
2269 
2270 	while (1) {
2271 		status = readb(&pqi_registers->function_and_status_code);
2272 		if (status == PQI_STATUS_IDLE)
2273 			break;
2274 		if (time_after(jiffies, timeout)) {
2275 			dev_err(&ctrl_info->pci_dev->dev,
2276 				"timed out waiting for PQI IDLE\n");
2277 			return -ETIMEDOUT;
2278 		}
2279 		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2280 	}
2281 
2282 	while (1) {
2283 		if (readl(&pqi_registers->device_status) ==
2284 			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2285 			break;
2286 		if (time_after(jiffies, timeout)) {
2287 			dev_err(&ctrl_info->pci_dev->dev,
2288 				"timed out waiting for PQI all registers ready\n");
2289 			return -ETIMEDOUT;
2290 		}
2291 		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2292 	}
2293 
2294 	return 0;
2295 }
2296 
2297 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2298 {
2299 	struct pqi_scsi_dev *device;
2300 
2301 	device = io_request->scmd->device->hostdata;
2302 	device->offload_enabled = false;
2303 }
2304 
2305 static inline void pqi_take_device_offline(struct scsi_device *sdev)
2306 {
2307 	struct pqi_ctrl_info *ctrl_info;
2308 	struct pqi_scsi_dev *device;
2309 
2310 	if (scsi_device_online(sdev)) {
2311 		scsi_device_set_state(sdev, SDEV_OFFLINE);
2312 		ctrl_info = shost_to_hba(sdev->host);
2313 		schedule_delayed_work(&ctrl_info->rescan_work, 0);
2314 		device = sdev->hostdata;
2315 		dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2316 			ctrl_info->scsi_host->host_no, device->bus,
2317 			device->target, device->lun);
2318 	}
2319 }
2320 
2321 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2322 {
2323 	u8 scsi_status;
2324 	u8 host_byte;
2325 	struct scsi_cmnd *scmd;
2326 	struct pqi_raid_error_info *error_info;
2327 	size_t sense_data_length;
2328 	int residual_count;
2329 	int xfer_count;
2330 	struct scsi_sense_hdr sshdr;
2331 
2332 	scmd = io_request->scmd;
2333 	if (!scmd)
2334 		return;
2335 
2336 	error_info = io_request->error_info;
2337 	scsi_status = error_info->status;
2338 	host_byte = DID_OK;
2339 
2340 	if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2341 		xfer_count =
2342 			get_unaligned_le32(&error_info->data_out_transferred);
2343 		residual_count = scsi_bufflen(scmd) - xfer_count;
2344 		scsi_set_resid(scmd, residual_count);
2345 		if (xfer_count < scmd->underflow)
2346 			host_byte = DID_SOFT_ERROR;
2347 	}
2348 
2349 	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2350 	if (sense_data_length == 0)
2351 		sense_data_length =
2352 			get_unaligned_le16(&error_info->response_data_length);
2353 	if (sense_data_length) {
2354 		if (sense_data_length > sizeof(error_info->data))
2355 			sense_data_length = sizeof(error_info->data);
2356 
2357 		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2358 			scsi_normalize_sense(error_info->data,
2359 				sense_data_length, &sshdr) &&
2360 				sshdr.sense_key == HARDWARE_ERROR &&
2361 				sshdr.asc == 0x3e &&
2362 				sshdr.ascq == 0x1) {
2363 			pqi_take_device_offline(scmd->device);
2364 			host_byte = DID_NO_CONNECT;
2365 		}
2366 
2367 		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2368 			sense_data_length = SCSI_SENSE_BUFFERSIZE;
2369 		memcpy(scmd->sense_buffer, error_info->data,
2370 			sense_data_length);
2371 	}
2372 
2373 	scmd->result = scsi_status;
2374 	set_host_byte(scmd, host_byte);
2375 }
2376 
2377 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2378 {
2379 	u8 scsi_status;
2380 	u8 host_byte;
2381 	struct scsi_cmnd *scmd;
2382 	struct pqi_aio_error_info *error_info;
2383 	size_t sense_data_length;
2384 	int residual_count;
2385 	int xfer_count;
2386 	bool device_offline;
2387 
2388 	scmd = io_request->scmd;
2389 	error_info = io_request->error_info;
2390 	host_byte = DID_OK;
2391 	sense_data_length = 0;
2392 	device_offline = false;
2393 
2394 	switch (error_info->service_response) {
2395 	case PQI_AIO_SERV_RESPONSE_COMPLETE:
2396 		scsi_status = error_info->status;
2397 		break;
2398 	case PQI_AIO_SERV_RESPONSE_FAILURE:
2399 		switch (error_info->status) {
2400 		case PQI_AIO_STATUS_IO_ABORTED:
2401 			scsi_status = SAM_STAT_TASK_ABORTED;
2402 			break;
2403 		case PQI_AIO_STATUS_UNDERRUN:
2404 			scsi_status = SAM_STAT_GOOD;
2405 			residual_count = get_unaligned_le32(
2406 						&error_info->residual_count);
2407 			scsi_set_resid(scmd, residual_count);
2408 			xfer_count = scsi_bufflen(scmd) - residual_count;
2409 			if (xfer_count < scmd->underflow)
2410 				host_byte = DID_SOFT_ERROR;
2411 			break;
2412 		case PQI_AIO_STATUS_OVERRUN:
2413 			scsi_status = SAM_STAT_GOOD;
2414 			break;
2415 		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2416 			pqi_aio_path_disabled(io_request);
2417 			scsi_status = SAM_STAT_GOOD;
2418 			io_request->status = -EAGAIN;
2419 			break;
2420 		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2421 		case PQI_AIO_STATUS_INVALID_DEVICE:
2422 			device_offline = true;
2423 			pqi_take_device_offline(scmd->device);
2424 			host_byte = DID_NO_CONNECT;
2425 			scsi_status = SAM_STAT_CHECK_CONDITION;
2426 			break;
2427 		case PQI_AIO_STATUS_IO_ERROR:
2428 		default:
2429 			scsi_status = SAM_STAT_CHECK_CONDITION;
2430 			break;
2431 		}
2432 		break;
2433 	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2434 	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2435 		scsi_status = SAM_STAT_GOOD;
2436 		break;
2437 	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2438 	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2439 	default:
2440 		scsi_status = SAM_STAT_CHECK_CONDITION;
2441 		break;
2442 	}
2443 
2444 	if (error_info->data_present) {
2445 		sense_data_length =
2446 			get_unaligned_le16(&error_info->data_length);
2447 		if (sense_data_length) {
2448 			if (sense_data_length > sizeof(error_info->data))
2449 				sense_data_length = sizeof(error_info->data);
2450 			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2451 				sense_data_length = SCSI_SENSE_BUFFERSIZE;
2452 			memcpy(scmd->sense_buffer, error_info->data,
2453 				sense_data_length);
2454 		}
2455 	}
2456 
2457 	if (device_offline && sense_data_length == 0)
2458 		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2459 			0x3e, 0x1);
2460 
2461 	scmd->result = scsi_status;
2462 	set_host_byte(scmd, host_byte);
2463 }
2464 
2465 static void pqi_process_io_error(unsigned int iu_type,
2466 	struct pqi_io_request *io_request)
2467 {
2468 	switch (iu_type) {
2469 	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2470 		pqi_process_raid_io_error(io_request);
2471 		break;
2472 	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2473 		pqi_process_aio_io_error(io_request);
2474 		break;
2475 	}
2476 }
2477 
2478 static int pqi_interpret_task_management_response(
2479 	struct pqi_task_management_response *response)
2480 {
2481 	int rc;
2482 
2483 	switch (response->response_code) {
2484 	case SOP_TMF_COMPLETE:
2485 	case SOP_TMF_FUNCTION_SUCCEEDED:
2486 		rc = 0;
2487 		break;
2488 	default:
2489 		rc = -EIO;
2490 		break;
2491 	}
2492 
2493 	return rc;
2494 }
2495 
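/*
 * Drain completed responses from the queue group's outbound queue, route each
 * one to its originating I/O request and invoke that request's completion
 * callback.  Returns the number of responses handled.
 */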
2496 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2497 	struct pqi_queue_group *queue_group)
2498 {
2499 	unsigned int num_responses;
2500 	pqi_index_t oq_pi;
2501 	pqi_index_t oq_ci;
2502 	struct pqi_io_request *io_request;
2503 	struct pqi_io_response *response;
2504 	u16 request_id;
2505 
2506 	num_responses = 0;
2507 	oq_ci = queue_group->oq_ci_copy;
2508 
2509 	while (1) {
2510 		oq_pi = *queue_group->oq_pi;
2511 		if (oq_pi == oq_ci)
2512 			break;
2513 
2514 		num_responses++;
2515 		response = queue_group->oq_element_array +
2516 			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2517 
2518 		request_id = get_unaligned_le16(&response->request_id);
2519 		WARN_ON(request_id >= ctrl_info->max_io_slots);
2520 
2521 		io_request = &ctrl_info->io_request_pool[request_id];
2522 		WARN_ON(atomic_read(&io_request->refcount) == 0);
2523 
2524 		switch (response->header.iu_type) {
2525 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2526 		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2527 		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2528 			break;
2529 		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2530 			io_request->status =
2531 				pqi_interpret_task_management_response(
2532 					(void *)response);
2533 			break;
2534 		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2535 			pqi_aio_path_disabled(io_request);
2536 			io_request->status = -EAGAIN;
2537 			break;
2538 		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2539 		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2540 			io_request->error_info = ctrl_info->error_buffer +
2541 				(get_unaligned_le16(&response->error_index) *
2542 				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2543 			pqi_process_io_error(response->header.iu_type,
2544 				io_request);
2545 			break;
2546 		default:
2547 			dev_err(&ctrl_info->pci_dev->dev,
2548 				"unexpected IU type: 0x%x\n",
2549 				response->header.iu_type);
2550 			WARN_ON(response->header.iu_type);
2551 			break;
2552 		}
2553 
2554 		io_request->io_complete_callback(io_request,
2555 			io_request->context);
2556 
2557 		/*
2558 		 * Note that the I/O request structure CANNOT BE TOUCHED after
2559 		 * returning from the I/O completion callback!
2560 		 */
2561 
2562 		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2563 	}
2564 
2565 	if (num_responses) {
2566 		queue_group->oq_ci_copy = oq_ci;
2567 		writel(oq_ci, queue_group->oq_ci);
2568 	}
2569 
2570 	return num_responses;
2571 }
2572 
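/*
 * Free elements in a circular queue with producer index pi and consumer
 * index ci.  One element is always left unused so that a full queue can be
 * told apart from an empty one, e.g. pi = 2, ci = 5 and 16 elements gives
 * 16 - (16 - 5 + 2) - 1 = 2 free elements.
 */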
2573 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2574 	unsigned int ci, unsigned int elements_in_queue)
2575 {
2576 	unsigned int num_elements_used;
2577 
2578 	if (pi >= ci)
2579 		num_elements_used = pi - ci;
2580 	else
2581 		num_elements_used = elements_in_queue - ci + pi;
2582 
2583 	return elements_in_queue - num_elements_used - 1;
2584 }
2585 
2586 #define PQI_EVENT_ACK_TIMEOUT	30
2587 
2588 static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2589 	struct pqi_event_acknowledge_request *iu, size_t iu_length)
2590 {
2591 	pqi_index_t iq_pi;
2592 	pqi_index_t iq_ci;
2593 	unsigned long flags;
2594 	void *next_element;
2595 	unsigned long timeout;
2596 	struct pqi_queue_group *queue_group;
2597 
2598 	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2599 	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2600 
2601 	timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2602 
2603 	while (1) {
2604 		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2605 
2606 		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2607 		iq_ci = *queue_group->iq_ci[RAID_PATH];
2608 
2609 		if (pqi_num_elements_free(iq_pi, iq_ci,
2610 			ctrl_info->num_elements_per_iq))
2611 			break;
2612 
2613 		spin_unlock_irqrestore(
2614 			&queue_group->submit_lock[RAID_PATH], flags);
2615 
2616 		if (time_after(jiffies, timeout)) {
2617 			dev_err(&ctrl_info->pci_dev->dev,
2618 				"sending event acknowledge timed out\n");
2619 			return;
2620 		}
2621 	}
2622 
2623 	next_element = queue_group->iq_element_array[RAID_PATH] +
2624 		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2625 
2626 	memcpy(next_element, iu, iu_length);
2627 
2628 	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2629 
2630 	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2631 
2632 	/*
2633 	 * This write notifies the controller that an IU is available to be
2634 	 * processed.
2635 	 */
2636 	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2637 
2638 	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2639 }
2640 
2641 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2642 	struct pqi_event *event)
2643 {
2644 	struct pqi_event_acknowledge_request request;
2645 
2646 	memset(&request, 0, sizeof(request));
2647 
2648 	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2649 	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2650 		&request.header.iu_length);
2651 	request.event_type = event->event_type;
2652 	request.event_id = event->event_id;
2653 	request.additional_event_id = event->additional_event_id;
2654 
2655 	pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2656 }
2657 
2658 static void pqi_event_worker(struct work_struct *work)
2659 {
2660 	unsigned int i;
2661 	struct pqi_ctrl_info *ctrl_info;
2662 	struct pqi_event *pending_event;
2663 	bool got_non_heartbeat_event = false;
2664 
2665 	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2666 
2667 	pending_event = ctrl_info->pending_events;
2668 	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2669 		if (pending_event->pending) {
2670 			pending_event->pending = false;
2671 			pqi_acknowledge_event(ctrl_info, pending_event);
2672 			if (i != PQI_EVENT_HEARTBEAT)
2673 				got_non_heartbeat_event = true;
2674 		}
2675 		pending_event++;
2676 	}
2677 
2678 	if (got_non_heartbeat_event)
2679 		pqi_schedule_rescan_worker(ctrl_info);
2680 }
2681 
2682 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2683 {
2684 	unsigned int i;
2685 	unsigned int path;
2686 	struct pqi_queue_group *queue_group;
2687 	unsigned long flags;
2688 	struct pqi_io_request *io_request;
2689 	struct pqi_io_request *next;
2690 	struct scsi_cmnd *scmd;
2691 
2692 	ctrl_info->controller_online = false;
2693 	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2694 
2695 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2696 		queue_group = &ctrl_info->queue_groups[i];
2697 
2698 		for (path = 0; path < 2; path++) {
2699 			spin_lock_irqsave(
2700 				&queue_group->submit_lock[path], flags);
2701 
2702 			list_for_each_entry_safe(io_request, next,
2703 				&queue_group->request_list[path],
2704 				request_list_entry) {
2705 
2706 				scmd = io_request->scmd;
2707 				if (scmd) {
2708 					set_host_byte(scmd, DID_NO_CONNECT);
2709 					pqi_scsi_done(scmd);
2710 				}
2711 
2712 				list_del(&io_request->request_list_entry);
2713 			}
2714 
2715 			spin_unlock_irqrestore(
2716 				&queue_group->submit_lock[path], flags);
2717 		}
2718 	}
2719 }
2720 
2721 #define PQI_HEARTBEAT_TIMER_INTERVAL	(5 * HZ)
2722 #define PQI_MAX_HEARTBEAT_REQUESTS	5
2723 
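/*
 * If no interrupts have arrived since the previous timer tick, schedule the
 * event worker to send a heartbeat event acknowledgment; once more than
 * PQI_MAX_HEARTBEAT_REQUESTS consecutive intervals pass without interrupts,
 * the controller is taken offline.
 */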
2724 static void pqi_heartbeat_timer_handler(unsigned long data)
2725 {
2726 	int num_interrupts;
2727 	struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2728 
2729 	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2730 
2731 	if (num_interrupts == ctrl_info->previous_num_interrupts) {
2732 		ctrl_info->num_heartbeats_requested++;
2733 		if (ctrl_info->num_heartbeats_requested >
2734 			PQI_MAX_HEARTBEAT_REQUESTS) {
2735 			pqi_take_ctrl_offline(ctrl_info);
2736 			return;
2737 		}
2738 		ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2739 		schedule_work(&ctrl_info->event_work);
2740 	} else {
2741 		ctrl_info->num_heartbeats_requested = 0;
2742 	}
2743 
2744 	ctrl_info->previous_num_interrupts = num_interrupts;
2745 	mod_timer(&ctrl_info->heartbeat_timer,
2746 		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2747 }
2748 
2749 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2750 {
2751 	ctrl_info->previous_num_interrupts =
2752 		atomic_read(&ctrl_info->num_interrupts);
2753 
2754 	init_timer(&ctrl_info->heartbeat_timer);
2755 	ctrl_info->heartbeat_timer.expires =
2756 		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2757 	ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2758 	ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2759 	add_timer(&ctrl_info->heartbeat_timer);
2760 	ctrl_info->heartbeat_timer_started = true;
2761 }
2762 
2763 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2764 {
2765 	if (ctrl_info->heartbeat_timer_started)
2766 		del_timer_sync(&ctrl_info->heartbeat_timer);
2767 }
2768 
2769 static int pqi_event_type_to_event_index(unsigned int event_type)
2770 {
2771 	int index;
2772 
2773 	switch (event_type) {
2774 	case PQI_EVENT_TYPE_HEARTBEAT:
2775 		index = PQI_EVENT_HEARTBEAT;
2776 		break;
2777 	case PQI_EVENT_TYPE_HOTPLUG:
2778 		index = PQI_EVENT_HOTPLUG;
2779 		break;
2780 	case PQI_EVENT_TYPE_HARDWARE:
2781 		index = PQI_EVENT_HARDWARE;
2782 		break;
2783 	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2784 		index = PQI_EVENT_PHYSICAL_DEVICE;
2785 		break;
2786 	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2787 		index = PQI_EVENT_LOGICAL_DEVICE;
2788 		break;
2789 	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2790 		index = PQI_EVENT_AIO_STATE_CHANGE;
2791 		break;
2792 	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2793 		index = PQI_EVENT_AIO_CONFIG_CHANGE;
2794 		break;
2795 	default:
2796 		index = -1;
2797 		break;
2798 	}
2799 
2800 	return index;
2801 }
2802 
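/*
 * Drain the event queue; for each supported event that requests an
 * acknowledgment, record it in pending_events, and schedule the event worker
 * for any non-heartbeat events.
 */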
2803 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2804 {
2805 	unsigned int num_events;
2806 	pqi_index_t oq_pi;
2807 	pqi_index_t oq_ci;
2808 	struct pqi_event_queue *event_queue;
2809 	struct pqi_event_response *response;
2810 	struct pqi_event *pending_event;
2811 	bool need_delayed_work;
2812 	int event_index;
2813 
2814 	event_queue = &ctrl_info->event_queue;
2815 	num_events = 0;
2816 	need_delayed_work = false;
2817 	oq_ci = event_queue->oq_ci_copy;
2818 
2819 	while (1) {
2820 		oq_pi = *event_queue->oq_pi;
2821 		if (oq_pi == oq_ci)
2822 			break;
2823 
2824 		num_events++;
2825 		response = event_queue->oq_element_array +
2826 			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2827 
2828 		event_index =
2829 			pqi_event_type_to_event_index(response->event_type);
2830 
2831 		if (event_index >= 0) {
2832 			if (response->request_acknowlege) {
2833 				pending_event =
2834 					&ctrl_info->pending_events[event_index];
2835 				pending_event->event_type =
2836 					response->event_type;
2837 				pending_event->event_id = response->event_id;
2838 				pending_event->additional_event_id =
2839 					response->additional_event_id;
2840 				if (event_index != PQI_EVENT_HEARTBEAT) {
2841 					pending_event->pending = true;
2842 					need_delayed_work = true;
2843 				}
2844 			}
2845 		}
2846 
2847 		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2848 	}
2849 
2850 	if (num_events) {
2851 		event_queue->oq_ci_copy = oq_ci;
2852 		writel(oq_ci, event_queue->oq_ci);
2853 
2854 		if (need_delayed_work)
2855 			schedule_work(&ctrl_info->event_work);
2856 	}
2857 
2858 	return num_events;
2859 }
2860 
2861 static irqreturn_t pqi_irq_handler(int irq, void *data)
2862 {
2863 	struct pqi_ctrl_info *ctrl_info;
2864 	struct pqi_queue_group *queue_group;
2865 	unsigned int num_responses_handled;
2866 
2867 	queue_group = data;
2868 	ctrl_info = queue_group->ctrl_info;
2869 
2870 	if (!ctrl_info || !queue_group->oq_ci)
2871 		return IRQ_NONE;
2872 
2873 	num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2874 
2875 	if (irq == ctrl_info->event_irq)
2876 		num_responses_handled += pqi_process_event_intr(ctrl_info);
2877 
2878 	if (num_responses_handled)
2879 		atomic_inc(&ctrl_info->num_interrupts);
2880 
2881 	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2882 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2883 
2884 	return IRQ_HANDLED;
2885 }
2886 
2887 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2888 {
2889 	struct pci_dev *pdev = ctrl_info->pci_dev;
2890 	int i;
2891 	int rc;
2892 
2893 	ctrl_info->event_irq = pci_irq_vector(pdev, 0);
2894 
2895 	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2896 		rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
2897 			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
2898 		if (rc) {
2899 			dev_err(&pdev->dev,
2900 				"irq %u init failed with error %d\n",
2901 				pci_irq_vector(pdev, i), rc);
2902 			return rc;
2903 		}
2904 		ctrl_info->num_msix_vectors_initialized++;
2905 	}
2906 
2907 	return 0;
2908 }
2909 
2910 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2911 {
2912 	int ret;
2913 
2914 	ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
2915 			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
2916 			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
2917 	if (ret < 0) {
2918 		dev_err(&ctrl_info->pci_dev->dev,
2919 			"MSI-X init failed with error %d\n", ret);
2920 		return ret;
2921 	}
2922 
2923 	ctrl_info->num_msix_vectors_enabled = ret;
2924 	return 0;
2925 }
2926 
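/*
 * All operational IQ/OQ element arrays, the event queue elements and the
 * queue index words share one coherent DMA allocation.  The required size is
 * computed first by walking a NULL-based pointer through the same alignment
 * steps used below to carve up the real buffer.
 */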
2927 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2928 {
2929 	unsigned int i;
2930 	size_t alloc_length;
2931 	size_t element_array_length_per_iq;
2932 	size_t element_array_length_per_oq;
2933 	void *element_array;
2934 	void *next_queue_index;
2935 	void *aligned_pointer;
2936 	unsigned int num_inbound_queues;
2937 	unsigned int num_outbound_queues;
2938 	unsigned int num_queue_indexes;
2939 	struct pqi_queue_group *queue_group;
2940 
2941 	element_array_length_per_iq =
2942 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
2943 		ctrl_info->num_elements_per_iq;
2944 	element_array_length_per_oq =
2945 		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
2946 		ctrl_info->num_elements_per_oq;
2947 	num_inbound_queues = ctrl_info->num_queue_groups * 2;
2948 	num_outbound_queues = ctrl_info->num_queue_groups;
2949 	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
2950 
2951 	aligned_pointer = NULL;
2952 
2953 	for (i = 0; i < num_inbound_queues; i++) {
2954 		aligned_pointer = PTR_ALIGN(aligned_pointer,
2955 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
2956 		aligned_pointer += element_array_length_per_iq;
2957 	}
2958 
2959 	for (i = 0; i < num_outbound_queues; i++) {
2960 		aligned_pointer = PTR_ALIGN(aligned_pointer,
2961 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
2962 		aligned_pointer += element_array_length_per_oq;
2963 	}
2964 
2965 	aligned_pointer = PTR_ALIGN(aligned_pointer,
2966 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
2967 	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
2968 		PQI_EVENT_OQ_ELEMENT_LENGTH;
2969 
2970 	for (i = 0; i < num_queue_indexes; i++) {
2971 		aligned_pointer = PTR_ALIGN(aligned_pointer,
2972 			PQI_OPERATIONAL_INDEX_ALIGNMENT);
2973 		aligned_pointer += sizeof(pqi_index_t);
2974 	}
2975 
2976 	alloc_length = (size_t)aligned_pointer +
2977 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
2978 
2979 	ctrl_info->queue_memory_base =
2980 		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
2981 			alloc_length,
2982 			&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
2983 
2984 	if (!ctrl_info->queue_memory_base) {
2985 		dev_err(&ctrl_info->pci_dev->dev,
2986 			"failed to allocate memory for PQI admin queues\n");
2987 		return -ENOMEM;
2988 	}
2989 
2990 	ctrl_info->queue_memory_length = alloc_length;
2991 
2992 	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
2993 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
2994 
2995 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2996 		queue_group = &ctrl_info->queue_groups[i];
2997 		queue_group->iq_element_array[RAID_PATH] = element_array;
2998 		queue_group->iq_element_array_bus_addr[RAID_PATH] =
2999 			ctrl_info->queue_memory_base_dma_handle +
3000 				(element_array - ctrl_info->queue_memory_base);
3001 		element_array += element_array_length_per_iq;
3002 		element_array = PTR_ALIGN(element_array,
3003 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3004 		queue_group->iq_element_array[AIO_PATH] = element_array;
3005 		queue_group->iq_element_array_bus_addr[AIO_PATH] =
3006 			ctrl_info->queue_memory_base_dma_handle +
3007 			(element_array - ctrl_info->queue_memory_base);
3008 		element_array += element_array_length_per_iq;
3009 		element_array = PTR_ALIGN(element_array,
3010 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3011 	}
3012 
3013 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3014 		queue_group = &ctrl_info->queue_groups[i];
3015 		queue_group->oq_element_array = element_array;
3016 		queue_group->oq_element_array_bus_addr =
3017 			ctrl_info->queue_memory_base_dma_handle +
3018 			(element_array - ctrl_info->queue_memory_base);
3019 		element_array += element_array_length_per_oq;
3020 		element_array = PTR_ALIGN(element_array,
3021 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3022 	}
3023 
3024 	ctrl_info->event_queue.oq_element_array = element_array;
3025 	ctrl_info->event_queue.oq_element_array_bus_addr =
3026 		ctrl_info->queue_memory_base_dma_handle +
3027 		(element_array - ctrl_info->queue_memory_base);
3028 	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3029 		PQI_EVENT_OQ_ELEMENT_LENGTH;
3030 
3031 	next_queue_index = PTR_ALIGN(element_array,
3032 		PQI_OPERATIONAL_INDEX_ALIGNMENT);
3033 
3034 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3035 		queue_group = &ctrl_info->queue_groups[i];
3036 		queue_group->iq_ci[RAID_PATH] = next_queue_index;
3037 		queue_group->iq_ci_bus_addr[RAID_PATH] =
3038 			ctrl_info->queue_memory_base_dma_handle +
3039 			(next_queue_index - ctrl_info->queue_memory_base);
3040 		next_queue_index += sizeof(pqi_index_t);
3041 		next_queue_index = PTR_ALIGN(next_queue_index,
3042 			PQI_OPERATIONAL_INDEX_ALIGNMENT);
3043 		queue_group->iq_ci[AIO_PATH] = next_queue_index;
3044 		queue_group->iq_ci_bus_addr[AIO_PATH] =
3045 			ctrl_info->queue_memory_base_dma_handle +
3046 			(next_queue_index - ctrl_info->queue_memory_base);
3047 		next_queue_index += sizeof(pqi_index_t);
3048 		next_queue_index = PTR_ALIGN(next_queue_index,
3049 			PQI_OPERATIONAL_INDEX_ALIGNMENT);
3050 		queue_group->oq_pi = next_queue_index;
3051 		queue_group->oq_pi_bus_addr =
3052 			ctrl_info->queue_memory_base_dma_handle +
3053 			(next_queue_index - ctrl_info->queue_memory_base);
3054 		next_queue_index += sizeof(pqi_index_t);
3055 		next_queue_index = PTR_ALIGN(next_queue_index,
3056 			PQI_OPERATIONAL_INDEX_ALIGNMENT);
3057 	}
3058 
3059 	ctrl_info->event_queue.oq_pi = next_queue_index;
3060 	ctrl_info->event_queue.oq_pi_bus_addr =
3061 		ctrl_info->queue_memory_base_dma_handle +
3062 		(next_queue_index - ctrl_info->queue_memory_base);
3063 
3064 	return 0;
3065 }
3066 
3067 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3068 {
3069 	unsigned int i;
3070 	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3071 	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3072 
3073 	/*
3074 	 * Initialize the backpointers to the controller structure in
3075 	 * each operational queue group structure.
3076 	 */
3077 	for (i = 0; i < ctrl_info->num_queue_groups; i++)
3078 		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3079 
3080 	/*
3081 	 * Assign IDs to all operational queues.  Note that the IDs
3082 	 * assigned to operational IQs are independent of the IDs
3083 	 * assigned to operational OQs.
3084 	 */
3085 	ctrl_info->event_queue.oq_id = next_oq_id++;
3086 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3087 		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3088 		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3089 		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3090 	}
3091 
3092 	/*
3093 	 * Assign MSI-X table entry indexes to all queues.  Note that the
3094 	 * interrupt for the event queue is shared with the first queue group.
3095 	 */
3096 	ctrl_info->event_queue.int_msg_num = 0;
3097 	for (i = 0; i < ctrl_info->num_queue_groups; i++)
3098 		ctrl_info->queue_groups[i].int_msg_num = i;
3099 
3100 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3101 		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3102 		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3103 		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3104 		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3105 	}
3106 }
3107 
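/*
 * The admin queue element arrays and index words live in a single aligned
 * pqi_admin_queues_aligned structure carved out of one coherent DMA
 * allocation; their bus addresses are derived from offsets within it.
 */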
3108 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3109 {
3110 	size_t alloc_length;
3111 	struct pqi_admin_queues_aligned *admin_queues_aligned;
3112 	struct pqi_admin_queues *admin_queues;
3113 
3114 	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3115 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3116 
3117 	ctrl_info->admin_queue_memory_base =
3118 		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3119 			alloc_length,
3120 			&ctrl_info->admin_queue_memory_base_dma_handle,
3121 			GFP_KERNEL);
3122 
3123 	if (!ctrl_info->admin_queue_memory_base)
3124 		return -ENOMEM;
3125 
3126 	ctrl_info->admin_queue_memory_length = alloc_length;
3127 
3128 	admin_queues = &ctrl_info->admin_queues;
3129 	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3130 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3131 	admin_queues->iq_element_array =
3132 		&admin_queues_aligned->iq_element_array;
3133 	admin_queues->oq_element_array =
3134 		&admin_queues_aligned->oq_element_array;
3135 	admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3136 	admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3137 
3138 	admin_queues->iq_element_array_bus_addr =
3139 		ctrl_info->admin_queue_memory_base_dma_handle +
3140 		(admin_queues->iq_element_array -
3141 		ctrl_info->admin_queue_memory_base);
3142 	admin_queues->oq_element_array_bus_addr =
3143 		ctrl_info->admin_queue_memory_base_dma_handle +
3144 		(admin_queues->oq_element_array -
3145 		ctrl_info->admin_queue_memory_base);
3146 	admin_queues->iq_ci_bus_addr =
3147 		ctrl_info->admin_queue_memory_base_dma_handle +
3148 		((void *)admin_queues->iq_ci -
3149 		ctrl_info->admin_queue_memory_base);
3150 	admin_queues->oq_pi_bus_addr =
3151 		ctrl_info->admin_queue_memory_base_dma_handle +
3152 		((void *)admin_queues->oq_pi -
3153 		ctrl_info->admin_queue_memory_base);
3154 
3155 	return 0;
3156 }
3157 
3158 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
3159 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1
3160 
3161 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3162 {
3163 	struct pqi_device_registers __iomem *pqi_registers;
3164 	struct pqi_admin_queues *admin_queues;
3165 	unsigned long timeout;
3166 	u8 status;
3167 	u32 reg;
3168 
3169 	pqi_registers = ctrl_info->pqi_registers;
3170 	admin_queues = &ctrl_info->admin_queues;
3171 
3172 	writeq((u64)admin_queues->iq_element_array_bus_addr,
3173 		&pqi_registers->admin_iq_element_array_addr);
3174 	writeq((u64)admin_queues->oq_element_array_bus_addr,
3175 		&pqi_registers->admin_oq_element_array_addr);
3176 	writeq((u64)admin_queues->iq_ci_bus_addr,
3177 		&pqi_registers->admin_iq_ci_addr);
3178 	writeq((u64)admin_queues->oq_pi_bus_addr,
3179 		&pqi_registers->admin_oq_pi_addr);
3180 
3181 	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3182 		(PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3183 		(admin_queues->int_msg_num << 16);
3184 	writel(reg, &pqi_registers->admin_iq_num_elements);
3185 	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3186 		&pqi_registers->function_and_status_code);
3187 
3188 	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3189 	while (1) {
3190 		status = readb(&pqi_registers->function_and_status_code);
3191 		if (status == PQI_STATUS_IDLE)
3192 			break;
3193 		if (time_after(jiffies, timeout))
3194 			return -ETIMEDOUT;
3195 		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3196 	}
3197 
3198 	/*
3199 	 * The offset registers are not initialized to the correct
3200 	 * offsets until *after* the create admin queue pair command
3201 	 * completes successfully.
3202 	 */
3203 	admin_queues->iq_pi = ctrl_info->iomem_base +
3204 		PQI_DEVICE_REGISTERS_OFFSET +
3205 		readq(&pqi_registers->admin_iq_pi_offset);
3206 	admin_queues->oq_ci = ctrl_info->iomem_base +
3207 		PQI_DEVICE_REGISTERS_OFFSET +
3208 		readq(&pqi_registers->admin_oq_ci_offset);
3209 
3210 	return 0;
3211 }
3212 
3213 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3214 	struct pqi_general_admin_request *request)
3215 {
3216 	struct pqi_admin_queues *admin_queues;
3217 	void *next_element;
3218 	pqi_index_t iq_pi;
3219 
3220 	admin_queues = &ctrl_info->admin_queues;
3221 	iq_pi = admin_queues->iq_pi_copy;
3222 
3223 	next_element = admin_queues->iq_element_array +
3224 		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3225 
3226 	memcpy(next_element, request, sizeof(*request));
3227 
3228 	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3229 	admin_queues->iq_pi_copy = iq_pi;
3230 
3231 	/*
3232 	 * This write notifies the controller that an IU is available to be
3233 	 * processed.
3234 	 */
3235 	writel(iq_pi, admin_queues->iq_pi);
3236 }
3237 
3238 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3239 	struct pqi_general_admin_response *response)
3240 {
3241 	struct pqi_admin_queues *admin_queues;
3242 	pqi_index_t oq_pi;
3243 	pqi_index_t oq_ci;
3244 	unsigned long timeout;
3245 
3246 	admin_queues = &ctrl_info->admin_queues;
3247 	oq_ci = admin_queues->oq_ci_copy;
3248 
3249 	timeout = (3 * HZ) + jiffies;
3250 
3251 	while (1) {
3252 		oq_pi = *admin_queues->oq_pi;
3253 		if (oq_pi != oq_ci)
3254 			break;
3255 		if (time_after(jiffies, timeout)) {
3256 			dev_err(&ctrl_info->pci_dev->dev,
3257 				"timed out waiting for admin response\n");
3258 			return -ETIMEDOUT;
3259 		}
3260 		usleep_range(1000, 2000);
3261 	}
3262 
3263 	memcpy(response, admin_queues->oq_element_array +
3264 		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3265 
3266 	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3267 	admin_queues->oq_ci_copy = oq_ci;
3268 	writel(oq_ci, admin_queues->oq_ci);
3269 
3270 	return 0;
3271 }
3272 
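/*
 * Add the new I/O request (if any) to the path's request list, then copy as
 * many queued requests as will fit into the inbound queue, wrapping IUs that
 * span the end of the element array, and finally ring the producer-index
 * doorbell if anything was queued.
 */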
3273 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3274 	struct pqi_queue_group *queue_group, enum pqi_io_path path,
3275 	struct pqi_io_request *io_request)
3276 {
3277 	struct pqi_io_request *next;
3278 	void *next_element;
3279 	pqi_index_t iq_pi;
3280 	pqi_index_t iq_ci;
3281 	size_t iu_length;
3282 	unsigned long flags;
3283 	unsigned int num_elements_needed;
3284 	unsigned int num_elements_to_end_of_queue;
3285 	size_t copy_count;
3286 	struct pqi_iu_header *request;
3287 
3288 	spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3289 
3290 	if (io_request)
3291 		list_add_tail(&io_request->request_list_entry,
3292 			&queue_group->request_list[path]);
3293 
3294 	iq_pi = queue_group->iq_pi_copy[path];
3295 
3296 	list_for_each_entry_safe(io_request, next,
3297 		&queue_group->request_list[path], request_list_entry) {
3298 
3299 		request = io_request->iu;
3300 
3301 		iu_length = get_unaligned_le16(&request->iu_length) +
3302 			PQI_REQUEST_HEADER_LENGTH;
3303 		num_elements_needed =
3304 			DIV_ROUND_UP(iu_length,
3305 				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3306 
3307 		iq_ci = *queue_group->iq_ci[path];
3308 
3309 		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3310 			ctrl_info->num_elements_per_iq))
3311 			break;
3312 
3313 		put_unaligned_le16(queue_group->oq_id,
3314 			&request->response_queue_id);
3315 
3316 		next_element = queue_group->iq_element_array[path] +
3317 			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3318 
3319 		num_elements_to_end_of_queue =
3320 			ctrl_info->num_elements_per_iq - iq_pi;
3321 
3322 		if (num_elements_needed <= num_elements_to_end_of_queue) {
3323 			memcpy(next_element, request, iu_length);
3324 		} else {
3325 			copy_count = num_elements_to_end_of_queue *
3326 				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3327 			memcpy(next_element, request, copy_count);
3328 			memcpy(queue_group->iq_element_array[path],
3329 				(u8 *)request + copy_count,
3330 				iu_length - copy_count);
3331 		}
3332 
3333 		iq_pi = (iq_pi + num_elements_needed) %
3334 			ctrl_info->num_elements_per_iq;
3335 
3336 		list_del(&io_request->request_list_entry);
3337 	}
3338 
3339 	if (iq_pi != queue_group->iq_pi_copy[path]) {
3340 		queue_group->iq_pi_copy[path] = iq_pi;
3341 		/*
3342 		 * This write notifies the controller that one or more IUs are
3343 		 * available to be processed.
3344 		 */
3345 		writel(iq_pi, queue_group->iq_pi[path]);
3346 	}
3347 
3348 	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3349 }
3350 
3351 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3352 	void *context)
3353 {
3354 	struct completion *waiting = context;
3355 
3356 	complete(waiting);
3357 }
3358 
3359 static int pqi_submit_raid_request_synchronous_with_io_request(
3360 	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3361 	unsigned long timeout_msecs)
3362 {
3363 	int rc = 0;
3364 	DECLARE_COMPLETION_ONSTACK(wait);
3365 
3366 	io_request->io_complete_callback = pqi_raid_synchronous_complete;
3367 	io_request->context = &wait;
3368 
3369 	pqi_start_io(ctrl_info,
3370 		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3371 		io_request);
3372 
3373 	if (timeout_msecs == NO_TIMEOUT) {
3374 		wait_for_completion_io(&wait);
3375 	} else {
3376 		if (!wait_for_completion_io_timeout(&wait,
3377 			msecs_to_jiffies(timeout_msecs))) {
3378 			dev_warn(&ctrl_info->pci_dev->dev,
3379 				"command timed out\n");
3380 			rc = -ETIMEDOUT;
3381 		}
3382 	}
3383 
3384 	return rc;
3385 }
3386 
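/*
 * Send a single RAID-path IU through the default queue group and wait for it
 * to complete.  Concurrent callers are throttled by sync_request_sem; when a
 * finite timeout is given, time spent blocked on the semaphore is deducted
 * from it.
 */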
3387 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3388 	struct pqi_iu_header *request, unsigned int flags,
3389 	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3390 {
3391 	int rc;
3392 	struct pqi_io_request *io_request;
3393 	unsigned long start_jiffies;
3394 	unsigned long msecs_blocked;
3395 	size_t iu_length;
3396 
3397 	/*
3398 	 * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are
3399 	 * mutually exclusive.
3400 	 */
3401 
3402 	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3403 		if (down_interruptible(&ctrl_info->sync_request_sem))
3404 			return -ERESTARTSYS;
3405 	} else {
3406 		if (timeout_msecs == NO_TIMEOUT) {
3407 			down(&ctrl_info->sync_request_sem);
3408 		} else {
3409 			start_jiffies = jiffies;
3410 			if (down_timeout(&ctrl_info->sync_request_sem,
3411 				msecs_to_jiffies(timeout_msecs)))
3412 				return -ETIMEDOUT;
3413 			msecs_blocked =
3414 				jiffies_to_msecs(jiffies - start_jiffies);
3415 			if (msecs_blocked >= timeout_msecs)
3416 				return -ETIMEDOUT;
3417 			timeout_msecs -= msecs_blocked;
3418 		}
3419 	}
3420 
3421 	io_request = pqi_alloc_io_request(ctrl_info);
3422 
3423 	put_unaligned_le16(io_request->index,
3424 		&(((struct pqi_raid_path_request *)request)->request_id));
3425 
3426 	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3427 		((struct pqi_raid_path_request *)request)->error_index =
3428 			((struct pqi_raid_path_request *)request)->request_id;
3429 
3430 	iu_length = get_unaligned_le16(&request->iu_length) +
3431 		PQI_REQUEST_HEADER_LENGTH;
3432 	memcpy(io_request->iu, request, iu_length);
3433 
3434 	rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3435 		io_request, timeout_msecs);
3436 
3437 	if (error_info) {
3438 		if (io_request->error_info)
3439 			memcpy(error_info, io_request->error_info,
3440 				sizeof(*error_info));
3441 		else
3442 			memset(error_info, 0, sizeof(*error_info));
3443 	} else if (rc == 0 && io_request->error_info) {
3444 		u8 scsi_status;
3445 		struct pqi_raid_error_info *raid_error_info;
3446 
3447 		raid_error_info = io_request->error_info;
3448 		scsi_status = raid_error_info->status;
3449 
3450 		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3451 			raid_error_info->data_out_result ==
3452 			PQI_DATA_IN_OUT_UNDERFLOW)
3453 			scsi_status = SAM_STAT_GOOD;
3454 
3455 		if (scsi_status != SAM_STAT_GOOD)
3456 			rc = -EIO;
3457 	}
3458 
3459 	pqi_free_io_request(io_request);
3460 
3461 	up(&ctrl_info->sync_request_sem);
3462 
3463 	return rc;
3464 }
3465 
3466 static int pqi_validate_admin_response(
3467 	struct pqi_general_admin_response *response, u8 expected_function_code)
3468 {
3469 	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3470 		return -EINVAL;
3471 
3472 	if (get_unaligned_le16(&response->header.iu_length) !=
3473 		PQI_GENERAL_ADMIN_IU_LENGTH)
3474 		return -EINVAL;
3475 
3476 	if (response->function_code != expected_function_code)
3477 		return -EINVAL;
3478 
3479 	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3480 		return -EINVAL;
3481 
3482 	return 0;
3483 }
3484 
3485 static int pqi_submit_admin_request_synchronous(
3486 	struct pqi_ctrl_info *ctrl_info,
3487 	struct pqi_general_admin_request *request,
3488 	struct pqi_general_admin_response *response)
3489 {
3490 	int rc;
3491 
3492 	pqi_submit_admin_request(ctrl_info, request);
3493 
3494 	rc = pqi_poll_for_admin_response(ctrl_info, response);
3495 
3496 	if (rc == 0)
3497 		rc = pqi_validate_admin_response(response,
3498 			request->function_code);
3499 
3500 	return rc;
3501 }
3502 
3503 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3504 {
3505 	int rc;
3506 	struct pqi_general_admin_request request;
3507 	struct pqi_general_admin_response response;
3508 	struct pqi_device_capability *capability;
3509 	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3510 
3511 	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3512 	if (!capability)
3513 		return -ENOMEM;
3514 
3515 	memset(&request, 0, sizeof(request));
3516 
3517 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3518 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3519 		&request.header.iu_length);
3520 	request.function_code =
3521 		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3522 	put_unaligned_le32(sizeof(*capability),
3523 		&request.data.report_device_capability.buffer_length);
3524 
3525 	rc = pqi_map_single(ctrl_info->pci_dev,
3526 		&request.data.report_device_capability.sg_descriptor,
3527 		capability, sizeof(*capability),
3528 		PCI_DMA_FROMDEVICE);
3529 	if (rc)
3530 		goto out;
3531 
3532 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3533 		&response);
3534 
3535 	pqi_pci_unmap(ctrl_info->pci_dev,
3536 		&request.data.report_device_capability.sg_descriptor, 1,
3537 		PCI_DMA_FROMDEVICE);
3538 
3539 	if (rc)
3540 		goto out;
3541 
3542 	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3543 		rc = -EIO;
3544 		goto out;
3545 	}
3546 
3547 	ctrl_info->max_inbound_queues =
3548 		get_unaligned_le16(&capability->max_inbound_queues);
3549 	ctrl_info->max_elements_per_iq =
3550 		get_unaligned_le16(&capability->max_elements_per_iq);
3551 	ctrl_info->max_iq_element_length =
3552 		get_unaligned_le16(&capability->max_iq_element_length)
3553 		* 16;
3554 	ctrl_info->max_outbound_queues =
3555 		get_unaligned_le16(&capability->max_outbound_queues);
3556 	ctrl_info->max_elements_per_oq =
3557 		get_unaligned_le16(&capability->max_elements_per_oq);
3558 	ctrl_info->max_oq_element_length =
3559 		get_unaligned_le16(&capability->max_oq_element_length)
3560 		* 16;
3561 
3562 	sop_iu_layer_descriptor =
3563 		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3564 
3565 	ctrl_info->max_inbound_iu_length_per_firmware =
3566 		get_unaligned_le16(
3567 			&sop_iu_layer_descriptor->max_inbound_iu_length);
3568 	ctrl_info->inbound_spanning_supported =
3569 		sop_iu_layer_descriptor->inbound_spanning_supported;
3570 	ctrl_info->outbound_spanning_supported =
3571 		sop_iu_layer_descriptor->outbound_spanning_supported;
3572 
3573 out:
3574 	kfree(capability);
3575 
3576 	return rc;
3577 }
3578 
3579 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3580 {
3581 	if (ctrl_info->max_iq_element_length <
3582 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3583 		dev_err(&ctrl_info->pci_dev->dev,
3584 			"max. inbound queue element length of %d is less than the required length of %d\n",
3585 			ctrl_info->max_iq_element_length,
3586 			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3587 		return -EINVAL;
3588 	}
3589 
3590 	if (ctrl_info->max_oq_element_length <
3591 		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3592 		dev_err(&ctrl_info->pci_dev->dev,
3593 			"max. outbound queue element length of %d is less than the required length of %d\n",
3594 			ctrl_info->max_oq_element_length,
3595 			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3596 		return -EINVAL;
3597 	}
3598 
3599 	if (ctrl_info->max_inbound_iu_length_per_firmware <
3600 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3601 		dev_err(&ctrl_info->pci_dev->dev,
3602 			"max. inbound IU length of %u is less than the min. required length of %d\n",
3603 			ctrl_info->max_inbound_iu_length_per_firmware,
3604 			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3605 		return -EINVAL;
3606 	}
3607 
3608 	if (!ctrl_info->inbound_spanning_supported) {
3609 		dev_err(&ctrl_info->pci_dev->dev,
3610 			"the controller does not support inbound spanning\n");
3611 		return -EINVAL;
3612 	}
3613 
3614 	if (ctrl_info->outbound_spanning_supported) {
3615 		dev_err(&ctrl_info->pci_dev->dev,
3616 			"the controller supports outbound spanning but this driver does not\n");
3617 		return -EINVAL;
3618 	}
3619 
3620 	return 0;
3621 }
3622 
3623 static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3624 	bool inbound_queue, u16 queue_id)
3625 {
3626 	struct pqi_general_admin_request request;
3627 	struct pqi_general_admin_response response;
3628 
3629 	memset(&request, 0, sizeof(request));
3630 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3631 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3632 		&request.header.iu_length);
3633 	if (inbound_queue)
3634 		request.function_code =
3635 			PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3636 	else
3637 		request.function_code =
3638 			PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3639 	put_unaligned_le16(queue_id,
3640 		&request.data.delete_operational_queue.queue_id);
3641 
3642 	return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3643 		&response);
3644 }
3645 
3646 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3647 {
3648 	int rc;
3649 	struct pqi_event_queue *event_queue;
3650 	struct pqi_general_admin_request request;
3651 	struct pqi_general_admin_response response;
3652 
3653 	event_queue = &ctrl_info->event_queue;
3654 
3655 	/*
3656 	 * Create an OQ (Outbound Queue - device to host queue) dedicated
3657 	 * to events.
3658 	 */
3659 	memset(&request, 0, sizeof(request));
3660 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3661 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3662 		&request.header.iu_length);
3663 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3664 	put_unaligned_le16(event_queue->oq_id,
3665 		&request.data.create_operational_oq.queue_id);
3666 	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3667 		&request.data.create_operational_oq.element_array_addr);
3668 	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3669 		&request.data.create_operational_oq.pi_addr);
3670 	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3671 		&request.data.create_operational_oq.num_elements);
3672 	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3673 		&request.data.create_operational_oq.element_length);
3674 	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3675 	put_unaligned_le16(event_queue->int_msg_num,
3676 		&request.data.create_operational_oq.int_msg_num);
3677 
3678 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3679 		&response);
3680 	if (rc)
3681 		return rc;
3682 
3683 	event_queue->oq_ci = ctrl_info->iomem_base +
3684 		PQI_DEVICE_REGISTERS_OFFSET +
3685 		get_unaligned_le64(
3686 			&response.data.create_operational_oq.oq_ci_offset);
3687 
3688 	return 0;
3689 }
3690 
3691 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3692 {
3693 	unsigned int i;
3694 	int rc;
3695 	struct pqi_queue_group *queue_group;
3696 	struct pqi_general_admin_request request;
3697 	struct pqi_general_admin_response response;
3698 
3699 	i = ctrl_info->num_active_queue_groups;
3700 	queue_group = &ctrl_info->queue_groups[i];
3701 
3702 	/*
3703 	 * Create IQ (Inbound Queue - host to device queue) for
3704 	 * RAID path.
3705 	 */
3706 	memset(&request, 0, sizeof(request));
3707 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3708 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3709 		&request.header.iu_length);
3710 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3711 	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3712 		&request.data.create_operational_iq.queue_id);
3713 	put_unaligned_le64(
3714 		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3715 		&request.data.create_operational_iq.element_array_addr);
3716 	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3717 		&request.data.create_operational_iq.ci_addr);
3718 	put_unaligned_le16(ctrl_info->num_elements_per_iq,
3719 		&request.data.create_operational_iq.num_elements);
3720 	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3721 		&request.data.create_operational_iq.element_length);
3722 	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3723 
3724 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3725 		&response);
3726 	if (rc) {
3727 		dev_err(&ctrl_info->pci_dev->dev,
3728 			"error creating inbound RAID queue\n");
3729 		return rc;
3730 	}
3731 
3732 	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3733 		PQI_DEVICE_REGISTERS_OFFSET +
3734 		get_unaligned_le64(
3735 			&response.data.create_operational_iq.iq_pi_offset);
3736 
3737 	/*
3738 	 * Create IQ (Inbound Queue - host to device queue) for
3739 	 * Advanced I/O (AIO) path.
3740 	 */
3741 	memset(&request, 0, sizeof(request));
3742 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3743 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3744 		&request.header.iu_length);
3745 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3746 	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3747 		&request.data.create_operational_iq.queue_id);
3748 	put_unaligned_le64(
3749 		(u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
3750 		&request.data.create_operational_iq.element_array_addr);
3751 	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3752 		&request.data.create_operational_iq.ci_addr);
3753 	put_unaligned_le16(ctrl_info->num_elements_per_iq,
3754 		&request.data.create_operational_iq.num_elements);
3755 	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3756 		&request.data.create_operational_iq.element_length);
3757 	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3758 
3759 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3760 		&response);
3761 	if (rc) {
3762 		dev_err(&ctrl_info->pci_dev->dev,
3763 			"error creating inbound AIO queue\n");
3764 		goto delete_inbound_queue_raid;
3765 	}
3766 
3767 	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3768 		PQI_DEVICE_REGISTERS_OFFSET +
3769 		get_unaligned_le64(
3770 			&response.data.create_operational_iq.iq_pi_offset);
3771 
3772 	/*
3773 	 * Designate the 2nd IQ as the AIO path.  By default, all IQs are
3774 	 * assumed to be for RAID path I/O unless we change the queue's
3775 	 * property.
3776 	 */
3777 	memset(&request, 0, sizeof(request));
3778 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3779 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3780 		&request.header.iu_length);
3781 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3782 	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3783 		&request.data.change_operational_iq_properties.queue_id);
3784 	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3785 		&request.data.change_operational_iq_properties.vendor_specific);
3786 
3787 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3788 		&response);
3789 	if (rc) {
3790 		dev_err(&ctrl_info->pci_dev->dev,
3791 			"error changing queue property\n");
3792 		goto delete_inbound_queue_aio;
3793 	}
3794 
3795 	/*
3796 	 * Create OQ (Outbound Queue - device to host queue).
3797 	 */
3798 	memset(&request, 0, sizeof(request));
3799 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3800 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3801 		&request.header.iu_length);
3802 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3803 	put_unaligned_le16(queue_group->oq_id,
3804 		&request.data.create_operational_oq.queue_id);
3805 	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3806 		&request.data.create_operational_oq.element_array_addr);
3807 	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3808 		&request.data.create_operational_oq.pi_addr);
3809 	put_unaligned_le16(ctrl_info->num_elements_per_oq,
3810 		&request.data.create_operational_oq.num_elements);
3811 	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3812 		&request.data.create_operational_oq.element_length);
3813 	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3814 	put_unaligned_le16(queue_group->int_msg_num,
3815 		&request.data.create_operational_oq.int_msg_num);
3816 
3817 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3818 		&response);
3819 	if (rc) {
3820 		dev_err(&ctrl_info->pci_dev->dev,
3821 			"error creating outbound queue\n");
3822 		goto delete_inbound_queue_aio;
3823 	}
3824 
3825 	queue_group->oq_ci = ctrl_info->iomem_base +
3826 		PQI_DEVICE_REGISTERS_OFFSET +
3827 		get_unaligned_le64(
3828 			&response.data.create_operational_oq.oq_ci_offset);
3829 
3830 	ctrl_info->num_active_queue_groups++;
3831 
3832 	return 0;
3833 
3834 delete_inbound_queue_aio:
3835 	pqi_delete_operational_queue(ctrl_info, true,
3836 		queue_group->iq_id[AIO_PATH]);
3837 
3838 delete_inbound_queue_raid:
3839 	pqi_delete_operational_queue(ctrl_info, true,
3840 		queue_group->iq_id[RAID_PATH]);
3841 
3842 	return rc;
3843 }
3844 
3845 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3846 {
3847 	int rc;
3848 	unsigned int i;
3849 
3850 	rc = pqi_create_event_queue(ctrl_info);
3851 	if (rc) {
3852 		dev_err(&ctrl_info->pci_dev->dev,
3853 			"error creating event queue\n");
3854 		return rc;
3855 	}
3856 
3857 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3858 		rc = pqi_create_queue_group(ctrl_info);
3859 		if (rc) {
3860 			dev_err(&ctrl_info->pci_dev->dev,
3861 				"error creating queue group number %u/%u\n",
3862 				i, ctrl_info->num_queue_groups);
3863 			return rc;
3864 		}
3865 	}
3866 
3867 	return 0;
3868 }
3869 
3870 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
3871 	(offsetof(struct pqi_event_config, descriptors) + \
3872 	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3873 
3874 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3875 {
3876 	int rc;
3877 	unsigned int i;
3878 	struct pqi_event_config *event_config;
3879 	struct pqi_general_management_request request;
3880 
3881 	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3882 		GFP_KERNEL);
3883 	if (!event_config)
3884 		return -ENOMEM;
3885 
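	/*
	 * First read back the controller's current event configuration,
	 * then point every event descriptor at our dedicated event OQ and
	 * write the configuration back to the controller.
	 */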
3886 	memset(&request, 0, sizeof(request));
3887 
3888 	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3889 	put_unaligned_le16(offsetof(struct pqi_general_management_request,
3890 		data.report_event_configuration.sg_descriptors[1]) -
3891 		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3892 	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3893 		&request.data.report_event_configuration.buffer_length);
3894 
3895 	rc = pqi_map_single(ctrl_info->pci_dev,
3896 		request.data.report_event_configuration.sg_descriptors,
3897 		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3898 		PCI_DMA_FROMDEVICE);
3899 	if (rc)
3900 		goto out;
3901 
3902 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3903 		0, NULL, NO_TIMEOUT);
3904 
3905 	pqi_pci_unmap(ctrl_info->pci_dev,
3906 		request.data.report_event_configuration.sg_descriptors, 1,
3907 		PCI_DMA_FROMDEVICE);
3908 
3909 	if (rc)
3910 		goto out;
3911 
3912 	for (i = 0; i < event_config->num_event_descriptors; i++)
3913 		put_unaligned_le16(ctrl_info->event_queue.oq_id,
3914 			&event_config->descriptors[i].oq_id);
3915 
3916 	memset(&request, 0, sizeof(request));
3917 
3918 	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3919 	put_unaligned_le16(offsetof(struct pqi_general_management_request,
3920 		data.report_event_configuration.sg_descriptors[1]) -
3921 		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3922 	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3923 		&request.data.report_event_configuration.buffer_length);
3924 
3925 	rc = pqi_map_single(ctrl_info->pci_dev,
3926 		request.data.report_event_configuration.sg_descriptors,
3927 		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3928 		PCI_DMA_TODEVICE);
3929 	if (rc)
3930 		goto out;
3931 
3932 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
3933 		NULL, NO_TIMEOUT);
3934 
3935 	pqi_pci_unmap(ctrl_info->pci_dev,
3936 		request.data.report_event_configuration.sg_descriptors, 1,
3937 		PCI_DMA_TODEVICE);
3938 
3939 out:
3940 	kfree(event_config);
3941 
3942 	return rc;
3943 }
3944 
3945 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
3946 {
3947 	unsigned int i;
3948 	struct device *dev;
3949 	size_t sg_chain_buffer_length;
3950 	struct pqi_io_request *io_request;
3951 
3952 	if (!ctrl_info->io_request_pool)
3953 		return;
3954 
3955 	dev = &ctrl_info->pci_dev->dev;
3956 	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
3957 	io_request = ctrl_info->io_request_pool;
3958 
3959 	for (i = 0; i < ctrl_info->max_io_slots; i++) {
3960 		kfree(io_request->iu);
3961 		if (!io_request->sg_chain_buffer)
3962 			break;
3963 		dma_free_coherent(dev, sg_chain_buffer_length,
3964 			io_request->sg_chain_buffer,
3965 			io_request->sg_chain_buffer_dma_handle);
3966 		io_request++;
3967 	}
3968 
3969 	kfree(ctrl_info->io_request_pool);
3970 	ctrl_info->io_request_pool = NULL;
3971 }
3972 
3973 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
3974 {
3975 	ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3976 		ctrl_info->error_buffer_length,
3977 		&ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
3978 
3979 	if (!ctrl_info->error_buffer)
3980 		return -ENOMEM;
3981 
3982 	return 0;
3983 }
3984 
3985 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
3986 {
3987 	unsigned int i;
3988 	void *sg_chain_buffer;
3989 	size_t sg_chain_buffer_length;
3990 	dma_addr_t sg_chain_buffer_dma_handle;
3991 	struct device *dev;
3992 	struct pqi_io_request *io_request;
3993 
3994 	ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
3995 		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
3996 
3997 	if (!ctrl_info->io_request_pool) {
3998 		dev_err(&ctrl_info->pci_dev->dev,
3999 			"failed to allocate I/O request pool\n");
4000 		goto error;
4001 	}
4002 
4003 	dev = &ctrl_info->pci_dev->dev;
4004 	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4005 	io_request = ctrl_info->io_request_pool;
4006 
4007 	for (i = 0; i < ctrl_info->max_io_slots; i++) {
4008 		io_request->iu =
4009 			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4010 
4011 		if (!io_request->iu) {
4012 			dev_err(&ctrl_info->pci_dev->dev,
4013 				"failed to allocate IU buffers\n");
4014 			goto error;
4015 		}
4016 
4017 		sg_chain_buffer = dma_alloc_coherent(dev,
4018 			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4019 			GFP_KERNEL);
4020 
4021 		if (!sg_chain_buffer) {
4022 			dev_err(&ctrl_info->pci_dev->dev,
4023 				"failed to allocate PQI scatter-gather chain buffers\n");
4024 			goto error;
4025 		}
4026 
4027 		io_request->index = i;
4028 		io_request->sg_chain_buffer = sg_chain_buffer;
4029 		io_request->sg_chain_buffer_dma_handle =
4030 			sg_chain_buffer_dma_handle;
4031 		io_request++;
4032 	}
4033 
4034 	return 0;
4035 
4036 error:
4037 	pqi_free_all_io_requests(ctrl_info);
4038 
4039 	return -ENOMEM;
4040 }
4041 
4042 /*
4043  * Calculate required resources that are sized based on max. outstanding
4044  * requests and max. transfer size.
4045  */
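/*
 * Illustrative example (not the driver's actual limits): with 4 KiB pages
 * and a 1 MiB transfer limit, max_sg_entries = 1 MiB / 4 KiB + 1 = 257 to
 * allow for a non-page-aligned buffer; after clamping to the controller's
 * SG limit, the usable transfer size is (max_sg_entries - 1) * PAGE_SIZE.
 */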
4046 
4047 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4048 {
4049 	u32 max_transfer_size;
4050 	u32 max_sg_entries;
4051 
4052 	ctrl_info->scsi_ml_can_queue =
4053 		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4054 	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4055 
4056 	ctrl_info->error_buffer_length =
4057 		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4058 
4059 	max_transfer_size =
4060 		min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4061 
4062 	max_sg_entries = max_transfer_size / PAGE_SIZE;
4063 
4064 	/* +1 to cover when the buffer is not page-aligned. */
4065 	max_sg_entries++;
4066 
4067 	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4068 
4069 	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4070 
4071 	ctrl_info->sg_chain_buffer_length =
4072 		max_sg_entries * sizeof(struct pqi_sg_descriptor);
4073 	ctrl_info->sg_tablesize = max_sg_entries;
4074 	ctrl_info->max_sectors = max_transfer_size / 512;
4075 }
4076 
4077 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4078 {
4079 	int num_cpus;
4080 	int max_queue_groups;
4081 	int num_queue_groups;
4082 	u16 num_elements_per_iq;
4083 	u16 num_elements_per_oq;
4084 
4085 	max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4086 		ctrl_info->max_outbound_queues - 1);
4087 	max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4088 
4089 	num_cpus = num_online_cpus();
4090 	num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4091 	num_queue_groups = min(num_queue_groups, max_queue_groups);
4092 
4093 	ctrl_info->num_queue_groups = num_queue_groups;
4094 
4095 	/*
4096 	 * Round the max. inbound IU length down to an exact multiple
4097 	 * of our inbound element length.
4098 	 */
4099 	ctrl_info->max_inbound_iu_length =
4100 		(ctrl_info->max_inbound_iu_length_per_firmware /
4101 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4102 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4103 
4104 	num_elements_per_iq =
4105 		(ctrl_info->max_inbound_iu_length /
4106 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4107 
4108 	/* Add one because one element in each queue is unusable. */
4109 	num_elements_per_iq++;
4110 
4111 	num_elements_per_iq = min(num_elements_per_iq,
4112 		ctrl_info->max_elements_per_iq);
4113 
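	/*
	 * Each queue group has two inbound queues (RAID and AIO paths)
	 * feeding a single outbound queue, so size the OQ for roughly
	 * twice as many elements as an IQ, again plus one unusable
	 * element.
	 */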
4114 	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4115 	num_elements_per_oq = min(num_elements_per_oq,
4116 		ctrl_info->max_elements_per_oq);
4117 
4118 	ctrl_info->num_elements_per_iq = num_elements_per_iq;
4119 	ctrl_info->num_elements_per_oq = num_elements_per_oq;
4120 
4121 	ctrl_info->max_sg_per_iu =
4122 		((ctrl_info->max_inbound_iu_length -
4123 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4124 		sizeof(struct pqi_sg_descriptor)) +
4125 		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4126 }
4127 
4128 static inline void pqi_set_sg_descriptor(
4129 	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4130 {
4131 	u64 address = (u64)sg_dma_address(sg);
4132 	unsigned int length = sg_dma_len(sg);
4133 
4134 	put_unaligned_le64(address, &sg_descriptor->address);
4135 	put_unaligned_le32(length, &sg_descriptor->length);
4136 	put_unaligned_le32(0, &sg_descriptor->flags);
4137 }
4138 
4139 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4140 	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4141 	struct pqi_io_request *io_request)
4142 {
4143 	int i;
4144 	u16 iu_length;
4145 	int sg_count;
4146 	bool chained;
4147 	unsigned int num_sg_in_iu;
4148 	unsigned int max_sg_per_iu;
4149 	struct scatterlist *sg;
4150 	struct pqi_sg_descriptor *sg_descriptor;
4151 
4152 	sg_count = scsi_dma_map(scmd);
4153 	if (sg_count < 0)
4154 		return sg_count;
4155 
4156 	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4157 		PQI_REQUEST_HEADER_LENGTH;
4158 
4159 	if (sg_count == 0)
4160 		goto out;
4161 
4162 	sg = scsi_sglist(scmd);
4163 	sg_descriptor = request->sg_descriptors;
4164 	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4165 	chained = false;
4166 	num_sg_in_iu = 0;
4167 	i = 0;
4168 
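	/*
	 * Fill the SG descriptors embedded in the IU.  Once the embedded
	 * space is exhausted, the last embedded slot becomes a
	 * CISS_SG_CHAIN descriptor pointing at this request's preallocated
	 * DMA'able chain buffer, and the remaining entries are written
	 * there.
	 */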
4169 	while (1) {
4170 		pqi_set_sg_descriptor(sg_descriptor, sg);
4171 		if (!chained)
4172 			num_sg_in_iu++;
4173 		i++;
4174 		if (i == sg_count)
4175 			break;
4176 		sg_descriptor++;
4177 		if (i == max_sg_per_iu) {
4178 			put_unaligned_le64(
4179 				(u64)io_request->sg_chain_buffer_dma_handle,
4180 				&sg_descriptor->address);
4181 			put_unaligned_le32((sg_count - num_sg_in_iu)
4182 				* sizeof(*sg_descriptor),
4183 				&sg_descriptor->length);
4184 			put_unaligned_le32(CISS_SG_CHAIN,
4185 				&sg_descriptor->flags);
4186 			chained = true;
4187 			num_sg_in_iu++;
4188 			sg_descriptor = io_request->sg_chain_buffer;
4189 		}
4190 		sg = sg_next(sg);
4191 	}
4192 
4193 	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4194 	request->partial = chained;
4195 	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4196 
4197 out:
4198 	put_unaligned_le16(iu_length, &request->header.iu_length);
4199 
4200 	return 0;
4201 }
4202 
4203 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4204 	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4205 	struct pqi_io_request *io_request)
4206 {
4207 	int i;
4208 	u16 iu_length;
4209 	int sg_count;
4210 	bool chained;
4211 	unsigned int num_sg_in_iu;
4212 	unsigned int max_sg_per_iu;
4213 	struct scatterlist *sg;
4214 	struct pqi_sg_descriptor *sg_descriptor;
4215 
4216 	sg_count = scsi_dma_map(scmd);
4217 	if (sg_count < 0)
4218 		return sg_count;
4219 
4220 	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4221 		PQI_REQUEST_HEADER_LENGTH;
4222 	num_sg_in_iu = 0;
4223 
4224 	if (sg_count == 0)
4225 		goto out;
4226 
4227 	sg = scsi_sglist(scmd);
4228 	sg_descriptor = request->sg_descriptors;
4229 	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4230 	chained = false;
4231 	i = 0;
4232 
4233 	while (1) {
4234 		pqi_set_sg_descriptor(sg_descriptor, sg);
4235 		if (!chained)
4236 			num_sg_in_iu++;
4237 		i++;
4238 		if (i == sg_count)
4239 			break;
4240 		sg_descriptor++;
4241 		if (i == max_sg_per_iu) {
4242 			put_unaligned_le64(
4243 				(u64)io_request->sg_chain_buffer_dma_handle,
4244 				&sg_descriptor->address);
4245 			put_unaligned_le32((sg_count - num_sg_in_iu)
4246 				* sizeof(*sg_descriptor),
4247 				&sg_descriptor->length);
4248 			put_unaligned_le32(CISS_SG_CHAIN,
4249 				&sg_descriptor->flags);
4250 			chained = true;
4251 			num_sg_in_iu++;
4252 			sg_descriptor = io_request->sg_chain_buffer;
4253 		}
4254 		sg = sg_next(sg);
4255 	}
4256 
4257 	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4258 	request->partial = chained;
4259 	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4260 
4261 out:
4262 	put_unaligned_le16(iu_length, &request->header.iu_length);
4263 	request->num_sg_descriptors = num_sg_in_iu;
4264 
4265 	return 0;
4266 }
4267 
4268 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4269 	void *context)
4270 {
4271 	struct scsi_cmnd *scmd;
4272 
4273 	scmd = io_request->scmd;
4274 	pqi_free_io_request(io_request);
4275 	scsi_dma_unmap(scmd);
4276 	pqi_scsi_done(scmd);
4277 }
4278 
4279 static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4280 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4281 	struct pqi_queue_group *queue_group)
4282 {
4283 	int rc;
4284 	size_t cdb_length;
4285 	struct pqi_io_request *io_request;
4286 	struct pqi_raid_path_request *request;
4287 
4288 	io_request = pqi_alloc_io_request(ctrl_info);
4289 	io_request->io_complete_callback = pqi_raid_io_complete;
4290 	io_request->scmd = scmd;
4291 
4292 	scmd->host_scribble = (unsigned char *)io_request;
4293 
4294 	request = io_request->iu;
4295 	memset(request, 0,
4296 		offsetof(struct pqi_raid_path_request, sg_descriptors));
4297 
4298 	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4299 	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4300 	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4301 	put_unaligned_le16(io_request->index, &request->request_id);
4302 	request->error_index = request->request_id;
4303 	memcpy(request->lun_number, device->scsi3addr,
4304 		sizeof(request->lun_number));
4305 
4306 	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4307 	memcpy(request->cdb, scmd->cmnd, cdb_length);
4308 
4309 	switch (cdb_length) {
4310 	case 6:
4311 	case 10:
4312 	case 12:
4313 	case 16:
4314 		/* No bytes in the Additional CDB bytes field */
4315 		request->additional_cdb_bytes_usage =
4316 			SOP_ADDITIONAL_CDB_BYTES_0;
4317 		break;
4318 	case 20:
4319 		/* 4 bytes in the Additional cdb field */
4320 		request->additional_cdb_bytes_usage =
4321 			SOP_ADDITIONAL_CDB_BYTES_4;
4322 		break;
4323 	case 24:
4324 		/* 8 bytes in the Additional cdb field */
4325 		request->additional_cdb_bytes_usage =
4326 			SOP_ADDITIONAL_CDB_BYTES_8;
4327 		break;
4328 	case 28:
4329 		/* 12 bytes in the Additional cdb field */
4330 		request->additional_cdb_bytes_usage =
4331 			SOP_ADDITIONAL_CDB_BYTES_12;
4332 		break;
4333 	case 32:
4334 	default:
4335 		/* 16 bytes in the Additional cdb field */
4336 		request->additional_cdb_bytes_usage =
4337 			SOP_ADDITIONAL_CDB_BYTES_16;
4338 		break;
4339 	}
4340 
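	/*
	 * The SOP data direction is expressed from the device's point of
	 * view: a host write (DMA_TO_DEVICE) is a read by the device,
	 * hence SOP_READ_FLAG, and vice versa.
	 */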
4341 	switch (scmd->sc_data_direction) {
4342 	case DMA_TO_DEVICE:
4343 		request->data_direction = SOP_READ_FLAG;
4344 		break;
4345 	case DMA_FROM_DEVICE:
4346 		request->data_direction = SOP_WRITE_FLAG;
4347 		break;
4348 	case DMA_NONE:
4349 		request->data_direction = SOP_NO_DIRECTION_FLAG;
4350 		break;
4351 	case DMA_BIDIRECTIONAL:
4352 		request->data_direction = SOP_BIDIRECTIONAL;
4353 		break;
4354 	default:
4355 		dev_err(&ctrl_info->pci_dev->dev,
4356 			"unknown data direction: %d\n",
4357 			scmd->sc_data_direction);
4358 		WARN_ON(scmd->sc_data_direction);
4359 		break;
4360 	}
4361 
4362 	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4363 	if (rc) {
4364 		pqi_free_io_request(io_request);
4365 		return SCSI_MLQUEUE_HOST_BUSY;
4366 	}
4367 
4368 	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4369 
4370 	return 0;
4371 }
4372 
4373 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4374 	void *context)
4375 {
4376 	struct scsi_cmnd *scmd;
4377 
4378 	scmd = io_request->scmd;
4379 	scsi_dma_unmap(scmd);
4380 	if (io_request->status == -EAGAIN)
4381 		set_host_byte(scmd, DID_IMM_RETRY);
4382 	pqi_free_io_request(io_request);
4383 	pqi_scsi_done(scmd);
4384 }
4385 
4386 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4387 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4388 	struct pqi_queue_group *queue_group)
4389 {
4390 	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4391 		scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4392 }
4393 
4394 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4395 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4396 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
4397 	struct pqi_encryption_info *encryption_info)
4398 {
4399 	int rc;
4400 	struct pqi_io_request *io_request;
4401 	struct pqi_aio_path_request *request;
4402 
4403 	io_request = pqi_alloc_io_request(ctrl_info);
4404 	io_request->io_complete_callback = pqi_aio_io_complete;
4405 	io_request->scmd = scmd;
4406 
4407 	scmd->host_scribble = (unsigned char *)io_request;
4408 
4409 	request = io_request->iu;
4410 	memset(request, 0,
4411 		offsetof(struct pqi_aio_path_request, sg_descriptors));
4412 
4413 	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4414 	put_unaligned_le32(aio_handle, &request->nexus_id);
4415 	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4416 	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4417 	put_unaligned_le16(io_request->index, &request->request_id);
4418 	request->error_index = request->request_id;
4419 	if (cdb_length > sizeof(request->cdb))
4420 		cdb_length = sizeof(request->cdb);
4421 	request->cdb_length = cdb_length;
4422 	memcpy(request->cdb, cdb, cdb_length);
4423 
4424 	switch (scmd->sc_data_direction) {
4425 	case DMA_TO_DEVICE:
4426 		request->data_direction = SOP_READ_FLAG;
4427 		break;
4428 	case DMA_FROM_DEVICE:
4429 		request->data_direction = SOP_WRITE_FLAG;
4430 		break;
4431 	case DMA_NONE:
4432 		request->data_direction = SOP_NO_DIRECTION_FLAG;
4433 		break;
4434 	case DMA_BIDIRECTIONAL:
4435 		request->data_direction = SOP_BIDIRECTIONAL;
4436 		break;
4437 	default:
4438 		dev_err(&ctrl_info->pci_dev->dev,
4439 			"unknown data direction: %d\n",
4440 			scmd->sc_data_direction);
4441 		WARN_ON(scmd->sc_data_direction);
4442 		break;
4443 	}
4444 
4445 	if (encryption_info) {
4446 		request->encryption_enable = true;
4447 		put_unaligned_le16(encryption_info->data_encryption_key_index,
4448 			&request->data_encryption_key_index);
4449 		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4450 			&request->encrypt_tweak_lower);
4451 		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4452 			&request->encrypt_tweak_upper);
4453 	}
4454 
4455 	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4456 	if (rc) {
4457 		pqi_free_io_request(io_request);
4458 		return SCSI_MLQUEUE_HOST_BUSY;
4459 	}
4460 
4461 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4462 
4463 	return 0;
4464 }
4465 
4466 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4467 	struct scsi_cmnd *scmd)
4468 {
4469 	int rc;
4470 	struct pqi_ctrl_info *ctrl_info;
4471 	struct pqi_scsi_dev *device;
4472 	u16 hwq;
4473 	struct pqi_queue_group *queue_group;
4474 	bool raid_bypassed;
4475 
4476 	device = scmd->device->hostdata;
4477 	ctrl_info = shost_to_hba(shost);
4478 
4479 	if (pqi_ctrl_offline(ctrl_info)) {
4480 		set_host_byte(scmd, DID_NO_CONNECT);
4481 		pqi_scsi_done(scmd);
4482 		return 0;
4483 	}
4484 
4485 	/*
4486 	 * This is necessary because the SML doesn't zero out this field during
4487 	 * error recovery.
4488 	 */
4489 	scmd->result = 0;
4490 
4491 	hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4492 	if (hwq >= ctrl_info->num_queue_groups)
4493 		hwq = 0;
4494 
4495 	queue_group = &ctrl_info->queue_groups[hwq];
4496 
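	/*
	 * For logical volumes with ioaccel (offload) enabled, try the RAID
	 * bypass path first for normal (non-passthrough) I/O.  The bypass
	 * result stands if the command was queued or came back with a
	 * busy/final status; any other failure falls back to the regular
	 * RAID path below.
	 */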
4497 	if (pqi_is_logical_device(device)) {
4498 		raid_bypassed = false;
4499 		if (device->offload_enabled &&
4500 				!blk_rq_is_passthrough(scmd->request)) {
4501 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4502 				scmd, queue_group);
4503 			if (rc == 0 ||
4504 				rc == SCSI_MLQUEUE_HOST_BUSY ||
4505 				rc == SAM_STAT_CHECK_CONDITION ||
4506 				rc == SAM_STAT_RESERVATION_CONFLICT)
4507 				raid_bypassed = true;
4508 		}
4509 		if (!raid_bypassed)
4510 			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4511 				queue_group);
4512 	} else {
4513 		if (device->aio_enabled)
4514 			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4515 				queue_group);
4516 		else
4517 			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4518 				queue_group);
4519 	}
4520 
4521 	return rc;
4522 }
4523 
4524 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4525 	void *context)
4526 {
4527 	struct completion *waiting = context;
4528 
4529 	complete(waiting);
4530 }
4531 
4532 #define PQI_LUN_RESET_TIMEOUT_SECS	10
4533 
4534 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4535 	struct pqi_scsi_dev *device, struct completion *wait)
4536 {
4537 	int rc;
4538 	unsigned int wait_secs = 0;
4539 
4540 	while (1) {
4541 		if (wait_for_completion_io_timeout(wait,
4542 			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4543 			rc = 0;
4544 			break;
4545 		}
4546 
4547 		pqi_check_ctrl_health(ctrl_info);
4548 		if (pqi_ctrl_offline(ctrl_info)) {
4549 			rc = -ETIMEDOUT;
4550 			break;
4551 		}
4552 
4553 		wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
4554 
4555 		dev_err(&ctrl_info->pci_dev->dev,
4556 			"resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
4557 			ctrl_info->scsi_host->host_no, device->bus,
4558 			device->target, device->lun, wait_secs);
4559 	}
4560 
4561 	return rc;
4562 }
4563 
4564 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4565 	struct pqi_scsi_dev *device)
4566 {
4567 	int rc;
4568 	struct pqi_io_request *io_request;
4569 	DECLARE_COMPLETION_ONSTACK(wait);
4570 	struct pqi_task_management_request *request;
4571 
4572 	down(&ctrl_info->lun_reset_sem);
4573 
4574 	io_request = pqi_alloc_io_request(ctrl_info);
4575 	io_request->io_complete_callback = pqi_lun_reset_complete;
4576 	io_request->context = &wait;
4577 
4578 	request = io_request->iu;
4579 	memset(request, 0, sizeof(*request));
4580 
4581 	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4582 	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4583 		&request->header.iu_length);
4584 	put_unaligned_le16(io_request->index, &request->request_id);
4585 	memcpy(request->lun_number, device->scsi3addr,
4586 		sizeof(request->lun_number));
4587 	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4588 
4589 	pqi_start_io(ctrl_info,
4590 		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4591 		io_request);
4592 
4593 	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4594 	if (rc == 0)
4595 		rc = io_request->status;
4596 
4597 	pqi_free_io_request(io_request);
4598 	up(&ctrl_info->lun_reset_sem);
4599 
4600 	return rc;
4601 }
4602 
4603 /* Performs a reset at the LUN level. */
4604 
4605 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4606 	struct pqi_scsi_dev *device)
4607 {
4608 	int rc;
4609 
4610 	pqi_check_ctrl_health(ctrl_info);
4611 	if (pqi_ctrl_offline(ctrl_info))
4612 		return FAILED;
4613 
4614 	rc = pqi_lun_reset(ctrl_info, device);
4615 
4616 	return rc == 0 ? SUCCESS : FAILED;
4617 }
4618 
4619 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4620 {
4621 	int rc;
4622 	struct pqi_ctrl_info *ctrl_info;
4623 	struct pqi_scsi_dev *device;
4624 
4625 	ctrl_info = shost_to_hba(scmd->device->host);
4626 	device = scmd->device->hostdata;
4627 
4628 	dev_err(&ctrl_info->pci_dev->dev,
4629 		"resetting scsi %d:%d:%d:%d\n",
4630 		ctrl_info->scsi_host->host_no,
4631 		device->bus, device->target, device->lun);
4632 
4633 	rc = pqi_device_reset(ctrl_info, device);
4634 
4635 	dev_err(&ctrl_info->pci_dev->dev,
4636 		"reset of scsi %d:%d:%d:%d: %s\n",
4637 		ctrl_info->scsi_host->host_no,
4638 		device->bus, device->target, device->lun,
4639 		rc == SUCCESS ? "SUCCESS" : "FAILED");
4640 
4641 	return rc;
4642 }
4643 
4644 static int pqi_slave_alloc(struct scsi_device *sdev)
4645 {
4646 	struct pqi_scsi_dev *device;
4647 	unsigned long flags;
4648 	struct pqi_ctrl_info *ctrl_info;
4649 	struct scsi_target *starget;
4650 	struct sas_rphy *rphy;
4651 
4652 	ctrl_info = shost_to_hba(sdev->host);
4653 
4654 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4655 
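	/*
	 * Physical devices are looked up through the SAS transport rphy
	 * attached to the target; logical volumes are looked up by their
	 * channel/target/lun address.
	 */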
4656 	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4657 		starget = scsi_target(sdev);
4658 		rphy = target_to_rphy(starget);
4659 		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4660 		if (device) {
4661 			device->target = sdev_id(sdev);
4662 			device->lun = sdev->lun;
4663 			device->target_lun_valid = true;
4664 		}
4665 	} else {
4666 		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4667 			sdev_id(sdev), sdev->lun);
4668 	}
4669 
4670 	if (device && device->expose_device) {
4671 		sdev->hostdata = device;
4672 		device->sdev = sdev;
4673 		if (device->queue_depth) {
4674 			device->advertised_queue_depth = device->queue_depth;
4675 			scsi_change_queue_depth(sdev,
4676 				device->advertised_queue_depth);
4677 		}
4678 	}
4679 
4680 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4681 
4682 	return 0;
4683 }
4684 
4685 static int pqi_slave_configure(struct scsi_device *sdev)
4686 {
4687 	struct pqi_scsi_dev *device;
4688 
4689 	device = sdev->hostdata;
4690 	if (!device->expose_device)
4691 		sdev->no_uld_attach = true;
4692 
4693 	return 0;
4694 }
4695 
4696 static int pqi_map_queues(struct Scsi_Host *shost)
4697 {
4698 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
4699 
4700 	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
4701 }
4702 
4703 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4704 	void __user *arg)
4705 {
4706 	struct pci_dev *pci_dev;
4707 	u32 subsystem_vendor;
4708 	u32 subsystem_device;
4709 	cciss_pci_info_struct pciinfo;
4710 
4711 	if (!arg)
4712 		return -EINVAL;
4713 
4714 	pci_dev = ctrl_info->pci_dev;
4715 
4716 	pciinfo.domain = pci_domain_nr(pci_dev->bus);
4717 	pciinfo.bus = pci_dev->bus->number;
4718 	pciinfo.dev_fn = pci_dev->devfn;
4719 	subsystem_vendor = pci_dev->subsystem_vendor;
4720 	subsystem_device = pci_dev->subsystem_device;
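	/*
	 * The cciss-style board_id packs the PCI subsystem device ID into
	 * the upper 16 bits and the subsystem vendor ID into the lower
	 * 16 bits.
	 */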
4721 	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4722 		subsystem_vendor;
4723 
4724 	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4725 		return -EFAULT;
4726 
4727 	return 0;
4728 }
4729 
4730 static int pqi_getdrivver_ioctl(void __user *arg)
4731 {
4732 	u32 version;
4733 
4734 	if (!arg)
4735 		return -EINVAL;
4736 
4737 	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4738 		(DRIVER_RELEASE << 16) | DRIVER_REVISION;
4739 
4740 	if (copy_to_user(arg, &version, sizeof(version)))
4741 		return -EFAULT;
4742 
4743 	return 0;
4744 }
4745 
4746 struct ciss_error_info {
4747 	u8	scsi_status;
4748 	int	command_status;
4749 	size_t	sense_data_length;
4750 };
4751 
4752 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4753 	struct ciss_error_info *ciss_error_info)
4754 {
4755 	int ciss_cmd_status;
4756 	size_t sense_data_length;
4757 
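	/*
	 * Map the PQI data-in/data-out result onto the closest legacy CISS
	 * command status so that cciss-style passthrough callers see
	 * familiar values.
	 */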
4758 	switch (pqi_error_info->data_out_result) {
4759 	case PQI_DATA_IN_OUT_GOOD:
4760 		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4761 		break;
4762 	case PQI_DATA_IN_OUT_UNDERFLOW:
4763 		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4764 		break;
4765 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4766 		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4767 		break;
4768 	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4769 	case PQI_DATA_IN_OUT_BUFFER_ERROR:
4770 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4771 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4772 	case PQI_DATA_IN_OUT_ERROR:
4773 		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4774 		break;
4775 	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4776 	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4777 	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4778 	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4779 	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4780 	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4781 	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4782 	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4783 	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4784 	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4785 		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4786 		break;
4787 	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4788 		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4789 		break;
4790 	case PQI_DATA_IN_OUT_ABORTED:
4791 		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4792 		break;
4793 	case PQI_DATA_IN_OUT_TIMEOUT:
4794 		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4795 		break;
4796 	default:
4797 		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4798 		break;
4799 	}
4800 
4801 	sense_data_length =
4802 		get_unaligned_le16(&pqi_error_info->sense_data_length);
4803 	if (sense_data_length == 0)
4804 		sense_data_length =
4805 		get_unaligned_le16(&pqi_error_info->response_data_length);
4806 	if (sense_data_length)
4807 		if (sense_data_length > sizeof(pqi_error_info->data))
4808 			sense_data_length = sizeof(pqi_error_info->data);
4809 
4810 	ciss_error_info->scsi_status = pqi_error_info->status;
4811 	ciss_error_info->command_status = ciss_cmd_status;
4812 	ciss_error_info->sense_data_length = sense_data_length;
4813 }
4814 
4815 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4816 {
4817 	int rc;
4818 	char *kernel_buffer = NULL;
4819 	u16 iu_length;
4820 	size_t sense_data_length;
4821 	IOCTL_Command_struct iocommand;
4822 	struct pqi_raid_path_request request;
4823 	struct pqi_raid_error_info pqi_error_info;
4824 	struct ciss_error_info ciss_error_info;
4825 
4826 	if (pqi_ctrl_offline(ctrl_info))
4827 		return -ENXIO;
4828 	if (!arg)
4829 		return -EINVAL;
4830 	if (!capable(CAP_SYS_RAWIO))
4831 		return -EPERM;
4832 	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4833 		return -EFAULT;
4834 	if (iocommand.buf_size < 1 &&
4835 		iocommand.Request.Type.Direction != XFER_NONE)
4836 		return -EINVAL;
4837 	if (iocommand.Request.CDBLen > sizeof(request.cdb))
4838 		return -EINVAL;
4839 	if (iocommand.Request.Type.Type != TYPE_CMD)
4840 		return -EINVAL;
4841 
4842 	switch (iocommand.Request.Type.Direction) {
4843 	case XFER_NONE:
4844 	case XFER_WRITE:
4845 	case XFER_READ:
4846 		break;
4847 	default:
4848 		return -EINVAL;
4849 	}
4850 
4851 	if (iocommand.buf_size > 0) {
4852 		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4853 		if (!kernel_buffer)
4854 			return -ENOMEM;
4855 		if (iocommand.Request.Type.Direction & XFER_WRITE) {
4856 			if (copy_from_user(kernel_buffer, iocommand.buf,
4857 				iocommand.buf_size)) {
4858 				rc = -EFAULT;
4859 				goto out;
4860 			}
4861 		} else {
4862 			memset(kernel_buffer, 0, iocommand.buf_size);
4863 		}
4864 	}
4865 
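	/*
	 * Build a synchronous RAID path request that carries the caller's
	 * CDB.  Any user data buffer is bounced through kernel_buffer,
	 * which is DMA-mapped below.
	 */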
4866 	memset(&request, 0, sizeof(request));
4867 
4868 	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4869 	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4870 		PQI_REQUEST_HEADER_LENGTH;
4871 	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4872 		sizeof(request.lun_number));
4873 	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4874 	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4875 
4876 	switch (iocommand.Request.Type.Direction) {
4877 	case XFER_NONE:
4878 		request.data_direction = SOP_NO_DIRECTION_FLAG;
4879 		break;
4880 	case XFER_WRITE:
4881 		request.data_direction = SOP_WRITE_FLAG;
4882 		break;
4883 	case XFER_READ:
4884 		request.data_direction = SOP_READ_FLAG;
4885 		break;
4886 	}
4887 
4888 	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4889 
4890 	if (iocommand.buf_size > 0) {
4891 		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4892 
4893 		rc = pqi_map_single(ctrl_info->pci_dev,
4894 			&request.sg_descriptors[0], kernel_buffer,
4895 			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4896 		if (rc)
4897 			goto out;
4898 
4899 		iu_length += sizeof(request.sg_descriptors[0]);
4900 	}
4901 
4902 	put_unaligned_le16(iu_length, &request.header.iu_length);
4903 
4904 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4905 		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4906 
4907 	if (iocommand.buf_size > 0)
4908 		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4909 			PCI_DMA_BIDIRECTIONAL);
4910 
4911 	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4912 
4913 	if (rc == 0) {
4914 		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4915 		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4916 		iocommand.error_info.CommandStatus =
4917 			ciss_error_info.command_status;
4918 		sense_data_length = ciss_error_info.sense_data_length;
4919 		if (sense_data_length) {
4920 			if (sense_data_length >
4921 				sizeof(iocommand.error_info.SenseInfo))
4922 				sense_data_length =
4923 					sizeof(iocommand.error_info.SenseInfo);
4924 			memcpy(iocommand.error_info.SenseInfo,
4925 				pqi_error_info.data, sense_data_length);
4926 			iocommand.error_info.SenseLen = sense_data_length;
4927 		}
4928 	}
4929 
4930 	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
4931 		rc = -EFAULT;
4932 		goto out;
4933 	}
4934 
4935 	if (rc == 0 && iocommand.buf_size > 0 &&
4936 		(iocommand.Request.Type.Direction & XFER_READ)) {
4937 		if (copy_to_user(iocommand.buf, kernel_buffer,
4938 			iocommand.buf_size)) {
4939 			rc = -EFAULT;
4940 		}
4941 	}
4942 
4943 out:
4944 	kfree(kernel_buffer);
4945 
4946 	return rc;
4947 }
4948 
4949 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4950 {
4951 	int rc;
4952 	struct pqi_ctrl_info *ctrl_info;
4953 
4954 	ctrl_info = shost_to_hba(sdev->host);
4955 
4956 	switch (cmd) {
4957 	case CCISS_DEREGDISK:
4958 	case CCISS_REGNEWDISK:
4959 	case CCISS_REGNEWD:
4960 		rc = pqi_scan_scsi_devices(ctrl_info);
4961 		break;
4962 	case CCISS_GETPCIINFO:
4963 		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
4964 		break;
4965 	case CCISS_GETDRIVVER:
4966 		rc = pqi_getdrivver_ioctl(arg);
4967 		break;
4968 	case CCISS_PASSTHRU:
4969 		rc = pqi_passthru_ioctl(ctrl_info, arg);
4970 		break;
4971 	default:
4972 		rc = -EINVAL;
4973 		break;
4974 	}
4975 
4976 	return rc;
4977 }
4978 
4979 static ssize_t pqi_version_show(struct device *dev,
4980 	struct device_attribute *attr, char *buffer)
4981 {
4982 	ssize_t count = 0;
4983 	struct Scsi_Host *shost;
4984 	struct pqi_ctrl_info *ctrl_info;
4985 
4986 	shost = class_to_shost(dev);
4987 	ctrl_info = shost_to_hba(shost);
4988 
4989 	count += snprintf(buffer + count, PAGE_SIZE - count,
4990 		"  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
4991 
4992 	count += snprintf(buffer + count, PAGE_SIZE - count,
4993 		"firmware: %s\n", ctrl_info->firmware_version);
4994 
4995 	return count;
4996 }
4997 
4998 static ssize_t pqi_host_rescan_store(struct device *dev,
4999 	struct device_attribute *attr, const char *buffer, size_t count)
5000 {
5001 	struct Scsi_Host *shost = class_to_shost(dev);
5002 
5003 	pqi_scan_start(shost);
5004 
5005 	return count;
5006 }
5007 
5008 static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5009 static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5010 
5011 static struct device_attribute *pqi_shost_attrs[] = {
5012 	&dev_attr_version,
5013 	&dev_attr_rescan,
5014 	NULL
5015 };
5016 
5017 static ssize_t pqi_sas_address_show(struct device *dev,
5018 	struct device_attribute *attr, char *buffer)
5019 {
5020 	struct pqi_ctrl_info *ctrl_info;
5021 	struct scsi_device *sdev;
5022 	struct pqi_scsi_dev *device;
5023 	unsigned long flags;
5024 	u64 sas_address;
5025 
5026 	sdev = to_scsi_device(dev);
5027 	ctrl_info = shost_to_hba(sdev->host);
5028 
5029 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5030 
5031 	device = sdev->hostdata;
5032 	if (pqi_is_logical_device(device)) {
5033 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5034 			flags);
5035 		return -ENODEV;
5036 	}
5037 	sas_address = device->sas_address;
5038 
5039 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5040 
5041 	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5042 }
5043 
5044 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5045 	struct device_attribute *attr, char *buffer)
5046 {
5047 	struct pqi_ctrl_info *ctrl_info;
5048 	struct scsi_device *sdev;
5049 	struct pqi_scsi_dev *device;
5050 	unsigned long flags;
5051 
5052 	sdev = to_scsi_device(dev);
5053 	ctrl_info = shost_to_hba(sdev->host);
5054 
5055 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5056 
5057 	device = sdev->hostdata;
5058 	buffer[0] = device->offload_enabled ? '1' : '0';
5059 	buffer[1] = '\n';
5060 	buffer[2] = '\0';
5061 
5062 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5063 
5064 	return 2;
5065 }
5066 
5067 static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5068 static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5069 	pqi_ssd_smart_path_enabled_show, NULL);
5070 
5071 static struct device_attribute *pqi_sdev_attrs[] = {
5072 	&dev_attr_sas_address,
5073 	&dev_attr_ssd_smart_path_enabled,
5074 	NULL
5075 };
5076 
5077 static struct scsi_host_template pqi_driver_template = {
5078 	.module = THIS_MODULE,
5079 	.name = DRIVER_NAME_SHORT,
5080 	.proc_name = DRIVER_NAME_SHORT,
5081 	.queuecommand = pqi_scsi_queue_command,
5082 	.scan_start = pqi_scan_start,
5083 	.scan_finished = pqi_scan_finished,
5084 	.this_id = -1,
5085 	.use_clustering = ENABLE_CLUSTERING,
5086 	.eh_device_reset_handler = pqi_eh_device_reset_handler,
5087 	.ioctl = pqi_ioctl,
5088 	.slave_alloc = pqi_slave_alloc,
5089 	.slave_configure = pqi_slave_configure,
5090 	.map_queues = pqi_map_queues,
5091 	.sdev_attrs = pqi_sdev_attrs,
5092 	.shost_attrs = pqi_shost_attrs,
5093 };
5094 
5095 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5096 {
5097 	int rc;
5098 	struct Scsi_Host *shost;
5099 
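	/*
	 * Only a pointer to the separately allocated ctrl_info is stored
	 * in the host's private data (hostdata[0] below), so the extra
	 * allocation here is just sizeof(ctrl_info), i.e. the size of a
	 * pointer.
	 */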
5100 	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5101 	if (!shost) {
5102 		dev_err(&ctrl_info->pci_dev->dev,
5103 			"scsi_host_alloc failed for controller %u\n",
5104 			ctrl_info->ctrl_id);
5105 		return -ENOMEM;
5106 	}
5107 
5108 	shost->io_port = 0;
5109 	shost->n_io_port = 0;
5110 	shost->this_id = -1;
5111 	shost->max_channel = PQI_MAX_BUS;
5112 	shost->max_cmd_len = MAX_COMMAND_SIZE;
5113 	shost->max_lun = ~0;
5114 	shost->max_id = ~0;
5115 	shost->max_sectors = ctrl_info->max_sectors;
5116 	shost->can_queue = ctrl_info->scsi_ml_can_queue;
5117 	shost->cmd_per_lun = shost->can_queue;
5118 	shost->sg_tablesize = ctrl_info->sg_tablesize;
5119 	shost->transportt = pqi_sas_transport_template;
5120 	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
5121 	shost->unique_id = shost->irq;
5122 	shost->nr_hw_queues = ctrl_info->num_queue_groups;
5123 	shost->hostdata[0] = (unsigned long)ctrl_info;
5124 
5125 	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5126 	if (rc) {
5127 		dev_err(&ctrl_info->pci_dev->dev,
5128 			"scsi_add_host failed for controller %u\n",
5129 			ctrl_info->ctrl_id);
5130 		goto free_host;
5131 	}
5132 
5133 	rc = pqi_add_sas_host(shost, ctrl_info);
5134 	if (rc) {
5135 		dev_err(&ctrl_info->pci_dev->dev,
5136 			"add SAS host failed for controller %u\n",
5137 			ctrl_info->ctrl_id);
5138 		goto remove_host;
5139 	}
5140 
5141 	ctrl_info->scsi_host = shost;
5142 
5143 	return 0;
5144 
5145 remove_host:
5146 	scsi_remove_host(shost);
5147 free_host:
5148 	scsi_host_put(shost);
5149 
5150 	return rc;
5151 }
5152 
5153 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5154 {
5155 	struct Scsi_Host *shost;
5156 
5157 	pqi_delete_sas_host(ctrl_info);
5158 
5159 	shost = ctrl_info->scsi_host;
5160 	if (!shost)
5161 		return;
5162 
5163 	scsi_remove_host(shost);
5164 	scsi_host_put(shost);
5165 }
5166 
5167 #define PQI_RESET_ACTION_RESET		0x1
5168 
5169 #define PQI_RESET_TYPE_NO_RESET		0x0
5170 #define PQI_RESET_TYPE_SOFT_RESET	0x1
5171 #define PQI_RESET_TYPE_FIRM_RESET	0x2
5172 #define PQI_RESET_TYPE_HARD_RESET	0x3
5173 
5174 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5175 {
5176 	int rc;
5177 	u32 reset_params;
5178 
5179 	reset_params = (PQI_RESET_ACTION_RESET << 5) |
5180 		PQI_RESET_TYPE_HARD_RESET;
5181 
5182 	writel(reset_params,
5183 		&ctrl_info->pqi_registers->device_reset);
5184 
5185 	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5186 	if (rc)
5187 		dev_err(&ctrl_info->pci_dev->dev,
5188 			"PQI reset failed\n");
5189 
5190 	return rc;
5191 }
5192 
5193 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5194 {
5195 	int rc;
5196 	struct bmic_identify_controller *identify;
5197 
5198 	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5199 	if (!identify)
5200 		return -ENOMEM;
5201 
5202 	rc = pqi_identify_controller(ctrl_info, identify);
5203 	if (rc)
5204 		goto out;
5205 
5206 	memcpy(ctrl_info->firmware_version, identify->firmware_version,
5207 		sizeof(identify->firmware_version));
5208 	ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5209 	snprintf(ctrl_info->firmware_version +
5210 		strlen(ctrl_info->firmware_version),
5211 		sizeof(ctrl_info->firmware_version) -
			strlen(ctrl_info->firmware_version),
5212 		"-%u", get_unaligned_le16(&identify->firmware_build_number));
5213 
5214 out:
5215 	kfree(identify);
5216 
5217 	return rc;
5218 }
5219 
5220 static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
5221 {
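	/*
	 * In a kdump/kexec boot the controller may have been left running
	 * in PQI mode by the previous kernel.  Reset it back to SIS mode
	 * so that the normal bring-up sequence can start from a known
	 * state.
	 */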
5222 	if (!sis_is_firmware_running(ctrl_info))
5223 		return -ENXIO;
5224 
5225 	if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
5226 		sis_disable_msix(ctrl_info);
5227 		if (pqi_reset(ctrl_info) == 0)
5228 			sis_reenable_sis_mode(ctrl_info);
5229 	}
5230 
5231 	return 0;
5232 }
5233 
5234 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5235 {
5236 	int rc;
5237 
5238 	if (reset_devices) {
5239 		rc = pqi_kdump_init(ctrl_info);
5240 		if (rc)
5241 			return rc;
5242 	}
5243 
5244 	/*
5245 	 * When the controller comes out of reset, it is always running
5246 	 * in legacy SIS mode.  This is so that it can be compatible
5247 	 * with legacy drivers shipped with OSes.  So we have to talk
5248 	 * to it using SIS commands at first.  Once we are satisfied
5249 	 * that the controller supports PQI, we transition it into PQI
5250 	 * mode.
5251 	 */
5252 
5253 	/*
5254 	 * Wait until the controller is ready to start accepting SIS
5255 	 * commands.
5256 	 */
5257 	rc = sis_wait_for_ctrl_ready(ctrl_info);
5258 	if (rc) {
5259 		dev_err(&ctrl_info->pci_dev->dev,
5260 			"error initializing SIS interface\n");
5261 		return rc;
5262 	}
5263 
5264 	/*
5265 	 * Get the controller properties.  This allows us to determine
5266 	 * whether or not it supports PQI mode.
5267 	 */
5268 	rc = sis_get_ctrl_properties(ctrl_info);
5269 	if (rc) {
5270 		dev_err(&ctrl_info->pci_dev->dev,
5271 			"error obtaining controller properties\n");
5272 		return rc;
5273 	}
5274 
5275 	rc = sis_get_pqi_capabilities(ctrl_info);
5276 	if (rc) {
5277 		dev_err(&ctrl_info->pci_dev->dev,
5278 			"error obtaining controller capabilities\n");
5279 		return rc;
5280 	}
5281 
5282 	if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5283 		ctrl_info->max_outstanding_requests =
5284 			PQI_MAX_OUTSTANDING_REQUESTS;
5285 
5286 	pqi_calculate_io_resources(ctrl_info);
5287 
5288 	rc = pqi_alloc_error_buffer(ctrl_info);
5289 	if (rc) {
5290 		dev_err(&ctrl_info->pci_dev->dev,
5291 			"failed to allocate PQI error buffer\n");
5292 		return rc;
5293 	}
5294 
5295 	/*
5296 	 * If the function we are about to call succeeds, the
5297 	 * controller will transition from legacy SIS mode
5298 	 * into PQI mode.
5299 	 */
5300 	rc = sis_init_base_struct_addr(ctrl_info);
5301 	if (rc) {
5302 		dev_err(&ctrl_info->pci_dev->dev,
5303 			"error initializing PQI mode\n");
5304 		return rc;
5305 	}
5306 
5307 	/* Wait for the controller to complete the SIS -> PQI transition. */
5308 	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5309 	if (rc) {
5310 		dev_err(&ctrl_info->pci_dev->dev,
5311 			"transition to PQI mode failed\n");
5312 		return rc;
5313 	}
5314 
5315 	/* From here on, we are running in PQI mode. */
5316 	ctrl_info->pqi_mode_enabled = true;
5317 	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5318 
5319 	rc = pqi_alloc_admin_queues(ctrl_info);
5320 	if (rc) {
5321 		dev_err(&ctrl_info->pci_dev->dev,
5322 			"error allocating admin queues\n");
5323 		return rc;
5324 	}
5325 
5326 	rc = pqi_create_admin_queues(ctrl_info);
5327 	if (rc) {
5328 		dev_err(&ctrl_info->pci_dev->dev,
5329 			"error creating admin queues\n");
5330 		return rc;
5331 	}
5332 
5333 	rc = pqi_report_device_capability(ctrl_info);
5334 	if (rc) {
5335 		dev_err(&ctrl_info->pci_dev->dev,
5336 			"obtaining device capability failed\n");
5337 		return rc;
5338 	}
5339 
5340 	rc = pqi_validate_device_capability(ctrl_info);
5341 	if (rc)
5342 		return rc;
5343 
5344 	pqi_calculate_queue_resources(ctrl_info);
5345 
5346 	rc = pqi_enable_msix_interrupts(ctrl_info);
5347 	if (rc)
5348 		return rc;
5349 
5350 	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5351 		ctrl_info->max_msix_vectors =
5352 			ctrl_info->num_msix_vectors_enabled;
5353 		pqi_calculate_queue_resources(ctrl_info);
5354 	}
5355 
5356 	rc = pqi_alloc_io_resources(ctrl_info);
5357 	if (rc)
5358 		return rc;
5359 
5360 	rc = pqi_alloc_operational_queues(ctrl_info);
5361 	if (rc)
5362 		return rc;
5363 
5364 	pqi_init_operational_queues(ctrl_info);
5365 
5366 	rc = pqi_request_irqs(ctrl_info);
5367 	if (rc)
5368 		return rc;
5369 
5370 	rc = pqi_create_queues(ctrl_info);
5371 	if (rc)
5372 		return rc;
5373 
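	/* Tell the controller firmware to deliver its interrupts via MSI-X. */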
5374 	sis_enable_msix(ctrl_info);
5375 
5376 	rc = pqi_configure_events(ctrl_info);
5377 	if (rc) {
5378 		dev_err(&ctrl_info->pci_dev->dev,
5379 			"error configuring events\n");
5380 		return rc;
5381 	}
5382 
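	/* Start periodic monitoring of the controller's heartbeat counter. */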
5383 	pqi_start_heartbeat_timer(ctrl_info);
5384 
5385 	ctrl_info->controller_online = true;
5386 
5387 	/* Register with the SCSI subsystem. */
5388 	rc = pqi_register_scsi(ctrl_info);
5389 	if (rc)
5390 		return rc;
5391 
5392 	rc = pqi_get_ctrl_firmware_version(ctrl_info);
5393 	if (rc) {
5394 		dev_err(&ctrl_info->pci_dev->dev,
5395 			"error obtaining firmware version\n");
5396 		return rc;
5397 	}
5398 
5399 	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5400 	if (rc) {
5401 		dev_err(&ctrl_info->pci_dev->dev,
5402 			"error updating host wellness\n");
5403 		return rc;
5404 	}
5405 
5406 	pqi_schedule_update_time_worker(ctrl_info);
5407 
5408 	pqi_scan_scsi_devices(ctrl_info);
5409 
5410 	return 0;
5411 }
5412 
5413 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5414 {
5415 	int rc;
5416 	u64 mask;
5417 
5418 	rc = pci_enable_device(ctrl_info->pci_dev);
5419 	if (rc) {
5420 		dev_err(&ctrl_info->pci_dev->dev,
5421 			"failed to enable PCI device\n");
5422 		return rc;
5423 	}
5424 
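	/* Use 64-bit DMA addressing when dma_addr_t is wide enough. */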
5425 	if (sizeof(dma_addr_t) > 4)
5426 		mask = DMA_BIT_MASK(64);
5427 	else
5428 		mask = DMA_BIT_MASK(32);
5429 
5430 	rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5431 	if (rc) {
5432 		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5433 		goto disable_device;
5434 	}
5435 
5436 	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5437 	if (rc) {
5438 		dev_err(&ctrl_info->pci_dev->dev,
5439 			"failed to obtain PCI resources\n");
5440 		goto disable_device;
5441 	}
5442 
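	/* Map only the controller register space at the start of BAR 0. */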
5443 	ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5444 		ctrl_info->pci_dev, 0),
5445 		sizeof(struct pqi_ctrl_registers));
5446 	if (!ctrl_info->iomem_base) {
5447 		dev_err(&ctrl_info->pci_dev->dev,
5448 			"failed to map memory for controller registers\n");
5449 		rc = -ENOMEM;
5450 		goto release_regions;
5451 	}
5452 
5453 	ctrl_info->registers = ctrl_info->iomem_base;
5454 	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5455 
5456 	/* Enable bus mastering. */
5457 	pci_set_master(ctrl_info->pci_dev);
5458 
5459 	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5460 
5461 	return 0;
5462 
5463 release_regions:
5464 	pci_release_regions(ctrl_info->pci_dev);
5465 disable_device:
5466 	pci_disable_device(ctrl_info->pci_dev);
5467 
5468 	return rc;
5469 }
5470 
5471 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5472 {
5473 	iounmap(ctrl_info->iomem_base);
5474 	pci_release_regions(ctrl_info->pci_dev);
5475 	pci_disable_device(ctrl_info->pci_dev);
5476 	pci_set_drvdata(ctrl_info->pci_dev, NULL);
5477 }
5478 
5479 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5480 {
5481 	struct pqi_ctrl_info *ctrl_info;
5482 
5483 	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5484 			GFP_KERNEL, numa_node);
5485 	if (!ctrl_info)
5486 		return NULL;
5487 
5488 	mutex_init(&ctrl_info->scan_mutex);
5489 
5490 	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5491 	spin_lock_init(&ctrl_info->scsi_device_list_lock);
5492 
5493 	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5494 	atomic_set(&ctrl_info->num_interrupts, 0);
5495 
5496 	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5497 	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5498 
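	/* Limit reserved I/O slots used for synchronous requests and LUN resets. */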
5499 	sema_init(&ctrl_info->sync_request_sem,
5500 		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5501 	sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5502 
5503 	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5504 	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5505 
5506 	return ctrl_info;
5507 }
5508 
5509 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5510 {
5511 	kfree(ctrl_info);
5512 }
5513 
5514 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5515 {
5516 	int i;
5517 
5518 	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
5519 		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
5520 				&ctrl_info->queue_groups[i]);
5521 	}
5522 
5523 	pci_free_irq_vectors(ctrl_info->pci_dev);
5524 }
5525 
5526 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5527 {
5528 	pqi_stop_heartbeat_timer(ctrl_info);
5529 	pqi_free_interrupts(ctrl_info);
5530 	if (ctrl_info->queue_memory_base)
5531 		dma_free_coherent(&ctrl_info->pci_dev->dev,
5532 			ctrl_info->queue_memory_length,
5533 			ctrl_info->queue_memory_base,
5534 			ctrl_info->queue_memory_base_dma_handle);
5535 	if (ctrl_info->admin_queue_memory_base)
5536 		dma_free_coherent(&ctrl_info->pci_dev->dev,
5537 			ctrl_info->admin_queue_memory_length,
5538 			ctrl_info->admin_queue_memory_base,
5539 			ctrl_info->admin_queue_memory_base_dma_handle);
5540 	pqi_free_all_io_requests(ctrl_info);
5541 	if (ctrl_info->error_buffer)
5542 		dma_free_coherent(&ctrl_info->pci_dev->dev,
5543 			ctrl_info->error_buffer_length,
5544 			ctrl_info->error_buffer,
5545 			ctrl_info->error_buffer_dma_handle);
5546 	if (ctrl_info->iomem_base)
5547 		pqi_cleanup_pci_init(ctrl_info);
5548 	pqi_free_ctrl_info(ctrl_info);
5549 }
5550 
5551 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5552 {
5553 	cancel_delayed_work_sync(&ctrl_info->rescan_work);
5554 	cancel_delayed_work_sync(&ctrl_info->update_time_work);
5555 	pqi_remove_all_scsi_devices(ctrl_info);
5556 	pqi_unregister_scsi(ctrl_info);
5557 
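	/*
	 * If the controller is still in PQI mode, reset it and return it
	 * to legacy SIS mode before freeing its resources.
	 */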
5558 	if (ctrl_info->pqi_mode_enabled) {
5559 		sis_disable_msix(ctrl_info);
5560 		if (pqi_reset(ctrl_info) == 0)
5561 			sis_reenable_sis_mode(ctrl_info);
5562 	}
5563 	pqi_free_ctrl_resources(ctrl_info);
5564 }
5565 
5566 static void pqi_print_ctrl_info(struct pci_dev *pdev,
5567 	const struct pci_device_id *id)
5568 {
5569 	char *ctrl_description;
5570 
5571 	if (id->driver_data) {
5572 		ctrl_description = (char *)id->driver_data;
5573 	} else {
5574 		switch (id->subvendor) {
5575 		case PCI_VENDOR_ID_HP:
5576 			ctrl_description = hpe_branded_controller;
5577 			break;
5578 		case PCI_VENDOR_ID_ADAPTEC2:
5579 		default:
5580 			ctrl_description = microsemi_branded_controller;
5581 			break;
5582 		}
5583 	}
5584 
5585 	dev_info(&pdev->dev, "%s found\n", ctrl_description);
5586 }
5587 
5588 static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5589 {
5590 	int rc;
5591 	int node;
5592 	struct pqi_ctrl_info *ctrl_info;
5593 
5594 	pqi_print_ctrl_info(pdev, id);
5595 
5596 	if (pqi_disable_device_id_wildcards &&
5597 		id->subvendor == PCI_ANY_ID &&
5598 		id->subdevice == PCI_ANY_ID) {
5599 		dev_warn(&pdev->dev,
5600 			"controller not probed because device ID wildcards are disabled\n");
5601 		return -ENODEV;
5602 	}
5603 
5604 	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5605 		dev_warn(&pdev->dev,
5606 			"controller device ID matched using wildcards\n");
5607 
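	/* Allocate the controller info block on the adapter's local NUMA node. */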
5608 	node = dev_to_node(&pdev->dev);
5609 	if (node == NUMA_NO_NODE)
5610 		set_dev_node(&pdev->dev, 0);
5611 
5612 	ctrl_info = pqi_alloc_ctrl_info(node);
5613 	if (!ctrl_info) {
5614 		dev_err(&pdev->dev,
5615 			"failed to allocate controller info block\n");
5616 		return -ENOMEM;
5617 	}
5618 
5619 	ctrl_info->pci_dev = pdev;
5620 
5621 	rc = pqi_pci_init(ctrl_info);
5622 	if (rc)
5623 		goto error;
5624 
5625 	rc = pqi_ctrl_init(ctrl_info);
5626 	if (rc)
5627 		goto error;
5628 
5629 	return 0;
5630 
5631 error:
5632 	pqi_remove_ctrl(ctrl_info);
5633 
5634 	return rc;
5635 }
5636 
5637 static void pqi_pci_remove(struct pci_dev *pdev)
5638 {
5639 	struct pqi_ctrl_info *ctrl_info;
5640 
5641 	ctrl_info = pci_get_drvdata(pdev);
5642 	if (!ctrl_info)
5643 		return;
5644 
5645 	pqi_remove_ctrl(ctrl_info);
5646 }
5647 
5648 static void pqi_shutdown(struct pci_dev *pdev)
5649 {
5650 	int rc;
5651 	struct pqi_ctrl_info *ctrl_info;
5652 
5653 	ctrl_info = pci_get_drvdata(pdev);
5654 	if (!ctrl_info)
5655 		goto error;
5656 
5657 	/*
5658 	 * Write all data in the controller's battery-backed cache to
5659 	 * storage.
5660 	 */
5661 	rc = pqi_flush_cache(ctrl_info);
5662 	if (rc == 0)
5663 		return;
5664 
5665 error:
5666 	dev_warn(&pdev->dev,
5667 		"unable to flush controller cache\n");
5668 }
5669 
5670 /* Define the PCI IDs for the controllers that we support. */
5671 static const struct pci_device_id pqi_pci_id_table[] = {
5672 	{
5673 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5674 			       PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5675 	},
5676 	{
5677 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5678 			       PCI_VENDOR_ID_HP, 0x0600)
5679 	},
5680 	{
5681 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5682 			       PCI_VENDOR_ID_HP, 0x0601)
5683 	},
5684 	{
5685 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5686 			       PCI_VENDOR_ID_HP, 0x0602)
5687 	},
5688 	{
5689 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5690 			       PCI_VENDOR_ID_HP, 0x0603)
5691 	},
5692 	{
5693 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5694 			       PCI_VENDOR_ID_HP, 0x0650)
5695 	},
5696 	{
5697 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5698 			       PCI_VENDOR_ID_HP, 0x0651)
5699 	},
5700 	{
5701 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5702 			       PCI_VENDOR_ID_HP, 0x0652)
5703 	},
5704 	{
5705 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5706 			       PCI_VENDOR_ID_HP, 0x0653)
5707 	},
5708 	{
5709 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5710 			       PCI_VENDOR_ID_HP, 0x0654)
5711 	},
5712 	{
5713 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5714 			       PCI_VENDOR_ID_HP, 0x0655)
5715 	},
5716 	{
5717 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5718 			       PCI_VENDOR_ID_HP, 0x0700)
5719 	},
5720 	{
5721 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5722 			       PCI_VENDOR_ID_HP, 0x0701)
5723 	},
5724 	{
5725 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5726 			       PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5727 	},
5728 	{
5729 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5730 			       PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5731 	},
5732 	{
5733 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5734 			       PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5735 	},
5736 	{
5737 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5738 			       PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5739 	},
5740 	{
5741 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5742 			       PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5743 	},
5744 	{
5745 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5746 			       PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5747 	},
5748 	{
5749 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5750 			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5751 	},
5752 	{
5753 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5754 			       PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5755 	},
5756 	{
5757 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5758 			       PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5759 	},
5760 	{
5761 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5762 			       PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5763 	},
5764 	{
5765 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5766 			       PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5767 	},
5768 	{
5769 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5770 			       PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5771 	},
5772 	{
5773 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5774 			       PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5775 	},
5776 	{
5777 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5778 			       PCI_VENDOR_ID_HP, 0x1001)
5779 	},
5780 	{
5781 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5782 			       PCI_VENDOR_ID_HP, 0x1100)
5783 	},
5784 	{
5785 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5786 			       PCI_VENDOR_ID_HP, 0x1101)
5787 	},
5788 	{
5789 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5790 			       PCI_VENDOR_ID_HP, 0x1102)
5791 	},
5792 	{
5793 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5794 			       PCI_VENDOR_ID_HP, 0x1150)
5795 	},
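	/* Catch-all entry: matches any subsystem vendor and device ID. */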
5796 	{
5797 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5798 			       PCI_ANY_ID, PCI_ANY_ID)
5799 	},
5800 	{ 0 }
5801 };
5802 
5803 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
5804 
5805 static struct pci_driver pqi_pci_driver = {
5806 	.name = DRIVER_NAME_SHORT,
5807 	.id_table = pqi_pci_id_table,
5808 	.probe = pqi_pci_probe,
5809 	.remove = pqi_pci_remove,
5810 	.shutdown = pqi_shutdown,
5811 };
5812 
5813 static int __init pqi_init(void)
5814 {
5815 	int rc;
5816 
5817 	pr_info(DRIVER_NAME "\n");
5818 
5819 	pqi_sas_transport_template =
5820 		sas_attach_transport(&pqi_sas_transport_functions);
5821 	if (!pqi_sas_transport_template)
5822 		return -ENODEV;
5823 
5824 	rc = pci_register_driver(&pqi_pci_driver);
5825 	if (rc)
5826 		sas_release_transport(pqi_sas_transport_template);
5827 
5828 	return rc;
5829 }
5830 
5831 static void __exit pqi_cleanup(void)
5832 {
5833 	pci_unregister_driver(&pqi_pci_driver);
5834 	sas_release_transport(pqi_sas_transport_template);
5835 }
5836 
5837 module_init(pqi_init);
5838 module_exit(pqi_cleanup);
5839 
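/*
 * This function is never called; it exists only to host the compile-time
 * BUILD_BUG_ON() checks below, which verify that the structure layouts
 * match the offsets and sizes expected by the PQI/SIS interface.
 */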
5840 static void __attribute__((unused)) verify_structures(void)
5841 {
5842 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5843 		sis_host_to_ctrl_doorbell) != 0x20);
5844 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5845 		sis_interrupt_mask) != 0x34);
5846 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5847 		sis_ctrl_to_host_doorbell) != 0x9c);
5848 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5849 		sis_ctrl_to_host_doorbell_clear) != 0xa0);
5850 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5851 		sis_driver_scratch) != 0xb0);
5852 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5853 		sis_firmware_status) != 0xbc);
5854 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5855 		sis_mailbox) != 0x1000);
5856 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5857 		pqi_registers) != 0x4000);
5858 
5859 	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5860 		iu_type) != 0x0);
5861 	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5862 		iu_length) != 0x2);
5863 	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5864 		response_queue_id) != 0x4);
5865 	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5866 		work_area) != 0x6);
5867 	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5868 
5869 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5870 		status) != 0x0);
5871 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5872 		service_response) != 0x1);
5873 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5874 		data_present) != 0x2);
5875 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5876 		reserved) != 0x3);
5877 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5878 		residual_count) != 0x4);
5879 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5880 		data_length) != 0x8);
5881 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5882 		reserved1) != 0xa);
5883 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5884 		data) != 0xc);
5885 	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5886 
5887 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5888 		data_in_result) != 0x0);
5889 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5890 		data_out_result) != 0x1);
5891 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5892 		reserved) != 0x2);
5893 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5894 		status) != 0x5);
5895 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5896 		status_qualifier) != 0x6);
5897 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5898 		sense_data_length) != 0x8);
5899 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5900 		response_data_length) != 0xa);
5901 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5902 		data_in_transferred) != 0xc);
5903 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5904 		data_out_transferred) != 0x10);
5905 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5906 		data) != 0x14);
5907 	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5908 
5909 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5910 		signature) != 0x0);
5911 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5912 		function_and_status_code) != 0x8);
5913 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5914 		max_admin_iq_elements) != 0x10);
5915 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5916 		max_admin_oq_elements) != 0x11);
5917 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5918 		admin_iq_element_length) != 0x12);
5919 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5920 		admin_oq_element_length) != 0x13);
5921 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5922 		max_reset_timeout) != 0x14);
5923 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5924 		legacy_intx_status) != 0x18);
5925 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5926 		legacy_intx_mask_set) != 0x1c);
5927 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5928 		legacy_intx_mask_clear) != 0x20);
5929 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5930 		device_status) != 0x40);
5931 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5932 		admin_iq_pi_offset) != 0x48);
5933 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5934 		admin_oq_ci_offset) != 0x50);
5935 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5936 		admin_iq_element_array_addr) != 0x58);
5937 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5938 		admin_oq_element_array_addr) != 0x60);
5939 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5940 		admin_iq_ci_addr) != 0x68);
5941 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5942 		admin_oq_pi_addr) != 0x70);
5943 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5944 		admin_iq_num_elements) != 0x78);
5945 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5946 		admin_oq_num_elements) != 0x79);
5947 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5948 		admin_queue_int_msg_num) != 0x7a);
5949 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5950 		device_error) != 0x80);
5951 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5952 		error_details) != 0x88);
5953 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5954 		device_reset) != 0x90);
5955 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5956 		power_action) != 0x94);
5957 	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
5958 
5959 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5960 		header.iu_type) != 0);
5961 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5962 		header.iu_length) != 2);
5963 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5964 		header.work_area) != 6);
5965 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5966 		request_id) != 8);
5967 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5968 		function_code) != 10);
5969 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5970 		data.report_device_capability.buffer_length) != 44);
5971 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5972 		data.report_device_capability.sg_descriptor) != 48);
5973 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5974 		data.create_operational_iq.queue_id) != 12);
5975 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5976 		data.create_operational_iq.element_array_addr) != 16);
5977 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5978 		data.create_operational_iq.ci_addr) != 24);
5979 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5980 		data.create_operational_iq.num_elements) != 32);
5981 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5982 		data.create_operational_iq.element_length) != 34);
5983 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5984 		data.create_operational_iq.queue_protocol) != 36);
5985 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5986 		data.create_operational_oq.queue_id) != 12);
5987 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5988 		data.create_operational_oq.element_array_addr) != 16);
5989 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5990 		data.create_operational_oq.pi_addr) != 24);
5991 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5992 		data.create_operational_oq.num_elements) != 32);
5993 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5994 		data.create_operational_oq.element_length) != 34);
5995 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5996 		data.create_operational_oq.queue_protocol) != 36);
5997 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5998 		data.create_operational_oq.int_msg_num) != 40);
5999 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6000 		data.create_operational_oq.coalescing_count) != 42);
6001 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6002 		data.create_operational_oq.min_coalescing_time) != 44);
6003 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6004 		data.create_operational_oq.max_coalescing_time) != 48);
6005 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6006 		data.delete_operational_queue.queue_id) != 12);
6007 	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6008 	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6009 		data.create_operational_iq) != 64 - 11);
6010 	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6011 		data.create_operational_oq) != 64 - 11);
6012 	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6013 		data.delete_operational_queue) != 64 - 11);
6014 
6015 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6016 		header.iu_type) != 0);
6017 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6018 		header.iu_length) != 2);
6019 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6020 		header.work_area) != 6);
6021 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6022 		request_id) != 8);
6023 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6024 		function_code) != 10);
6025 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6026 		status) != 11);
6027 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6028 		data.create_operational_iq.status_descriptor) != 12);
6029 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6030 		data.create_operational_iq.iq_pi_offset) != 16);
6031 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6032 		data.create_operational_oq.status_descriptor) != 12);
6033 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6034 		data.create_operational_oq.oq_ci_offset) != 16);
6035 	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6036 
6037 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6038 		header.iu_type) != 0);
6039 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6040 		header.iu_length) != 2);
6041 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6042 		header.response_queue_id) != 4);
6043 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6044 		header.work_area) != 6);
6045 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6046 		request_id) != 8);
6047 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6048 		nexus_id) != 10);
6049 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6050 		buffer_length) != 12);
6051 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6052 		lun_number) != 16);
6053 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6054 		protocol_specific) != 24);
6055 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6056 		error_index) != 27);
6057 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6058 		cdb) != 32);
6059 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6060 		sg_descriptors) != 64);
6061 	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6062 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6063 
6064 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6065 		header.iu_type) != 0);
6066 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6067 		header.iu_length) != 2);
6068 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6069 		header.response_queue_id) != 4);
6070 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6071 		header.work_area) != 6);
6072 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6073 		request_id) != 8);
6074 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6075 		nexus_id) != 12);
6076 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6077 		buffer_length) != 16);
6078 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6079 		data_encryption_key_index) != 22);
6080 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6081 		encrypt_tweak_lower) != 24);
6082 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6083 		encrypt_tweak_upper) != 28);
6084 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6085 		cdb) != 32);
6086 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6087 		error_index) != 48);
6088 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6089 		num_sg_descriptors) != 50);
6090 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6091 		cdb_length) != 51);
6092 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6093 		lun_number) != 52);
6094 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6095 		sg_descriptors) != 64);
6096 	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6097 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6098 
6099 	BUILD_BUG_ON(offsetof(struct pqi_io_response,
6100 		header.iu_type) != 0);
6101 	BUILD_BUG_ON(offsetof(struct pqi_io_response,
6102 		header.iu_length) != 2);
6103 	BUILD_BUG_ON(offsetof(struct pqi_io_response,
6104 		request_id) != 8);
6105 	BUILD_BUG_ON(offsetof(struct pqi_io_response,
6106 		error_index) != 10);
6107 
6108 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6109 		header.iu_type) != 0);
6110 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6111 		header.iu_length) != 2);
6112 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6113 		header.response_queue_id) != 4);
6114 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6115 		request_id) != 8);
6116 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6117 		data.report_event_configuration.buffer_length) != 12);
6118 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6119 		data.report_event_configuration.sg_descriptors) != 16);
6120 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6121 		data.set_event_configuration.global_event_oq_id) != 10);
6122 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6123 		data.set_event_configuration.buffer_length) != 12);
6124 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6125 		data.set_event_configuration.sg_descriptors) != 16);
6126 
6127 	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6128 		max_inbound_iu_length) != 6);
6129 	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6130 		max_outbound_iu_length) != 14);
6131 	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6132 
6133 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6134 		data_length) != 0);
6135 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6136 		iq_arbitration_priority_support_bitmask) != 8);
6137 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6138 		maximum_aw_a) != 9);
6139 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6140 		maximum_aw_b) != 10);
6141 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6142 		maximum_aw_c) != 11);
6143 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6144 		max_inbound_queues) != 16);
6145 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6146 		max_elements_per_iq) != 18);
6147 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6148 		max_iq_element_length) != 24);
6149 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6150 		min_iq_element_length) != 26);
6151 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6152 		max_outbound_queues) != 30);
6153 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6154 		max_elements_per_oq) != 32);
6155 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6156 		intr_coalescing_time_granularity) != 34);
6157 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6158 		max_oq_element_length) != 36);
6159 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6160 		min_oq_element_length) != 38);
6161 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6162 		iu_layer_descriptors) != 64);
6163 	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6164 
6165 	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6166 		event_type) != 0);
6167 	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6168 		oq_id) != 2);
6169 	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6170 
6171 	BUILD_BUG_ON(offsetof(struct pqi_event_config,
6172 		num_event_descriptors) != 2);
6173 	BUILD_BUG_ON(offsetof(struct pqi_event_config,
6174 		descriptors) != 4);
6175 
6176 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
6177 		header.iu_type) != 0);
6178 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
6179 		header.iu_length) != 2);
6180 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
6181 		event_type) != 8);
6182 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
6183 		event_id) != 10);
6184 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
6185 		additional_event_id) != 12);
6186 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
6187 		data) != 16);
6188 	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6189 
6190 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6191 		header.iu_type) != 0);
6192 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6193 		header.iu_length) != 2);
6194 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6195 		event_type) != 8);
6196 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6197 		event_id) != 10);
6198 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6199 		additional_event_id) != 12);
6200 	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6201 
6202 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6203 		header.iu_type) != 0);
6204 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6205 		header.iu_length) != 2);
6206 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6207 		request_id) != 8);
6208 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6209 		nexus_id) != 10);
6210 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6211 		lun_number) != 16);
6212 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6213 		protocol_specific) != 24);
6214 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6215 		outbound_queue_id_to_manage) != 26);
6216 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6217 		request_id_to_manage) != 28);
6218 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6219 		task_management_function) != 30);
6220 	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6221 
6222 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6223 		header.iu_type) != 0);
6224 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6225 		header.iu_length) != 2);
6226 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6227 		request_id) != 8);
6228 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6229 		nexus_id) != 10);
6230 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6231 		additional_response_info) != 12);
6232 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6233 		response_code) != 15);
6234 	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6235 
6236 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6237 		configured_logical_drive_count) != 0);
6238 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6239 		configuration_signature) != 1);
6240 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6241 		firmware_version) != 5);
6242 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6243 		extended_logical_unit_count) != 154);
6244 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6245 		firmware_build_number) != 190);
6246 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6247 		controller_mode) != 292);
6248 
6249 	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6250 	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6251 	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6252 		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6253 	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6254 		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6255 	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6256 	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6257 		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6258 	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6259 	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6260 		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6261 
6262 	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6263 }
6264