xref: /openbmc/linux/drivers/scsi/mpt3sas/mpt3sas_ctl.c (revision 7b73a9c8e26ce5769c41d4b787767c10fe7269db)
1 /*
2  * Management Module Support for MPT (Message Passing Technology) based
3  * controllers
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
6  * Copyright (C) 2012-2014  LSI Corporation
7  * Copyright (C) 2013-2014 Avago Technologies
8  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * NO WARRANTY
21  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25  * solely responsible for determining the appropriateness of using and
26  * distributing the Program and assumes all risks associated with its
27  * exercise of rights under this Agreement, including but not limited to
28  * the risks and costs of program errors, damage to or loss of data,
29  * programs or equipment, and unavailability or interruption of operations.
30 
31  * DISCLAIMER OF LIABILITY
32  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 
40  * You should have received a copy of the GNU General Public License
41  * along with this program; if not, write to the Free Software
42  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
43  * USA.
44  */
45 
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/compat.h>
55 #include <linux/poll.h>
56 
57 #include <linux/io.h>
58 #include <linux/uaccess.h>
59 
60 #include "mpt3sas_base.h"
61 #include "mpt3sas_ctl.h"
62 
63 
64 static struct fasync_struct *async_queue;
65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
66 
67 
/**
 * enum block_state - blocking state
 * @NON_BLOCKING: non blocking; the ioctl path must not sleep
 * @BLOCKING: blocking; the ioctl path may sleep
 *
 * These states are for ioctls that need to wait for a response
 * from firmware, so they probably require sleep.  Callers opening the
 * control node with O_NONBLOCK get NON_BLOCKING semantics.
 */
enum block_state {
	NON_BLOCKING,
	BLOCKING,
};
80 
/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string pass from calling function
 * @mpi_reply: reply message frame, or NULL when only the request side
 *	should be logged
 * Context: none.
 *
 * Function for displaying debug info helpful when debugging issues
 * in this module.  Emits nothing unless MPT_DEBUG_IOCTL is set in
 * ioc->logging_level.
 */
static void
_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	/* debug output is gated on the ioctl logging level */
	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	/* map the MPI function code to a human readable description;
	 * for variable requests the text is built in ioc->tmp_string
	 * (shared scratch -- presumably serialized by the single
	 * outstanding ctl command; TODO confirm)
	 */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	}

	/* unknown function codes are silently ignored */
	if (!desc)
		return;

	ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);

	/* request-only logging (e.g. before the command is posted) */
	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
			 le16_to_cpu(mpi_reply->IOCStatus),
			 le32_to_cpu(mpi_reply->IOCLogInfo));

	/* for SCSI IO / RAID passthrough, also identify the target
	 * device (SAS first, then NVMe/PCIe) and SCSI status
	 */
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		struct _pcie_device *pcie_device = NULL;

		sas_device = mpt3sas_get_sdev_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address,
				 sas_device->phy);
			ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
			sas_device_put(sas_device);
		}
		if (!sas_device) {
			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
				le16_to_cpu(scsi_reply->DevHandle));
			if (pcie_device) {
				ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
					 (unsigned long long)pcie_device->wwid,
					 pcie_device->port_num);
				if (pcie_device->enclosure_handle != 0)
					ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
						 (u64)pcie_device->enclosure_logical_id,
						 pcie_device->slot);
				pcie_device_put(pcie_device);
			}
		}
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
				 scsi_reply->SCSIState,
				 scsi_reply->SCSIStatus);
	}
}
237 
/**
 * mpt3sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.  Copies the reply
 * frame (and any sense / NVMe error-response data) into the ctl_cmds
 * buffers, then completes the waiter in _ctl_do_mpt_command().
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	Mpi2SCSIIOReply_t *scsiio_reply;
	Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
	const void *sense_data;
	u32 sz;

	/* ignore completions that do not belong to the outstanding
	 * ctl command (stale or unrelated smid)
	 */
	if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->ctl_cmds.smid != smid)
		return 1;
	ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords, hence the *4 */
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
		/* get sense data */
		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_reply->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
			if (scsiio_reply->SCSIState &
			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
				/* clamp to the sense buffer capacity */
				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
				    le32_to_cpu(scsiio_reply->SenseCount));
				sense_data = mpt3sas_base_get_sense_buffer(ioc,
				    smid);
				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
			}
		}
		/*
		 * Get Error Response data for NVMe device. The ctl_cmds.sense
		 * buffer is used to store the Error Response data.
		 */
		if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
			nvme_error_reply =
			    (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
			sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
			    le16_to_cpu(nvme_error_reply->ErrorResponseCount));
			sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
			memcpy(ioc->ctl_cmds.sense, sense_data, sz);
		}
	}

	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
	/* wake up the ioctl thread blocked in _ctl_do_mpt_command() */
	ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->ctl_cmds.done);
	return 1;
}
303 
304 /**
305  * _ctl_check_event_type - determines when an event needs logging
306  * @ioc: per adapter object
307  * @event: firmware event
308  *
309  * The bitmask in ioc->event_type[] indicates which events should be
310  * be saved in the driver event_log.  This bitmask is set by application.
311  *
312  * Return: 1 when event should be captured, or zero means no match.
313  */
314 static int
315 _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
316 {
317 	u16 i;
318 	u32 desired_event;
319 
320 	if (event >= 128 || !event || !ioc->event_log)
321 		return 0;
322 
323 	desired_event = (1 << (event % 32));
324 	if (!desired_event)
325 		desired_event = 1;
326 	i = event / 32;
327 	return desired_event & ioc->event_type[i];
328 }
329 
330 /**
331  * mpt3sas_ctl_add_to_event_log - add event
332  * @ioc: per adapter object
333  * @mpi_reply: reply message frame
334  */
335 void
336 mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
337 	Mpi2EventNotificationReply_t *mpi_reply)
338 {
339 	struct MPT3_IOCTL_EVENTS *event_log;
340 	u16 event;
341 	int i;
342 	u32 sz, event_data_sz;
343 	u8 send_aen = 0;
344 
345 	if (!ioc->event_log)
346 		return;
347 
348 	event = le16_to_cpu(mpi_reply->Event);
349 
350 	if (_ctl_check_event_type(ioc, event)) {
351 
352 		/* insert entry into circular event_log */
353 		i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
354 		event_log = ioc->event_log;
355 		event_log[i].event = event;
356 		event_log[i].context = ioc->event_context++;
357 
358 		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
359 		sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
360 		memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
361 		memcpy(event_log[i].data, mpi_reply->EventData, sz);
362 		send_aen = 1;
363 	}
364 
365 	/* This aen_event_read_flag flag is set until the
366 	 * application has read the event log.
367 	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
368 	 */
369 	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
370 	    (send_aen && !ioc->aen_event_read_flag)) {
371 		ioc->aen_event_read_flag = 1;
372 		wake_up_interruptible(&ctl_poll_wait);
373 		if (async_queue)
374 			kill_fasync(&async_queue, SIGIO, POLL_IN);
375 	}
376 }
377 
378 /**
379  * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
380  * @ioc: per adapter object
381  * @msix_index: MSIX table index supplied by the OS
382  * @reply: reply message frame(lower 32bit addr)
383  * Context: interrupt.
384  *
385  * This function merely adds a new work task into ioc->firmware_event_thread.
386  * The tasks are worked from _firmware_event_work in user context.
387  *
388  * Return: 1 meaning mf should be freed from _base_interrupt
389  *         0 means the mf is freed from this function.
390  */
391 u8
392 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
393 	u32 reply)
394 {
395 	Mpi2EventNotificationReply_t *mpi_reply;
396 
397 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
398 	if (mpi_reply)
399 		mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
400 	return 1;
401 }
402 
403 /**
404  * _ctl_verify_adapter - validates ioc_number passed from application
405  * @ioc_number: ?
406  * @iocpp: The ioc pointer is returned in this.
407  * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
408  * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
409  *
410  * Return: (-1) means error, else ioc_number.
411  */
412 static int
413 _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
414 							int mpi_version)
415 {
416 	struct MPT3SAS_ADAPTER *ioc;
417 	int version = 0;
418 	/* global ioc lock to protect controller on list operations */
419 	spin_lock(&gioc_lock);
420 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
421 		if (ioc->id != ioc_number)
422 			continue;
423 		/* Check whether this ioctl command is from right
424 		 * ioctl device or not, if not continue the search.
425 		 */
426 		version = ioc->hba_mpi_version_belonged;
427 		/* MPI25_VERSION and MPI26_VERSION uses same ioctl
428 		 * device.
429 		 */
430 		if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
431 			if ((version == MPI25_VERSION) ||
432 				(version == MPI26_VERSION))
433 				goto out;
434 			else
435 				continue;
436 		} else {
437 			if (version != mpi_version)
438 				continue;
439 		}
440 out:
441 		spin_unlock(&gioc_lock);
442 		*iocpp = ioc;
443 		return ioc_number;
444 	}
445 	spin_unlock(&gioc_lock);
446 	*iocpp = NULL;
447 	return -1;
448 }
449 
450 /**
451  * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
452  * @ioc: per adapter object
453  *
454  * The handler for doing any required cleanup or initialization.
455  */
456 void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
457 {
458 	int i;
459 	u8 issue_reset;
460 
461 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
462 	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
463 		if (!(ioc->diag_buffer_status[i] &
464 		      MPT3_DIAG_BUFFER_IS_REGISTERED))
465 			continue;
466 		if ((ioc->diag_buffer_status[i] &
467 		     MPT3_DIAG_BUFFER_IS_RELEASED))
468 			continue;
469 
470 		/*
471 		 * add a log message to indicate the release
472 		 */
473 		ioc_info(ioc,
474 		    "%s: Releasing the trace buffer due to adapter reset.",
475 		    __func__);
476 		mpt3sas_send_diag_release(ioc, i, &issue_reset);
477 	}
478 }
479 
480 /**
481  * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
482  * @ioc: per adapter object
483  *
484  * The handler for doing any required cleanup or initialization.
485  */
486 void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
487 {
488 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
489 	if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
490 		ioc->ctl_cmds.status |= MPT3_CMD_RESET;
491 		mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
492 		complete(&ioc->ctl_cmds.done);
493 	}
494 }
495 
496 /**
497  * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
498  * @ioc: per adapter object
499  *
500  * The handler for doing any required cleanup or initialization.
501  */
502 void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
503 {
504 	int i;
505 
506 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
507 
508 	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
509 		if (!(ioc->diag_buffer_status[i] &
510 		      MPT3_DIAG_BUFFER_IS_REGISTERED))
511 			continue;
512 		if ((ioc->diag_buffer_status[i] &
513 		     MPT3_DIAG_BUFFER_IS_RELEASED))
514 			continue;
515 		ioc->diag_buffer_status[i] |=
516 			MPT3_DIAG_BUFFER_IS_DIAG_RESET;
517 	}
518 }
519 
/**
 * _ctl_fasync - fasync handler for the ctl device node
 * @fd: file descriptor from user space
 * @filep: open file pointer
 * @mode: non-zero to register for SIGIO, zero to unregister
 *
 * Called when an application requests the fasync callback handler;
 * registers the caller on the module-wide async_queue so it is
 * notified (SIGIO/POLL_IN) when new events are logged.
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}
533 
534 /**
535  * _ctl_poll -
536  * @filep: ?
537  * @wait: ?
538  *
539  */
540 static __poll_t
541 _ctl_poll(struct file *filep, poll_table *wait)
542 {
543 	struct MPT3SAS_ADAPTER *ioc;
544 
545 	poll_wait(filep, &ctl_poll_wait, wait);
546 
547 	/* global ioc lock to protect controller on list operations */
548 	spin_lock(&gioc_lock);
549 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
550 		if (ioc->aen_event_read_flag) {
551 			spin_unlock(&gioc_lock);
552 			return EPOLLIN | EPOLLRDNORM;
553 		}
554 	}
555 	spin_unlock(&gioc_lock);
556 	return 0;
557 }
558 
/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg: (struct mpt3_ioctl_command)
 * @tm_request: pointer to mf from user space
 *
 * Only applies to ABORT_TASK and QUERY_TASK; any other task type
 * returns 0 untouched.
 *
 * Return: 0 when an smid is found, else fail.
 * during failure, the reply frame is filled.
 */
static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
	Mpi2SCSITaskManagementRequest_t *tm_request)
{
	u8 found = 0;
	u16 smid;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT3SAS_DEVICE *priv_data;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	/* walk outstanding SCSI IO smids from the top, looking for a
	 * command on the same handle/lun as the tm request
	 */
	handle = le16_to_cpu(tm_request->DevHandle);
	for (smid = ioc->scsiio_depth; smid && !found; smid--) {
		struct scsiio_tracker *st;

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		st = scsi_cmd_priv(scmd);

		/*
		 * If the given TaskMID from the user space is zero, then the
		 * first outstanding smid will be picked up.  Otherwise,
		 * targeted smid will be the one.
		 *
		 * NOTE(review): TaskMID is little-endian on the wire while
		 * st->smid is CPU order; the equality test mixes the two on
		 * big-endian hosts -- confirm intent.
		 */
		if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
			tm_request->TaskMID = cpu_to_le16(st->smid);
			found = 1;
		}
	}

	if (!found) {
		/* no matching IO: synthesize a TM reply frame and copy it
		 * back to user space so the application sees a completion
		 */
		dctlprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
				    desc, le16_to_cpu(tm_request->DevHandle),
				    lun));
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
			    desc, le16_to_cpu(tm_request->DevHandle), lun,
			    le16_to_cpu(tm_request->TaskMID)));
	return 0;
}
644 
645 /**
646  * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
647  * @ioc: per adapter object
648  * @karg: (struct mpt3_ioctl_command)
649  * @mf: pointer to mf in user space
650  */
651 static long
652 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
653 	void __user *mf)
654 {
655 	MPI2RequestHeader_t *mpi_request = NULL, *request;
656 	MPI2DefaultReply_t *mpi_reply;
657 	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
658 	struct _pcie_device *pcie_device = NULL;
659 	u16 smid;
660 	u8 timeout;
661 	u8 issue_reset;
662 	u32 sz, sz_arg;
663 	void *psge;
664 	void *data_out = NULL;
665 	dma_addr_t data_out_dma = 0;
666 	size_t data_out_sz = 0;
667 	void *data_in = NULL;
668 	dma_addr_t data_in_dma = 0;
669 	size_t data_in_sz = 0;
670 	long ret;
671 	u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
672 
673 	issue_reset = 0;
674 
675 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
676 		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
677 		ret = -EAGAIN;
678 		goto out;
679 	}
680 
681 	ret = mpt3sas_wait_for_ioc(ioc,	IOC_OPERATIONAL_WAIT_COUNT);
682 	if (ret)
683 		goto out;
684 
685 	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
686 	if (!mpi_request) {
687 		ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
688 			__func__);
689 		ret = -ENOMEM;
690 		goto out;
691 	}
692 
693 	/* Check for overflow and wraparound */
694 	if (karg.data_sge_offset * 4 > ioc->request_sz ||
695 	    karg.data_sge_offset > (UINT_MAX / 4)) {
696 		ret = -EINVAL;
697 		goto out;
698 	}
699 
700 	/* copy in request message frame from user */
701 	if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
702 		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
703 		    __func__);
704 		ret = -EFAULT;
705 		goto out;
706 	}
707 
708 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
709 		smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
710 		if (!smid) {
711 			ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
712 			ret = -EAGAIN;
713 			goto out;
714 		}
715 	} else {
716 		/* Use first reserved smid for passthrough ioctls */
717 		smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
718 	}
719 
720 	ret = 0;
721 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
722 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
723 	request = mpt3sas_base_get_msg_frame(ioc, smid);
724 	memset(request, 0, ioc->request_sz);
725 	memcpy(request, mpi_request, karg.data_sge_offset*4);
726 	ioc->ctl_cmds.smid = smid;
727 	data_out_sz = karg.data_out_size;
728 	data_in_sz = karg.data_in_size;
729 
730 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
731 	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
732 	    mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
733 	    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
734 	    mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
735 
736 		device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
737 		if (!device_handle || (device_handle >
738 		    ioc->facts.MaxDevHandle)) {
739 			ret = -EINVAL;
740 			mpt3sas_base_free_smid(ioc, smid);
741 			goto out;
742 		}
743 	}
744 
745 	/* obtain dma-able memory for data transfer */
746 	if (data_out_sz) /* WRITE */ {
747 		data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
748 				&data_out_dma, GFP_KERNEL);
749 		if (!data_out) {
750 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
751 			    __LINE__, __func__);
752 			ret = -ENOMEM;
753 			mpt3sas_base_free_smid(ioc, smid);
754 			goto out;
755 		}
756 		if (copy_from_user(data_out, karg.data_out_buf_ptr,
757 			data_out_sz)) {
758 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
759 			    __LINE__, __func__);
760 			ret =  -EFAULT;
761 			mpt3sas_base_free_smid(ioc, smid);
762 			goto out;
763 		}
764 	}
765 
766 	if (data_in_sz) /* READ */ {
767 		data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
768 				&data_in_dma, GFP_KERNEL);
769 		if (!data_in) {
770 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
771 			    __LINE__, __func__);
772 			ret = -ENOMEM;
773 			mpt3sas_base_free_smid(ioc, smid);
774 			goto out;
775 		}
776 	}
777 
778 	psge = (void *)request + (karg.data_sge_offset*4);
779 
780 	/* send command to firmware */
781 	_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
782 
783 	init_completion(&ioc->ctl_cmds.done);
784 	switch (mpi_request->Function) {
785 	case MPI2_FUNCTION_NVME_ENCAPSULATED:
786 	{
787 		nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
788 		if (!ioc->pcie_sg_lookup) {
789 			dtmprintk(ioc, ioc_info(ioc,
790 			    "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
791 			    ));
792 
793 			if (ioc->logging_level & MPT_DEBUG_TM)
794 				_debug_dump_mf(nvme_encap_request,
795 				    ioc->request_sz/4);
796 			mpt3sas_base_free_smid(ioc, smid);
797 			ret = -EINVAL;
798 			goto out;
799 		}
800 		/*
801 		 * Get the Physical Address of the sense buffer.
802 		 * Use Error Response buffer address field to hold the sense
803 		 * buffer address.
804 		 * Clear the internal sense buffer, which will potentially hold
805 		 * the Completion Queue Entry on return, or 0 if no Entry.
806 		 * Build the PRPs and set direction bits.
807 		 * Send the request.
808 		 */
809 		nvme_encap_request->ErrorResponseBaseAddress =
810 		    cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
811 		nvme_encap_request->ErrorResponseBaseAddress |=
812 		   cpu_to_le64(le32_to_cpu(
813 		   mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
814 		nvme_encap_request->ErrorResponseAllocationLength =
815 					cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
816 		memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
817 		ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
818 		    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
819 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
820 			dtmprintk(ioc,
821 				  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
822 					   device_handle));
823 			mpt3sas_base_free_smid(ioc, smid);
824 			ret = -EINVAL;
825 			goto out;
826 		}
827 		mpt3sas_base_put_smid_nvme_encap(ioc, smid);
828 		break;
829 	}
830 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
831 	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
832 	{
833 		Mpi2SCSIIORequest_t *scsiio_request =
834 		    (Mpi2SCSIIORequest_t *)request;
835 		scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
836 		scsiio_request->SenseBufferLowAddress =
837 		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
838 		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
839 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
840 			dtmprintk(ioc,
841 				  ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
842 					   device_handle));
843 			mpt3sas_base_free_smid(ioc, smid);
844 			ret = -EINVAL;
845 			goto out;
846 		}
847 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
848 		    data_in_dma, data_in_sz);
849 		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
850 			ioc->put_smid_scsi_io(ioc, smid, device_handle);
851 		else
852 			ioc->put_smid_default(ioc, smid);
853 		break;
854 	}
855 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
856 	{
857 		Mpi2SCSITaskManagementRequest_t *tm_request =
858 		    (Mpi2SCSITaskManagementRequest_t *)request;
859 
860 		dtmprintk(ioc,
861 			  ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
862 				   le16_to_cpu(tm_request->DevHandle),
863 				   tm_request->TaskType));
864 		ioc->got_task_abort_from_ioctl = 1;
865 		if (tm_request->TaskType ==
866 		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
867 		    tm_request->TaskType ==
868 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
869 			if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
870 				mpt3sas_base_free_smid(ioc, smid);
871 				ioc->got_task_abort_from_ioctl = 0;
872 				goto out;
873 			}
874 		}
875 		ioc->got_task_abort_from_ioctl = 0;
876 
877 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
878 			dtmprintk(ioc,
879 				  ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
880 					   device_handle));
881 			mpt3sas_base_free_smid(ioc, smid);
882 			ret = -EINVAL;
883 			goto out;
884 		}
885 		mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
886 		    tm_request->DevHandle));
887 		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
888 		    data_in_dma, data_in_sz);
889 		ioc->put_smid_hi_priority(ioc, smid, 0);
890 		break;
891 	}
892 	case MPI2_FUNCTION_SMP_PASSTHROUGH:
893 	{
894 		Mpi2SmpPassthroughRequest_t *smp_request =
895 		    (Mpi2SmpPassthroughRequest_t *)mpi_request;
896 		u8 *data;
897 
898 		/* ioc determines which port to use */
899 		smp_request->PhysicalPort = 0xFF;
900 		if (smp_request->PassthroughFlags &
901 		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
902 			data = (u8 *)&smp_request->SGL;
903 		else {
904 			if (unlikely(data_out == NULL)) {
905 				pr_err("failure at %s:%d/%s()!\n",
906 				    __FILE__, __LINE__, __func__);
907 				mpt3sas_base_free_smid(ioc, smid);
908 				ret = -EINVAL;
909 				goto out;
910 			}
911 			data = data_out;
912 		}
913 
914 		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
915 			ioc->ioc_link_reset_in_progress = 1;
916 			ioc->ignore_loginfos = 1;
917 		}
918 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
919 		    data_in_sz);
920 		ioc->put_smid_default(ioc, smid);
921 		break;
922 	}
923 	case MPI2_FUNCTION_SATA_PASSTHROUGH:
924 	{
925 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
926 			dtmprintk(ioc,
927 				  ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
928 					   device_handle));
929 			mpt3sas_base_free_smid(ioc, smid);
930 			ret = -EINVAL;
931 			goto out;
932 		}
933 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
934 		    data_in_sz);
935 		ioc->put_smid_default(ioc, smid);
936 		break;
937 	}
938 	case MPI2_FUNCTION_FW_DOWNLOAD:
939 	case MPI2_FUNCTION_FW_UPLOAD:
940 	{
941 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
942 		    data_in_sz);
943 		ioc->put_smid_default(ioc, smid);
944 		break;
945 	}
946 	case MPI2_FUNCTION_TOOLBOX:
947 	{
948 		Mpi2ToolboxCleanRequest_t *toolbox_request =
949 			(Mpi2ToolboxCleanRequest_t *)mpi_request;
950 
951 		if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
952 		    || (toolbox_request->Tool ==
953 		    MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN))
954 			ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
955 				data_in_dma, data_in_sz);
956 		else if (toolbox_request->Tool ==
957 				MPI2_TOOLBOX_MEMORY_MOVE_TOOL) {
958 			Mpi2ToolboxMemMoveRequest_t *mem_move_request =
959 					(Mpi2ToolboxMemMoveRequest_t *)request;
960 			Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL;
961 
962 			ioc->build_sg_mpi(ioc, psge, data_out_dma,
963 					data_out_sz, data_in_dma, data_in_sz);
964 			if (data_out_sz && !data_in_sz) {
965 				dst =
966 				    (Mpi2SGESimple64_t *)&mem_move_request->SGL;
967 				src = (void *)dst + ioc->sge_size;
968 
969 				memcpy(&tmp, src, ioc->sge_size);
970 				memcpy(src, dst, ioc->sge_size);
971 				memcpy(dst, &tmp, ioc->sge_size);
972 			}
973 			if (ioc->logging_level & MPT_DEBUG_TM) {
974 				ioc_info(ioc,
975 				  "Mpi2ToolboxMemMoveRequest_t request msg\n");
976 				_debug_dump_mf(mem_move_request,
977 							ioc->request_sz/4);
978 			}
979 		} else
980 			ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
981 			    data_in_dma, data_in_sz);
982 		ioc->put_smid_default(ioc, smid);
983 		break;
984 	}
985 	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
986 	{
987 		Mpi2SasIoUnitControlRequest_t *sasiounit_request =
988 		    (Mpi2SasIoUnitControlRequest_t *)mpi_request;
989 
990 		if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
991 		    || sasiounit_request->Operation ==
992 		    MPI2_SAS_OP_PHY_LINK_RESET) {
993 			ioc->ioc_link_reset_in_progress = 1;
994 			ioc->ignore_loginfos = 1;
995 		}
996 		/* drop to default case for posting the request */
997 	}
998 		/* fall through */
999 	default:
1000 		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
1001 		    data_in_dma, data_in_sz);
1002 		ioc->put_smid_default(ioc, smid);
1003 		break;
1004 	}
1005 
1006 	if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
1007 		timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
1008 	else
1009 		timeout = karg.timeout;
1010 	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
1011 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
1012 		Mpi2SCSITaskManagementRequest_t *tm_request =
1013 		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
1014 		mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
1015 		    tm_request->DevHandle));
1016 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
1017 	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
1018 	    mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
1019 		ioc->ioc_link_reset_in_progress) {
1020 		ioc->ioc_link_reset_in_progress = 0;
1021 		ioc->ignore_loginfos = 0;
1022 	}
1023 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1024 		issue_reset =
1025 			mpt3sas_base_check_cmd_timeout(ioc,
1026 				ioc->ctl_cmds.status, mpi_request,
1027 				karg.data_sge_offset);
1028 		goto issue_host_reset;
1029 	}
1030 
1031 	mpi_reply = ioc->ctl_cmds.reply;
1032 
1033 	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
1034 	    (ioc->logging_level & MPT_DEBUG_TM)) {
1035 		Mpi2SCSITaskManagementReply_t *tm_reply =
1036 		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;
1037 
1038 		ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
1039 			 le16_to_cpu(tm_reply->IOCStatus),
1040 			 le32_to_cpu(tm_reply->IOCLogInfo),
1041 			 le32_to_cpu(tm_reply->TerminationCount));
1042 	}
1043 
1044 	/* copy out xdata to user */
1045 	if (data_in_sz) {
1046 		if (copy_to_user(karg.data_in_buf_ptr, data_in,
1047 		    data_in_sz)) {
1048 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
1049 			    __LINE__, __func__);
1050 			ret = -ENODATA;
1051 			goto out;
1052 		}
1053 	}
1054 
1055 	/* copy out reply message frame to user */
1056 	if (karg.max_reply_bytes) {
1057 		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
1058 		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
1059 		    sz)) {
1060 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
1061 			    __LINE__, __func__);
1062 			ret = -ENODATA;
1063 			goto out;
1064 		}
1065 	}
1066 
1067 	/* copy out sense/NVMe Error Response to user */
1068 	if (karg.max_sense_bytes && (mpi_request->Function ==
1069 	    MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1070 	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
1071 	    MPI2_FUNCTION_NVME_ENCAPSULATED)) {
1072 		if (karg.sense_data_ptr == NULL) {
1073 			ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
1074 			goto out;
1075 		}
1076 		sz_arg = (mpi_request->Function ==
1077 		MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
1078 							SCSI_SENSE_BUFFERSIZE;
1079 		sz = min_t(u32, karg.max_sense_bytes, sz_arg);
1080 		if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1081 		    sz)) {
1082 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
1083 				__LINE__, __func__);
1084 			ret = -ENODATA;
1085 			goto out;
1086 		}
1087 	}
1088 
1089  issue_host_reset:
1090 	if (issue_reset) {
1091 		ret = -ENODATA;
1092 		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
1093 		    mpi_request->Function ==
1094 		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
1095 		    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
1096 			ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
1097 				 le16_to_cpu(mpi_request->FunctionDependent1));
1098 			mpt3sas_halt_firmware(ioc);
1099 			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
1100 				le16_to_cpu(mpi_request->FunctionDependent1));
1101 			if (pcie_device && (!ioc->tm_custom_handling) &&
1102 			    (!(mpt3sas_scsih_is_pcie_scsi_device(
1103 			    pcie_device->device_info))))
1104 				mpt3sas_scsih_issue_locked_tm(ioc,
1105 				  le16_to_cpu(mpi_request->FunctionDependent1),
1106 				  0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1107 				  0, pcie_device->reset_timeout,
1108 			MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
1109 			else
1110 				mpt3sas_scsih_issue_locked_tm(ioc,
1111 				  le16_to_cpu(mpi_request->FunctionDependent1),
1112 				  0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1113 				  0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
1114 		} else
1115 			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1116 	}
1117 
1118  out:
1119 	if (pcie_device)
1120 		pcie_device_put(pcie_device);
1121 
1122 	/* free memory associated with sg buffers */
1123 	if (data_in)
1124 		dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
1125 		    data_in_dma);
1126 
1127 	if (data_out)
1128 		dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
1129 		    data_out_dma);
1130 
1131 	kfree(mpi_request);
1132 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1133 	return ret;
1134 }
1135 
1136 /**
1137  * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
1138  * @ioc: per adapter object
1139  * @arg: user space buffer containing ioctl content
1140  */
1141 static long
1142 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1143 {
1144 	struct mpt3_ioctl_iocinfo karg;
1145 
1146 	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1147 				 __func__));
1148 
1149 	memset(&karg, 0 , sizeof(karg));
1150 	if (ioc->pfacts)
1151 		karg.port_number = ioc->pfacts[0].PortNumber;
1152 	karg.hw_rev = ioc->pdev->revision;
1153 	karg.pci_id = ioc->pdev->device;
1154 	karg.subsystem_device = ioc->pdev->subsystem_device;
1155 	karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1156 	karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1157 	karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1158 	karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1159 	karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1160 	karg.firmware_version = ioc->facts.FWVersion.Word;
1161 	strcpy(karg.driver_version, ioc->driver_name);
1162 	strcat(karg.driver_version, "-");
1163 	switch  (ioc->hba_mpi_version_belonged) {
1164 	case MPI2_VERSION:
1165 		if (ioc->is_warpdrive)
1166 			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1167 		else
1168 			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1169 		strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
1170 		break;
1171 	case MPI25_VERSION:
1172 	case MPI26_VERSION:
1173 		if (ioc->is_gen35_ioc)
1174 			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1175 		else
1176 			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1177 		strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1178 		break;
1179 	}
1180 	karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1181 
1182 	if (copy_to_user(arg, &karg, sizeof(karg))) {
1183 		pr_err("failure at %s:%d/%s()!\n",
1184 		    __FILE__, __LINE__, __func__);
1185 		return -EFAULT;
1186 	}
1187 	return 0;
1188 }
1189 
1190 /**
1191  * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
1192  * @ioc: per adapter object
1193  * @arg: user space buffer containing ioctl content
1194  */
1195 static long
1196 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1197 {
1198 	struct mpt3_ioctl_eventquery karg;
1199 
1200 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1201 		pr_err("failure at %s:%d/%s()!\n",
1202 		    __FILE__, __LINE__, __func__);
1203 		return -EFAULT;
1204 	}
1205 
1206 	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1207 				 __func__));
1208 
1209 	karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
1210 	memcpy(karg.event_types, ioc->event_type,
1211 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1212 
1213 	if (copy_to_user(arg, &karg, sizeof(karg))) {
1214 		pr_err("failure at %s:%d/%s()!\n",
1215 		    __FILE__, __LINE__, __func__);
1216 		return -EFAULT;
1217 	}
1218 	return 0;
1219 }
1220 
1221 /**
1222  * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
1223  * @ioc: per adapter object
1224  * @arg: user space buffer containing ioctl content
1225  */
1226 static long
1227 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1228 {
1229 	struct mpt3_ioctl_eventenable karg;
1230 
1231 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1232 		pr_err("failure at %s:%d/%s()!\n",
1233 		    __FILE__, __LINE__, __func__);
1234 		return -EFAULT;
1235 	}
1236 
1237 	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1238 				 __func__));
1239 
1240 	memcpy(ioc->event_type, karg.event_types,
1241 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1242 	mpt3sas_base_validate_event_type(ioc, ioc->event_type);
1243 
1244 	if (ioc->event_log)
1245 		return 0;
1246 	/* initialize event_log */
1247 	ioc->event_context = 0;
1248 	ioc->aen_event_read_flag = 0;
1249 	ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
1250 	    sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
1251 	if (!ioc->event_log) {
1252 		pr_err("failure at %s:%d/%s()!\n",
1253 		    __FILE__, __LINE__, __func__);
1254 		return -ENOMEM;
1255 	}
1256 	return 0;
1257 }
1258 
1259 /**
1260  * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
1261  * @ioc: per adapter object
1262  * @arg: user space buffer containing ioctl content
1263  */
1264 static long
1265 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1266 {
1267 	struct mpt3_ioctl_eventreport karg;
1268 	u32 number_bytes, max_events, max;
1269 	struct mpt3_ioctl_eventreport __user *uarg = arg;
1270 
1271 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1272 		pr_err("failure at %s:%d/%s()!\n",
1273 		    __FILE__, __LINE__, __func__);
1274 		return -EFAULT;
1275 	}
1276 
1277 	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1278 				 __func__));
1279 
1280 	number_bytes = karg.hdr.max_data_size -
1281 	    sizeof(struct mpt3_ioctl_header);
1282 	max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
1283 	max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
1284 
1285 	/* If fewer than 1 event is requested, there must have
1286 	 * been some type of error.
1287 	 */
1288 	if (!max || !ioc->event_log)
1289 		return -ENODATA;
1290 
1291 	number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
1292 	if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1293 		pr_err("failure at %s:%d/%s()!\n",
1294 		    __FILE__, __LINE__, __func__);
1295 		return -EFAULT;
1296 	}
1297 
1298 	/* reset flag so SIGIO can restart */
1299 	ioc->aen_event_read_flag = 0;
1300 	return 0;
1301 }
1302 
1303 /**
1304  * _ctl_do_reset - main handler for MPT3HARDRESET opcode
1305  * @ioc: per adapter object
1306  * @arg: user space buffer containing ioctl content
1307  */
1308 static long
1309 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1310 {
1311 	struct mpt3_ioctl_diag_reset karg;
1312 	int retval;
1313 
1314 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1315 		pr_err("failure at %s:%d/%s()!\n",
1316 		    __FILE__, __LINE__, __func__);
1317 		return -EFAULT;
1318 	}
1319 
1320 	if (ioc->shost_recovery || ioc->pci_error_recovery ||
1321 	    ioc->is_driver_loading)
1322 		return -EAGAIN;
1323 
1324 	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1325 				 __func__));
1326 
1327 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1328 	ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
1329 	return 0;
1330 }
1331 
1332 /**
1333  * _ctl_btdh_search_sas_device - searching for sas device
1334  * @ioc: per adapter object
1335  * @btdh: btdh ioctl payload
1336  */
1337 static int
1338 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1339 	struct mpt3_ioctl_btdh_mapping *btdh)
1340 {
1341 	struct _sas_device *sas_device;
1342 	unsigned long flags;
1343 	int rc = 0;
1344 
1345 	if (list_empty(&ioc->sas_device_list))
1346 		return rc;
1347 
1348 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1349 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1350 		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1351 		    btdh->handle == sas_device->handle) {
1352 			btdh->bus = sas_device->channel;
1353 			btdh->id = sas_device->id;
1354 			rc = 1;
1355 			goto out;
1356 		} else if (btdh->bus == sas_device->channel && btdh->id ==
1357 		    sas_device->id && btdh->handle == 0xFFFF) {
1358 			btdh->handle = sas_device->handle;
1359 			rc = 1;
1360 			goto out;
1361 		}
1362 	}
1363  out:
1364 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1365 	return rc;
1366 }
1367 
1368 /**
1369  * _ctl_btdh_search_pcie_device - searching for pcie device
1370  * @ioc: per adapter object
1371  * @btdh: btdh ioctl payload
1372  */
1373 static int
1374 _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
1375 	struct mpt3_ioctl_btdh_mapping *btdh)
1376 {
1377 	struct _pcie_device *pcie_device;
1378 	unsigned long flags;
1379 	int rc = 0;
1380 
1381 	if (list_empty(&ioc->pcie_device_list))
1382 		return rc;
1383 
1384 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1385 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1386 		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1387 			   btdh->handle == pcie_device->handle) {
1388 			btdh->bus = pcie_device->channel;
1389 			btdh->id = pcie_device->id;
1390 			rc = 1;
1391 			goto out;
1392 		} else if (btdh->bus == pcie_device->channel && btdh->id ==
1393 			   pcie_device->id && btdh->handle == 0xFFFF) {
1394 			btdh->handle = pcie_device->handle;
1395 			rc = 1;
1396 			goto out;
1397 		}
1398 	}
1399  out:
1400 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1401 	return rc;
1402 }
1403 
1404 /**
1405  * _ctl_btdh_search_raid_device - searching for raid device
1406  * @ioc: per adapter object
1407  * @btdh: btdh ioctl payload
1408  */
1409 static int
1410 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
1411 	struct mpt3_ioctl_btdh_mapping *btdh)
1412 {
1413 	struct _raid_device *raid_device;
1414 	unsigned long flags;
1415 	int rc = 0;
1416 
1417 	if (list_empty(&ioc->raid_device_list))
1418 		return rc;
1419 
1420 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1421 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1422 		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1423 		    btdh->handle == raid_device->handle) {
1424 			btdh->bus = raid_device->channel;
1425 			btdh->id = raid_device->id;
1426 			rc = 1;
1427 			goto out;
1428 		} else if (btdh->bus == raid_device->channel && btdh->id ==
1429 		    raid_device->id && btdh->handle == 0xFFFF) {
1430 			btdh->handle = raid_device->handle;
1431 			rc = 1;
1432 			goto out;
1433 		}
1434 	}
1435  out:
1436 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1437 	return rc;
1438 }
1439 
1440 /**
1441  * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
1442  * @ioc: per adapter object
1443  * @arg: user space buffer containing ioctl content
1444  */
1445 static long
1446 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1447 {
1448 	struct mpt3_ioctl_btdh_mapping karg;
1449 	int rc;
1450 
1451 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1452 		pr_err("failure at %s:%d/%s()!\n",
1453 		    __FILE__, __LINE__, __func__);
1454 		return -EFAULT;
1455 	}
1456 
1457 	dctlprintk(ioc, ioc_info(ioc, "%s\n",
1458 				 __func__));
1459 
1460 	rc = _ctl_btdh_search_sas_device(ioc, &karg);
1461 	if (!rc)
1462 		rc = _ctl_btdh_search_pcie_device(ioc, &karg);
1463 	if (!rc)
1464 		_ctl_btdh_search_raid_device(ioc, &karg);
1465 
1466 	if (copy_to_user(arg, &karg, sizeof(karg))) {
1467 		pr_err("failure at %s:%d/%s()!\n",
1468 		    __FILE__, __LINE__, __func__);
1469 		return -EFAULT;
1470 	}
1471 	return 0;
1472 }
1473 
1474 /**
1475  * _ctl_diag_capability - return diag buffer capability
1476  * @ioc: per adapter object
1477  * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1478  *
1479  * returns 1 when diag buffer support is enabled in firmware
1480  */
1481 static u8
1482 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
1483 {
1484 	u8 rc = 0;
1485 
1486 	switch (buffer_type) {
1487 	case MPI2_DIAG_BUF_TYPE_TRACE:
1488 		if (ioc->facts.IOCCapabilities &
1489 		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1490 			rc = 1;
1491 		break;
1492 	case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1493 		if (ioc->facts.IOCCapabilities &
1494 		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1495 			rc = 1;
1496 		break;
1497 	case MPI2_DIAG_BUF_TYPE_EXTENDED:
1498 		if (ioc->facts.IOCCapabilities &
1499 		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
1500 			rc = 1;
1501 	}
1502 
1503 	return rc;
1504 }
1505 
1506 /**
1507  * _ctl_diag_get_bufftype - return diag buffer type
1508  *              either TRACE, SNAPSHOT, or EXTENDED
1509  * @ioc: per adapter object
1510  * @unique_id: specifies the unique_id for the buffer
1511  *
1512  * returns MPT3_DIAG_UID_NOT_FOUND if the id not found
1513  */
1514 static u8
1515 _ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
1516 {
1517 	u8  index;
1518 
1519 	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
1520 		if (ioc->unique_id[index] == unique_id)
1521 			return index;
1522 	}
1523 
1524 	return MPT3_DIAG_UID_NOT_FOUND;
1525 }
1526 
1527 /**
1528  * _ctl_diag_register_2 - wrapper for registering diag buffer support
1529  * @ioc: per adapter object
1530  * @diag_register: the diag_register struct passed in from user space
1531  *
1532  */
1533 static long
1534 _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1535 	struct mpt3_diag_register *diag_register)
1536 {
1537 	int rc, i;
1538 	void *request_data = NULL;
1539 	dma_addr_t request_data_dma;
1540 	u32 request_data_sz = 0;
1541 	Mpi2DiagBufferPostRequest_t *mpi_request;
1542 	Mpi2DiagBufferPostReply_t *mpi_reply;
1543 	u8 buffer_type;
1544 	u16 smid;
1545 	u16 ioc_status;
1546 	u32 ioc_state;
1547 	u8 issue_reset = 0;
1548 
1549 	dctlprintk(ioc, ioc_info(ioc, "%s\n",
1550 				 __func__));
1551 
1552 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1553 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1554 		ioc_err(ioc, "%s: failed due to ioc not operational\n",
1555 			__func__);
1556 		rc = -EAGAIN;
1557 		goto out;
1558 	}
1559 
1560 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1561 		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
1562 		rc = -EAGAIN;
1563 		goto out;
1564 	}
1565 
1566 	buffer_type = diag_register->buffer_type;
1567 	if (!_ctl_diag_capability(ioc, buffer_type)) {
1568 		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
1569 			__func__, buffer_type);
1570 		return -EPERM;
1571 	}
1572 
1573 	if (diag_register->unique_id == 0) {
1574 		ioc_err(ioc,
1575 		    "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
1576 		    diag_register->unique_id, buffer_type);
1577 		return -EINVAL;
1578 	}
1579 
1580 	if ((ioc->diag_buffer_status[buffer_type] &
1581 	    MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
1582 	    !(ioc->diag_buffer_status[buffer_type] &
1583 	    MPT3_DIAG_BUFFER_IS_RELEASED)) {
1584 		ioc_err(ioc,
1585 		    "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
1586 		    __func__, buffer_type, ioc->unique_id[buffer_type]);
1587 		return -EINVAL;
1588 	}
1589 
1590 	if (ioc->diag_buffer_status[buffer_type] &
1591 	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
1592 		/*
1593 		 * If driver posts buffer initially, then an application wants
1594 		 * to Register that buffer (own it) without Releasing first,
1595 		 * the application Register command MUST have the same buffer
1596 		 * type and size in the Register command (obtained from the
1597 		 * Query command). Otherwise that Register command will be
1598 		 * failed. If the application has released the buffer but wants
1599 		 * to re-register it, it should be allowed as long as the
1600 		 * Unique-Id/Size match.
1601 		 */
1602 
1603 		if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
1604 		    ioc->diag_buffer_sz[buffer_type] ==
1605 		    diag_register->requested_buffer_size) {
1606 
1607 			if (!(ioc->diag_buffer_status[buffer_type] &
1608 			     MPT3_DIAG_BUFFER_IS_RELEASED)) {
1609 				dctlprintk(ioc, ioc_info(ioc,
1610 				    "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
1611 				    __func__, buffer_type,
1612 				    ioc->unique_id[buffer_type],
1613 				    diag_register->unique_id));
1614 
1615 				/*
1616 				 * Application wants to own the buffer with
1617 				 * the same size.
1618 				 */
1619 				ioc->unique_id[buffer_type] =
1620 				    diag_register->unique_id;
1621 				rc = 0; /* success */
1622 				goto out;
1623 			}
1624 		} else if (ioc->unique_id[buffer_type] !=
1625 		    MPT3DIAGBUFFUNIQUEID) {
1626 			if (ioc->unique_id[buffer_type] !=
1627 			    diag_register->unique_id ||
1628 			    ioc->diag_buffer_sz[buffer_type] !=
1629 			    diag_register->requested_buffer_size ||
1630 			    !(ioc->diag_buffer_status[buffer_type] &
1631 			    MPT3_DIAG_BUFFER_IS_RELEASED)) {
1632 				ioc_err(ioc,
1633 				    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
1634 				    __func__, buffer_type);
1635 				return -EINVAL;
1636 			}
1637 		} else {
1638 			ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
1639 			    __func__, buffer_type);
1640 			return -EINVAL;
1641 		}
1642 	} else if (ioc->diag_buffer_status[buffer_type] &
1643 	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
1644 
1645 		if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
1646 		    ioc->diag_buffer_sz[buffer_type] !=
1647 		    diag_register->requested_buffer_size) {
1648 
1649 			ioc_err(ioc,
1650 			    "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n",
1651 			     __func__, buffer_type,
1652 			    ioc->diag_buffer_sz[buffer_type]);
1653 			return -EINVAL;
1654 		}
1655 	}
1656 
1657 	if (diag_register->requested_buffer_size % 4)  {
1658 		ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
1659 			__func__);
1660 		return -EINVAL;
1661 	}
1662 
1663 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1664 	if (!smid) {
1665 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
1666 		rc = -EAGAIN;
1667 		goto out;
1668 	}
1669 
1670 	rc = 0;
1671 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1672 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1673 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1674 	ioc->ctl_cmds.smid = smid;
1675 
1676 	request_data = ioc->diag_buffer[buffer_type];
1677 	request_data_sz = diag_register->requested_buffer_size;
1678 	ioc->unique_id[buffer_type] = diag_register->unique_id;
1679 	ioc->diag_buffer_status[buffer_type] &=
1680 	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
1681 	memcpy(ioc->product_specific[buffer_type],
1682 	    diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
1683 	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
1684 
1685 	if (request_data) {
1686 		request_data_dma = ioc->diag_buffer_dma[buffer_type];
1687 		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1688 			dma_free_coherent(&ioc->pdev->dev,
1689 					ioc->diag_buffer_sz[buffer_type],
1690 					request_data, request_data_dma);
1691 			request_data = NULL;
1692 		}
1693 	}
1694 
1695 	if (request_data == NULL) {
1696 		ioc->diag_buffer_sz[buffer_type] = 0;
1697 		ioc->diag_buffer_dma[buffer_type] = 0;
1698 		request_data = dma_alloc_coherent(&ioc->pdev->dev,
1699 				request_data_sz, &request_data_dma, GFP_KERNEL);
1700 		if (request_data == NULL) {
1701 			ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
1702 				__func__, request_data_sz);
1703 			mpt3sas_base_free_smid(ioc, smid);
1704 			rc = -ENOMEM;
1705 			goto out;
1706 		}
1707 		ioc->diag_buffer[buffer_type] = request_data;
1708 		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1709 		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
1710 	}
1711 
1712 	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1713 	mpi_request->BufferType = diag_register->buffer_type;
1714 	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
1715 	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1716 	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1717 	mpi_request->VF_ID = 0; /* TODO */
1718 	mpi_request->VP_ID = 0;
1719 
1720 	dctlprintk(ioc,
1721 		   ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
1722 			    __func__, request_data,
1723 			    (unsigned long long)request_data_dma,
1724 			    le32_to_cpu(mpi_request->BufferLength)));
1725 
1726 	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1727 		mpi_request->ProductSpecific[i] =
1728 			cpu_to_le32(ioc->product_specific[buffer_type][i]);
1729 
1730 	init_completion(&ioc->ctl_cmds.done);
1731 	ioc->put_smid_default(ioc, smid);
1732 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
1733 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1734 
1735 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1736 		issue_reset =
1737 			mpt3sas_base_check_cmd_timeout(ioc,
1738 				ioc->ctl_cmds.status, mpi_request,
1739 				sizeof(Mpi2DiagBufferPostRequest_t)/4);
1740 		goto issue_host_reset;
1741 	}
1742 
1743 	/* process the completed Reply Message Frame */
1744 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1745 		ioc_err(ioc, "%s: no reply message\n", __func__);
1746 		rc = -EFAULT;
1747 		goto out;
1748 	}
1749 
1750 	mpi_reply = ioc->ctl_cmds.reply;
1751 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1752 
1753 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1754 		ioc->diag_buffer_status[buffer_type] |=
1755 			MPT3_DIAG_BUFFER_IS_REGISTERED;
1756 		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
1757 	} else {
1758 		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1759 			 __func__,
1760 			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1761 		rc = -EFAULT;
1762 	}
1763 
1764  issue_host_reset:
1765 	if (issue_reset)
1766 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1767 
1768  out:
1769 
1770 	if (rc && request_data) {
1771 		dma_free_coherent(&ioc->pdev->dev, request_data_sz,
1772 		    request_data, request_data_dma);
1773 		ioc->diag_buffer_status[buffer_type] &=
1774 		    ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
1775 	}
1776 
1777 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1778 	return rc;
1779 }
1780 
1781 /**
1782  * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
1783  * @ioc: per adapter object
1784  * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
1785  *
1786  * This is called when command line option diag_buffer_enable is enabled
1787  * at driver load time.
1788  */
1789 void
1790 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
1791 {
1792 	struct mpt3_diag_register diag_register;
1793 	u32 ret_val;
1794 	u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
1795 	u32 min_trace_buff_size = 0;
1796 	u32 decr_trace_buff_size = 0;
1797 
1798 	memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
1799 
1800 	if (bits_to_register & 1) {
1801 		ioc_info(ioc, "registering trace buffer support\n");
1802 		ioc->diag_trigger_master.MasterData =
1803 		    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
1804 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
1805 		diag_register.unique_id =
1806 		    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
1807 		    (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
1808 
1809 		if (trace_buff_size != 0) {
1810 			diag_register.requested_buffer_size = trace_buff_size;
1811 			min_trace_buff_size =
1812 			    ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
1813 			decr_trace_buff_size =
1814 			    ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
1815 
1816 			if (min_trace_buff_size > trace_buff_size) {
1817 				/* The buff size is not set correctly */
1818 				ioc_err(ioc,
1819 				    "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
1820 				     min_trace_buff_size>>10,
1821 				     trace_buff_size>>10);
1822 				ioc_err(ioc,
1823 				    "Using zero Min Trace Buff Size\n");
1824 				min_trace_buff_size = 0;
1825 			}
1826 
1827 			if (decr_trace_buff_size == 0) {
1828 				/*
1829 				 * retry the min size if decrement
1830 				 * is not available.
1831 				 */
1832 				decr_trace_buff_size =
1833 				    trace_buff_size - min_trace_buff_size;
1834 			}
1835 		} else {
1836 			/* register for 2MB buffers  */
1837 			diag_register.requested_buffer_size = 2 * (1024 * 1024);
1838 		}
1839 
1840 		do {
1841 			ret_val = _ctl_diag_register_2(ioc,  &diag_register);
1842 
1843 			if (ret_val == -ENOMEM && min_trace_buff_size &&
1844 			    (trace_buff_size - decr_trace_buff_size) >=
1845 			    min_trace_buff_size) {
1846 				/* adjust the buffer size */
1847 				trace_buff_size -= decr_trace_buff_size;
1848 				diag_register.requested_buffer_size =
1849 				    trace_buff_size;
1850 			} else
1851 				break;
1852 		} while (true);
1853 
1854 		if (ret_val == -ENOMEM)
1855 			ioc_err(ioc,
1856 			    "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
1857 			    diag_register.requested_buffer_size>>10);
1858 		else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
1859 		    & MPT3_DIAG_BUFFER_IS_REGISTERED) {
1860 			ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
1861 			    diag_register.requested_buffer_size>>10);
1862 			if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
1863 				ioc->diag_buffer_status[
1864 				    MPI2_DIAG_BUF_TYPE_TRACE] |=
1865 				    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
1866 		}
1867 	}
1868 
1869 	if (bits_to_register & 2) {
1870 		ioc_info(ioc, "registering snapshot buffer support\n");
1871 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
1872 		/* register for 2MB buffers  */
1873 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
1874 		diag_register.unique_id = 0x7075901;
1875 		_ctl_diag_register_2(ioc,  &diag_register);
1876 	}
1877 
1878 	if (bits_to_register & 4) {
1879 		ioc_info(ioc, "registering extended buffer support\n");
1880 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
1881 		/* register for 2MB buffers  */
1882 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
1883 		diag_register.unique_id = 0x7075901;
1884 		_ctl_diag_register_2(ioc,  &diag_register);
1885 	}
1886 }
1887 
1888 /**
1889  * _ctl_diag_register - application register with driver
1890  * @ioc: per adapter object
1891  * @arg: user space buffer containing ioctl content
1892  *
1893  * This will allow the driver to setup any required buffers that will be
1894  * needed by firmware to communicate with the driver.
1895  */
1896 static long
1897 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1898 {
1899 	struct mpt3_diag_register karg;
1900 	long rc;
1901 
1902 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1903 		pr_err("failure at %s:%d/%s()!\n",
1904 		    __FILE__, __LINE__, __func__);
1905 		return -EFAULT;
1906 	}
1907 
1908 	rc = _ctl_diag_register_2(ioc, &karg);
1909 
1910 	if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
1911 	    MPT3_DIAG_BUFFER_IS_REGISTERED))
1912 		ioc->diag_buffer_status[karg.buffer_type] |=
1913 		    MPT3_DIAG_BUFFER_IS_APP_OWNED;
1914 
1915 	return rc;
1916 }
1917 
1918 /**
1919  * _ctl_diag_unregister - application unregister with driver
1920  * @ioc: per adapter object
1921  * @arg: user space buffer containing ioctl content
1922  *
1923  * This will allow the driver to cleanup any memory allocated for diag
1924  * messages and to free up any resources.
1925  */
1926 static long
1927 _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1928 {
1929 	struct mpt3_diag_unregister karg;
1930 	void *request_data;
1931 	dma_addr_t request_data_dma;
1932 	u32 request_data_sz;
1933 	u8 buffer_type;
1934 
1935 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1936 		pr_err("failure at %s:%d/%s()!\n",
1937 		    __FILE__, __LINE__, __func__);
1938 		return -EFAULT;
1939 	}
1940 
1941 	dctlprintk(ioc, ioc_info(ioc, "%s\n",
1942 				 __func__));
1943 
1944 	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
1945 	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
1946 		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
1947 		    __func__, karg.unique_id);
1948 		return -EINVAL;
1949 	}
1950 
1951 	if (!_ctl_diag_capability(ioc, buffer_type)) {
1952 		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
1953 			__func__, buffer_type);
1954 		return -EPERM;
1955 	}
1956 
1957 	if ((ioc->diag_buffer_status[buffer_type] &
1958 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1959 		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
1960 			__func__, buffer_type);
1961 		return -EINVAL;
1962 	}
1963 	if ((ioc->diag_buffer_status[buffer_type] &
1964 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
1965 		ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
1966 			__func__, buffer_type);
1967 		return -EINVAL;
1968 	}
1969 
1970 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
1971 		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
1972 			__func__, karg.unique_id);
1973 		return -EINVAL;
1974 	}
1975 
1976 	request_data = ioc->diag_buffer[buffer_type];
1977 	if (!request_data) {
1978 		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
1979 			__func__, buffer_type);
1980 		return -ENOMEM;
1981 	}
1982 
1983 	if (ioc->diag_buffer_status[buffer_type] &
1984 	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
1985 		ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
1986 		ioc->diag_buffer_status[buffer_type] &=
1987 		    ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
1988 		ioc->diag_buffer_status[buffer_type] &=
1989 		    ~MPT3_DIAG_BUFFER_IS_REGISTERED;
1990 	} else {
1991 		request_data_sz = ioc->diag_buffer_sz[buffer_type];
1992 		request_data_dma = ioc->diag_buffer_dma[buffer_type];
1993 		dma_free_coherent(&ioc->pdev->dev, request_data_sz,
1994 				request_data, request_data_dma);
1995 		ioc->diag_buffer[buffer_type] = NULL;
1996 		ioc->diag_buffer_status[buffer_type] = 0;
1997 	}
1998 	return 0;
1999 }
2000 
2001 /**
2002  * _ctl_diag_query - query relevant info associated with diag buffers
2003  * @ioc: per adapter object
2004  * @arg: user space buffer containing ioctl content
2005  *
2006  * The application will send only buffer_type and unique_id.  Driver will
2007  * inspect unique_id first, if valid, fill in all the info.  If unique_id is
2008  * 0x00, the driver will return info specified by Buffer Type.
2009  */
2010 static long
2011 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2012 {
2013 	struct mpt3_diag_query karg;
2014 	void *request_data;
2015 	int i;
2016 	u8 buffer_type;
2017 
2018 	if (copy_from_user(&karg, arg, sizeof(karg))) {
2019 		pr_err("failure at %s:%d/%s()!\n",
2020 		    __FILE__, __LINE__, __func__);
2021 		return -EFAULT;
2022 	}
2023 
2024 	dctlprintk(ioc, ioc_info(ioc, "%s\n",
2025 				 __func__));
2026 
2027 	karg.application_flags = 0;
2028 	buffer_type = karg.buffer_type;
2029 
2030 	if (!_ctl_diag_capability(ioc, buffer_type)) {
2031 		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
2032 			__func__, buffer_type);
2033 		return -EPERM;
2034 	}
2035 
2036 	if (!(ioc->diag_buffer_status[buffer_type] &
2037 	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
2038 		if ((ioc->diag_buffer_status[buffer_type] &
2039 		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2040 			ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
2041 				__func__, buffer_type);
2042 			return -EINVAL;
2043 		}
2044 	}
2045 
2046 	if (karg.unique_id) {
2047 		if (karg.unique_id != ioc->unique_id[buffer_type]) {
2048 			ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
2049 				__func__, karg.unique_id);
2050 			return -EINVAL;
2051 		}
2052 	}
2053 
2054 	request_data = ioc->diag_buffer[buffer_type];
2055 	if (!request_data) {
2056 		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
2057 			__func__, buffer_type);
2058 		return -ENOMEM;
2059 	}
2060 
2061 	if ((ioc->diag_buffer_status[buffer_type] &
2062 	    MPT3_DIAG_BUFFER_IS_REGISTERED))
2063 		karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;
2064 
2065 	if (!(ioc->diag_buffer_status[buffer_type] &
2066 	     MPT3_DIAG_BUFFER_IS_RELEASED))
2067 		karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
2068 
2069 	if (!(ioc->diag_buffer_status[buffer_type] &
2070 	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
2071 		karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
2072 
2073 	if ((ioc->diag_buffer_status[buffer_type] &
2074 	    MPT3_DIAG_BUFFER_IS_APP_OWNED))
2075 		karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;
2076 
2077 	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2078 		karg.product_specific[i] =
2079 		    ioc->product_specific[buffer_type][i];
2080 
2081 	karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
2082 	karg.driver_added_buffer_size = 0;
2083 	karg.unique_id = ioc->unique_id[buffer_type];
2084 	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
2085 
2086 	if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
2087 		ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
2088 			__func__, arg);
2089 		return -EFAULT;
2090 	}
2091 	return 0;
2092 }
2093 
2094 /**
2095  * mpt3sas_send_diag_release - Diag Release Message
2096  * @ioc: per adapter object
2097  * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
2098  * @issue_reset: specifies whether host reset is required.
2099  *
2100  */
2101 int
2102 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
2103 	u8 *issue_reset)
2104 {
2105 	Mpi2DiagReleaseRequest_t *mpi_request;
2106 	Mpi2DiagReleaseReply_t *mpi_reply;
2107 	u16 smid;
2108 	u16 ioc_status;
2109 	u32 ioc_state;
2110 	int rc;
2111 
2112 	dctlprintk(ioc, ioc_info(ioc, "%s\n",
2113 				 __func__));
2114 
2115 	rc = 0;
2116 	*issue_reset = 0;
2117 
2118 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
2119 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
2120 		if (ioc->diag_buffer_status[buffer_type] &
2121 		    MPT3_DIAG_BUFFER_IS_REGISTERED)
2122 			ioc->diag_buffer_status[buffer_type] |=
2123 			    MPT3_DIAG_BUFFER_IS_RELEASED;
2124 		dctlprintk(ioc,
2125 			   ioc_info(ioc, "%s: skipping due to FAULT state\n",
2126 				    __func__));
2127 		rc = -EAGAIN;
2128 		goto out;
2129 	}
2130 
2131 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2132 		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
2133 		rc = -EAGAIN;
2134 		goto out;
2135 	}
2136 
2137 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2138 	if (!smid) {
2139 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2140 		rc = -EAGAIN;
2141 		goto out;
2142 	}
2143 
2144 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2145 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2146 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2147 	ioc->ctl_cmds.smid = smid;
2148 
2149 	mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
2150 	mpi_request->BufferType = buffer_type;
2151 	mpi_request->VF_ID = 0; /* TODO */
2152 	mpi_request->VP_ID = 0;
2153 
2154 	init_completion(&ioc->ctl_cmds.done);
2155 	ioc->put_smid_default(ioc, smid);
2156 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
2157 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2158 
2159 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2160 		*issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
2161 				ioc->ctl_cmds.status, mpi_request,
2162 				sizeof(Mpi2DiagReleaseRequest_t)/4);
2163 		rc = -EFAULT;
2164 		goto out;
2165 	}
2166 
2167 	/* process the completed Reply Message Frame */
2168 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2169 		ioc_err(ioc, "%s: no reply message\n", __func__);
2170 		rc = -EFAULT;
2171 		goto out;
2172 	}
2173 
2174 	mpi_reply = ioc->ctl_cmds.reply;
2175 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2176 
2177 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2178 		ioc->diag_buffer_status[buffer_type] |=
2179 		    MPT3_DIAG_BUFFER_IS_RELEASED;
2180 		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
2181 	} else {
2182 		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2183 			 __func__,
2184 			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2185 		rc = -EFAULT;
2186 	}
2187 
2188  out:
2189 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2190 	return rc;
2191 }
2192 
2193 /**
2194  * _ctl_diag_release - request to send Diag Release Message to firmware
2195  * @ioc: ?
2196  * @arg: user space buffer containing ioctl content
2197  *
2198  * This allows ownership of the specified buffer to returned to the driver,
2199  * allowing an application to read the buffer without fear that firmware is
2200  * overwriting information in the buffer.
2201  */
2202 static long
2203 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2204 {
2205 	struct mpt3_diag_release karg;
2206 	void *request_data;
2207 	int rc;
2208 	u8 buffer_type;
2209 	u8 issue_reset = 0;
2210 
2211 	if (copy_from_user(&karg, arg, sizeof(karg))) {
2212 		pr_err("failure at %s:%d/%s()!\n",
2213 		    __FILE__, __LINE__, __func__);
2214 		return -EFAULT;
2215 	}
2216 
2217 	dctlprintk(ioc, ioc_info(ioc, "%s\n",
2218 				 __func__));
2219 
2220 	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
2221 	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
2222 		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
2223 		    __func__, karg.unique_id);
2224 		return -EINVAL;
2225 	}
2226 
2227 	if (!_ctl_diag_capability(ioc, buffer_type)) {
2228 		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
2229 			__func__, buffer_type);
2230 		return -EPERM;
2231 	}
2232 
2233 	if ((ioc->diag_buffer_status[buffer_type] &
2234 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2235 		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
2236 			__func__, buffer_type);
2237 		return -EINVAL;
2238 	}
2239 
2240 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
2241 		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
2242 			__func__, karg.unique_id);
2243 		return -EINVAL;
2244 	}
2245 
2246 	if (ioc->diag_buffer_status[buffer_type] &
2247 	    MPT3_DIAG_BUFFER_IS_RELEASED) {
2248 		ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
2249 			__func__, buffer_type);
2250 		return -EINVAL;
2251 	}
2252 
2253 	request_data = ioc->diag_buffer[buffer_type];
2254 
2255 	if (!request_data) {
2256 		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
2257 			__func__, buffer_type);
2258 		return -ENOMEM;
2259 	}
2260 
2261 	/* buffers were released by due to host reset */
2262 	if ((ioc->diag_buffer_status[buffer_type] &
2263 	    MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
2264 		ioc->diag_buffer_status[buffer_type] |=
2265 		    MPT3_DIAG_BUFFER_IS_RELEASED;
2266 		ioc->diag_buffer_status[buffer_type] &=
2267 		    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
2268 		ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
2269 			__func__, buffer_type);
2270 		return 0;
2271 	}
2272 
2273 	rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
2274 
2275 	if (issue_reset)
2276 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2277 
2278 	return rc;
2279 }
2280 
2281 /**
2282  * _ctl_diag_read_buffer - request for copy of the diag buffer
2283  * @ioc: per adapter object
2284  * @arg: user space buffer containing ioctl content
2285  */
2286 static long
2287 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2288 {
2289 	struct mpt3_diag_read_buffer karg;
2290 	struct mpt3_diag_read_buffer __user *uarg = arg;
2291 	void *request_data, *diag_data;
2292 	Mpi2DiagBufferPostRequest_t *mpi_request;
2293 	Mpi2DiagBufferPostReply_t *mpi_reply;
2294 	int rc, i;
2295 	u8 buffer_type;
2296 	unsigned long request_size, copy_size;
2297 	u16 smid;
2298 	u16 ioc_status;
2299 	u8 issue_reset = 0;
2300 
2301 	if (copy_from_user(&karg, arg, sizeof(karg))) {
2302 		pr_err("failure at %s:%d/%s()!\n",
2303 		    __FILE__, __LINE__, __func__);
2304 		return -EFAULT;
2305 	}
2306 
2307 	dctlprintk(ioc, ioc_info(ioc, "%s\n",
2308 				 __func__));
2309 
2310 	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
2311 	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
2312 		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
2313 		    __func__, karg.unique_id);
2314 		return -EINVAL;
2315 	}
2316 
2317 	if (!_ctl_diag_capability(ioc, buffer_type)) {
2318 		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
2319 			__func__, buffer_type);
2320 		return -EPERM;
2321 	}
2322 
2323 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
2324 		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
2325 			__func__, karg.unique_id);
2326 		return -EINVAL;
2327 	}
2328 
2329 	request_data = ioc->diag_buffer[buffer_type];
2330 	if (!request_data) {
2331 		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
2332 			__func__, buffer_type);
2333 		return -ENOMEM;
2334 	}
2335 
2336 	request_size = ioc->diag_buffer_sz[buffer_type];
2337 
2338 	if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
2339 		ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
2340 			__func__);
2341 		return -EINVAL;
2342 	}
2343 
2344 	if (karg.starting_offset > request_size)
2345 		return -EINVAL;
2346 
2347 	diag_data = (void *)(request_data + karg.starting_offset);
2348 	dctlprintk(ioc,
2349 		   ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
2350 			    __func__, diag_data, karg.starting_offset,
2351 			    karg.bytes_to_read));
2352 
2353 	/* Truncate data on requests that are too large */
2354 	if ((diag_data + karg.bytes_to_read < diag_data) ||
2355 	    (diag_data + karg.bytes_to_read > request_data + request_size))
2356 		copy_size = request_size - karg.starting_offset;
2357 	else
2358 		copy_size = karg.bytes_to_read;
2359 
2360 	if (copy_to_user((void __user *)uarg->diagnostic_data,
2361 	    diag_data, copy_size)) {
2362 		ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
2363 			__func__, diag_data);
2364 		return -EFAULT;
2365 	}
2366 
2367 	if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
2368 		return 0;
2369 
2370 	dctlprintk(ioc,
2371 		   ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
2372 			    __func__, buffer_type));
2373 	if ((ioc->diag_buffer_status[buffer_type] &
2374 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
2375 		dctlprintk(ioc,
2376 			   ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
2377 				    __func__, buffer_type));
2378 		return 0;
2379 	}
2380 	/* Get a free request frame and save the message context.
2381 	*/
2382 
2383 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2384 		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
2385 		rc = -EAGAIN;
2386 		goto out;
2387 	}
2388 
2389 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2390 	if (!smid) {
2391 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2392 		rc = -EAGAIN;
2393 		goto out;
2394 	}
2395 
2396 	rc = 0;
2397 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2398 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2399 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2400 	ioc->ctl_cmds.smid = smid;
2401 
2402 	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
2403 	mpi_request->BufferType = buffer_type;
2404 	mpi_request->BufferLength =
2405 	    cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
2406 	mpi_request->BufferAddress =
2407 	    cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
2408 	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2409 		mpi_request->ProductSpecific[i] =
2410 			cpu_to_le32(ioc->product_specific[buffer_type][i]);
2411 	mpi_request->VF_ID = 0; /* TODO */
2412 	mpi_request->VP_ID = 0;
2413 
2414 	init_completion(&ioc->ctl_cmds.done);
2415 	ioc->put_smid_default(ioc, smid);
2416 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
2417 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2418 
2419 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2420 		issue_reset =
2421 			mpt3sas_base_check_cmd_timeout(ioc,
2422 				ioc->ctl_cmds.status, mpi_request,
2423 				sizeof(Mpi2DiagBufferPostRequest_t)/4);
2424 		goto issue_host_reset;
2425 	}
2426 
2427 	/* process the completed Reply Message Frame */
2428 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2429 		ioc_err(ioc, "%s: no reply message\n", __func__);
2430 		rc = -EFAULT;
2431 		goto out;
2432 	}
2433 
2434 	mpi_reply = ioc->ctl_cmds.reply;
2435 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2436 
2437 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2438 		ioc->diag_buffer_status[buffer_type] |=
2439 		    MPT3_DIAG_BUFFER_IS_REGISTERED;
2440 		ioc->diag_buffer_status[buffer_type] &=
2441 		    ~MPT3_DIAG_BUFFER_IS_RELEASED;
2442 		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
2443 	} else {
2444 		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2445 			 __func__, ioc_status,
2446 			 le32_to_cpu(mpi_reply->IOCLogInfo));
2447 		rc = -EFAULT;
2448 	}
2449 
2450  issue_host_reset:
2451 	if (issue_reset)
2452 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2453 
2454  out:
2455 
2456 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2457 	return rc;
2458 }
2459 
2460 
2461 
2462 #ifdef CONFIG_COMPAT
2463 /**
2464  * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2465  * @ioc: per adapter object
2466  * @cmd: ioctl opcode
2467  * @arg: (struct mpt3_ioctl_command32)
2468  *
2469  * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
2470  */
2471 static long
2472 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
2473 	void __user *arg)
2474 {
2475 	struct mpt3_ioctl_command32 karg32;
2476 	struct mpt3_ioctl_command32 __user *uarg;
2477 	struct mpt3_ioctl_command karg;
2478 
2479 	if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
2480 		return -EINVAL;
2481 
2482 	uarg = (struct mpt3_ioctl_command32 __user *) arg;
2483 
2484 	if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2485 		pr_err("failure at %s:%d/%s()!\n",
2486 		    __FILE__, __LINE__, __func__);
2487 		return -EFAULT;
2488 	}
2489 
2490 	memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
2491 	karg.hdr.ioc_number = karg32.hdr.ioc_number;
2492 	karg.hdr.port_number = karg32.hdr.port_number;
2493 	karg.hdr.max_data_size = karg32.hdr.max_data_size;
2494 	karg.timeout = karg32.timeout;
2495 	karg.max_reply_bytes = karg32.max_reply_bytes;
2496 	karg.data_in_size = karg32.data_in_size;
2497 	karg.data_out_size = karg32.data_out_size;
2498 	karg.max_sense_bytes = karg32.max_sense_bytes;
2499 	karg.data_sge_offset = karg32.data_sge_offset;
2500 	karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2501 	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2502 	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2503 	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2504 	return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2505 }
2506 #endif
2507 
2508 /**
2509  * _ctl_ioctl_main - main ioctl entry point
2510  * @file:  (struct file)
2511  * @cmd:  ioctl opcode
2512  * @arg:  user space data buffer
2513  * @compat:  handles 32 bit applications in 64bit os
2514  * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
2515  * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
2516  */
2517 static long
2518 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2519 	u8 compat, u16 mpi_version)
2520 {
2521 	struct MPT3SAS_ADAPTER *ioc;
2522 	struct mpt3_ioctl_header ioctl_header;
2523 	enum block_state state;
2524 	long ret = -EINVAL;
2525 
2526 	/* get IOCTL header */
2527 	if (copy_from_user(&ioctl_header, (char __user *)arg,
2528 	    sizeof(struct mpt3_ioctl_header))) {
2529 		pr_err("failure at %s:%d/%s()!\n",
2530 		    __FILE__, __LINE__, __func__);
2531 		return -EFAULT;
2532 	}
2533 
2534 	if (_ctl_verify_adapter(ioctl_header.ioc_number,
2535 				&ioc, mpi_version) == -1 || !ioc)
2536 		return -ENODEV;
2537 
2538 	/* pci_access_mutex lock acquired by ioctl path */
2539 	mutex_lock(&ioc->pci_access_mutex);
2540 
2541 	if (ioc->shost_recovery || ioc->pci_error_recovery ||
2542 	    ioc->is_driver_loading || ioc->remove_host) {
2543 		ret = -EAGAIN;
2544 		goto out_unlock_pciaccess;
2545 	}
2546 
2547 	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2548 	if (state == NON_BLOCKING) {
2549 		if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
2550 			ret = -EAGAIN;
2551 			goto out_unlock_pciaccess;
2552 		}
2553 	} else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2554 		ret = -ERESTARTSYS;
2555 		goto out_unlock_pciaccess;
2556 	}
2557 
2558 
2559 	switch (cmd) {
2560 	case MPT3IOCINFO:
2561 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
2562 			ret = _ctl_getiocinfo(ioc, arg);
2563 		break;
2564 #ifdef CONFIG_COMPAT
2565 	case MPT3COMMAND32:
2566 #endif
2567 	case MPT3COMMAND:
2568 	{
2569 		struct mpt3_ioctl_command __user *uarg;
2570 		struct mpt3_ioctl_command karg;
2571 
2572 #ifdef CONFIG_COMPAT
2573 		if (compat) {
2574 			ret = _ctl_compat_mpt_command(ioc, cmd, arg);
2575 			break;
2576 		}
2577 #endif
2578 		if (copy_from_user(&karg, arg, sizeof(karg))) {
2579 			pr_err("failure at %s:%d/%s()!\n",
2580 			    __FILE__, __LINE__, __func__);
2581 			ret = -EFAULT;
2582 			break;
2583 		}
2584 
2585 		if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
2586 			ret = -EINVAL;
2587 			break;
2588 		}
2589 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
2590 			uarg = arg;
2591 			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2592 		}
2593 		break;
2594 	}
2595 	case MPT3EVENTQUERY:
2596 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
2597 			ret = _ctl_eventquery(ioc, arg);
2598 		break;
2599 	case MPT3EVENTENABLE:
2600 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
2601 			ret = _ctl_eventenable(ioc, arg);
2602 		break;
2603 	case MPT3EVENTREPORT:
2604 		ret = _ctl_eventreport(ioc, arg);
2605 		break;
2606 	case MPT3HARDRESET:
2607 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
2608 			ret = _ctl_do_reset(ioc, arg);
2609 		break;
2610 	case MPT3BTDHMAPPING:
2611 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
2612 			ret = _ctl_btdh_mapping(ioc, arg);
2613 		break;
2614 	case MPT3DIAGREGISTER:
2615 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
2616 			ret = _ctl_diag_register(ioc, arg);
2617 		break;
2618 	case MPT3DIAGUNREGISTER:
2619 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
2620 			ret = _ctl_diag_unregister(ioc, arg);
2621 		break;
2622 	case MPT3DIAGQUERY:
2623 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
2624 			ret = _ctl_diag_query(ioc, arg);
2625 		break;
2626 	case MPT3DIAGRELEASE:
2627 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
2628 			ret = _ctl_diag_release(ioc, arg);
2629 		break;
2630 	case MPT3DIAGREADBUFFER:
2631 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
2632 			ret = _ctl_diag_read_buffer(ioc, arg);
2633 		break;
2634 	default:
2635 		dctlprintk(ioc,
2636 			   ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
2637 				    cmd));
2638 		break;
2639 	}
2640 
2641 	mutex_unlock(&ioc->ctl_cmds.mutex);
2642 out_unlock_pciaccess:
2643 	mutex_unlock(&ioc->pci_access_mutex);
2644 	return ret;
2645 }
2646 
2647 /**
2648  * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
2649  * @file: (struct file)
2650  * @cmd: ioctl opcode
2651  * @arg: ?
2652  */
2653 static long
2654 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2655 {
2656 	long ret;
2657 
2658 	/* pass MPI25_VERSION | MPI26_VERSION value,
2659 	 * to indicate that this ioctl cmd
2660 	 * came from mpt3ctl ioctl device.
2661 	 */
2662 	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
2663 		MPI25_VERSION | MPI26_VERSION);
2664 	return ret;
2665 }
2666 
2667 /**
2668  * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
2669  * @file: (struct file)
2670  * @cmd: ioctl opcode
2671  * @arg: ?
2672  */
2673 static long
2674 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2675 {
2676 	long ret;
2677 
2678 	/* pass MPI2_VERSION value, to indicate that this ioctl cmd
2679 	 * came from mpt2ctl ioctl device.
2680 	 */
2681 	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
2682 	return ret;
2683 }
2684 #ifdef CONFIG_COMPAT
2685 /**
2686  *_ ctl_ioctl_compat - main ioctl entry point (compat)
2687  * @file: ?
2688  * @cmd: ?
2689  * @arg: ?
2690  *
2691  * This routine handles 32 bit applications in 64bit os.
2692  */
2693 static long
2694 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2695 {
2696 	long ret;
2697 
2698 	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
2699 		MPI25_VERSION | MPI26_VERSION);
2700 	return ret;
2701 }
2702 
2703 /**
2704  *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
2705  * @file: ?
2706  * @cmd: ?
2707  * @arg: ?
2708  *
2709  * This routine handles 32 bit applications in 64bit os.
2710  */
2711 static long
2712 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2713 {
2714 	long ret;
2715 
2716 	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
2717 	return ret;
2718 }
2719 #endif
2720 
2721 /* scsi host attributes */
2722 /**
2723  * version_fw_show - firmware version
2724  * @cdev: pointer to embedded class device
2725  * @attr: ?
2726  * @buf: the buffer returned
2727  *
2728  * A sysfs 'read-only' shost attribute.
2729  */
2730 static ssize_t
2731 version_fw_show(struct device *cdev, struct device_attribute *attr,
2732 	char *buf)
2733 {
2734 	struct Scsi_Host *shost = class_to_shost(cdev);
2735 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2736 
2737 	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2738 	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2739 	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2740 	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2741 	    ioc->facts.FWVersion.Word & 0x000000FF);
2742 }
2743 static DEVICE_ATTR_RO(version_fw);
2744 
2745 /**
2746  * version_bios_show - bios version
2747  * @cdev: pointer to embedded class device
2748  * @attr: ?
2749  * @buf: the buffer returned
2750  *
2751  * A sysfs 'read-only' shost attribute.
2752  */
2753 static ssize_t
2754 version_bios_show(struct device *cdev, struct device_attribute *attr,
2755 	char *buf)
2756 {
2757 	struct Scsi_Host *shost = class_to_shost(cdev);
2758 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2759 
2760 	u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2761 
2762 	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2763 	    (version & 0xFF000000) >> 24,
2764 	    (version & 0x00FF0000) >> 16,
2765 	    (version & 0x0000FF00) >> 8,
2766 	    version & 0x000000FF);
2767 }
2768 static DEVICE_ATTR_RO(version_bios);
2769 
2770 /**
2771  * version_mpi_show - MPI (message passing interface) version
2772  * @cdev: pointer to embedded class device
2773  * @attr: ?
2774  * @buf: the buffer returned
2775  *
2776  * A sysfs 'read-only' shost attribute.
2777  */
2778 static ssize_t
2779 version_mpi_show(struct device *cdev, struct device_attribute *attr,
2780 	char *buf)
2781 {
2782 	struct Scsi_Host *shost = class_to_shost(cdev);
2783 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2784 
2785 	return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
2786 	    ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2787 }
2788 static DEVICE_ATTR_RO(version_mpi);
2789 
2790 /**
2791  * version_product_show - product name
2792  * @cdev: pointer to embedded class device
2793  * @attr: ?
2794  * @buf: the buffer returned
2795  *
2796  * A sysfs 'read-only' shost attribute.
2797  */
2798 static ssize_t
2799 version_product_show(struct device *cdev, struct device_attribute *attr,
2800 	char *buf)
2801 {
2802 	struct Scsi_Host *shost = class_to_shost(cdev);
2803 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2804 
2805 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2806 }
2807 static DEVICE_ATTR_RO(version_product);
2808 
2809 /**
2810  * version_nvdata_persistent_show - ndvata persistent version
2811  * @cdev: pointer to embedded class device
2812  * @attr: ?
2813  * @buf: the buffer returned
2814  *
2815  * A sysfs 'read-only' shost attribute.
2816  */
2817 static ssize_t
2818 version_nvdata_persistent_show(struct device *cdev,
2819 	struct device_attribute *attr, char *buf)
2820 {
2821 	struct Scsi_Host *shost = class_to_shost(cdev);
2822 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2823 
2824 	return snprintf(buf, PAGE_SIZE, "%08xh\n",
2825 	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2826 }
2827 static DEVICE_ATTR_RO(version_nvdata_persistent);
2828 
2829 /**
2830  * version_nvdata_default_show - nvdata default version
2831  * @cdev: pointer to embedded class device
2832  * @attr: ?
2833  * @buf: the buffer returned
2834  *
2835  * A sysfs 'read-only' shost attribute.
2836  */
2837 static ssize_t
2838 version_nvdata_default_show(struct device *cdev, struct device_attribute
2839 	*attr, char *buf)
2840 {
2841 	struct Scsi_Host *shost = class_to_shost(cdev);
2842 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2843 
2844 	return snprintf(buf, PAGE_SIZE, "%08xh\n",
2845 	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2846 }
2847 static DEVICE_ATTR_RO(version_nvdata_default);
2848 
2849 /**
2850  * board_name_show - board name
2851  * @cdev: pointer to embedded class device
2852  * @attr: ?
2853  * @buf: the buffer returned
2854  *
2855  * A sysfs 'read-only' shost attribute.
2856  */
2857 static ssize_t
2858 board_name_show(struct device *cdev, struct device_attribute *attr,
2859 	char *buf)
2860 {
2861 	struct Scsi_Host *shost = class_to_shost(cdev);
2862 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2863 
2864 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2865 }
2866 static DEVICE_ATTR_RO(board_name);
2867 
2868 /**
2869  * board_assembly_show - board assembly name
2870  * @cdev: pointer to embedded class device
2871  * @attr: ?
2872  * @buf: the buffer returned
2873  *
2874  * A sysfs 'read-only' shost attribute.
2875  */
2876 static ssize_t
2877 board_assembly_show(struct device *cdev, struct device_attribute *attr,
2878 	char *buf)
2879 {
2880 	struct Scsi_Host *shost = class_to_shost(cdev);
2881 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2882 
2883 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2884 }
2885 static DEVICE_ATTR_RO(board_assembly);
2886 
2887 /**
2888  * board_tracer_show - board tracer number
2889  * @cdev: pointer to embedded class device
2890  * @attr: ?
2891  * @buf: the buffer returned
2892  *
2893  * A sysfs 'read-only' shost attribute.
2894  */
2895 static ssize_t
2896 board_tracer_show(struct device *cdev, struct device_attribute *attr,
2897 	char *buf)
2898 {
2899 	struct Scsi_Host *shost = class_to_shost(cdev);
2900 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2901 
2902 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2903 }
2904 static DEVICE_ATTR_RO(board_tracer);
2905 
2906 /**
2907  * io_delay_show - io missing delay
2908  * @cdev: pointer to embedded class device
2909  * @attr: ?
2910  * @buf: the buffer returned
2911  *
2912  * This is for firmware implemention for deboucing device
2913  * removal events.
2914  *
2915  * A sysfs 'read-only' shost attribute.
2916  */
2917 static ssize_t
2918 io_delay_show(struct device *cdev, struct device_attribute *attr,
2919 	char *buf)
2920 {
2921 	struct Scsi_Host *shost = class_to_shost(cdev);
2922 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2923 
2924 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
2925 }
2926 static DEVICE_ATTR_RO(io_delay);
2927 
2928 /**
2929  * device_delay_show - device missing delay
2930  * @cdev: pointer to embedded class device
2931  * @attr: ?
2932  * @buf: the buffer returned
2933  *
2934  * This is for firmware implemention for deboucing device
2935  * removal events.
2936  *
2937  * A sysfs 'read-only' shost attribute.
2938  */
2939 static ssize_t
2940 device_delay_show(struct device *cdev, struct device_attribute *attr,
2941 	char *buf)
2942 {
2943 	struct Scsi_Host *shost = class_to_shost(cdev);
2944 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2945 
2946 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
2947 }
2948 static DEVICE_ATTR_RO(device_delay);
2949 
2950 /**
2951  * fw_queue_depth_show - global credits
2952  * @cdev: pointer to embedded class device
2953  * @attr: ?
2954  * @buf: the buffer returned
2955  *
2956  * This is firmware queue depth limit
2957  *
2958  * A sysfs 'read-only' shost attribute.
2959  */
2960 static ssize_t
2961 fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
2962 	char *buf)
2963 {
2964 	struct Scsi_Host *shost = class_to_shost(cdev);
2965 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2966 
2967 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
2968 }
2969 static DEVICE_ATTR_RO(fw_queue_depth);
2970 
2971 /**
2972  * sas_address_show - sas address
2973  * @cdev: pointer to embedded class device
2974  * @attr: ?
2975  * @buf: the buffer returned
2976  *
2977  * This is the controller sas address
2978  *
2979  * A sysfs 'read-only' shost attribute.
2980  */
2981 static ssize_t
2982 host_sas_address_show(struct device *cdev, struct device_attribute *attr,
2983 	char *buf)
2984 
2985 {
2986 	struct Scsi_Host *shost = class_to_shost(cdev);
2987 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2988 
2989 	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
2990 	    (unsigned long long)ioc->sas_hba.sas_address);
2991 }
2992 static DEVICE_ATTR_RO(host_sas_address);
2993 
2994 /**
2995  * logging_level_show - logging level
2996  * @cdev: pointer to embedded class device
2997  * @attr: ?
2998  * @buf: the buffer returned
2999  *
3000  * A sysfs 'read/write' shost attribute.
3001  */
3002 static ssize_t
3003 logging_level_show(struct device *cdev, struct device_attribute *attr,
3004 	char *buf)
3005 {
3006 	struct Scsi_Host *shost = class_to_shost(cdev);
3007 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3008 
3009 	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
3010 }
3011 static ssize_t
3012 logging_level_store(struct device *cdev, struct device_attribute *attr,
3013 	const char *buf, size_t count)
3014 {
3015 	struct Scsi_Host *shost = class_to_shost(cdev);
3016 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3017 	int val = 0;
3018 
3019 	if (sscanf(buf, "%x", &val) != 1)
3020 		return -EINVAL;
3021 
3022 	ioc->logging_level = val;
3023 	ioc_info(ioc, "logging_level=%08xh\n",
3024 		 ioc->logging_level);
3025 	return strlen(buf);
3026 }
3027 static DEVICE_ATTR_RW(logging_level);
3028 
3029 /**
3030  * fwfault_debug_show - show/store fwfault_debug
3031  * @cdev: pointer to embedded class device
3032  * @attr: ?
3033  * @buf: the buffer returned
3034  *
3035  * mpt3sas_fwfault_debug is command line option
3036  * A sysfs 'read/write' shost attribute.
3037  */
3038 static ssize_t
3039 fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
3040 	char *buf)
3041 {
3042 	struct Scsi_Host *shost = class_to_shost(cdev);
3043 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3044 
3045 	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
3046 }
3047 static ssize_t
3048 fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
3049 	const char *buf, size_t count)
3050 {
3051 	struct Scsi_Host *shost = class_to_shost(cdev);
3052 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3053 	int val = 0;
3054 
3055 	if (sscanf(buf, "%d", &val) != 1)
3056 		return -EINVAL;
3057 
3058 	ioc->fwfault_debug = val;
3059 	ioc_info(ioc, "fwfault_debug=%d\n",
3060 		 ioc->fwfault_debug);
3061 	return strlen(buf);
3062 }
3063 static DEVICE_ATTR_RW(fwfault_debug);
3064 
3065 /**
3066  * ioc_reset_count_show - ioc reset count
3067  * @cdev: pointer to embedded class device
3068  * @attr: ?
3069  * @buf: the buffer returned
3070  *
3071  * This is firmware queue depth limit
3072  *
3073  * A sysfs 'read-only' shost attribute.
3074  */
3075 static ssize_t
3076 ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
3077 	char *buf)
3078 {
3079 	struct Scsi_Host *shost = class_to_shost(cdev);
3080 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3081 
3082 	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
3083 }
3084 static DEVICE_ATTR_RO(ioc_reset_count);
3085 
3086 /**
3087  * reply_queue_count_show - number of reply queues
3088  * @cdev: pointer to embedded class device
3089  * @attr: ?
3090  * @buf: the buffer returned
3091  *
3092  * This is number of reply queues
3093  *
3094  * A sysfs 'read-only' shost attribute.
3095  */
3096 static ssize_t
3097 reply_queue_count_show(struct device *cdev,
3098 	struct device_attribute *attr, char *buf)
3099 {
3100 	u8 reply_queue_count;
3101 	struct Scsi_Host *shost = class_to_shost(cdev);
3102 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3103 
3104 	if ((ioc->facts.IOCCapabilities &
3105 	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
3106 		reply_queue_count = ioc->reply_queue_count;
3107 	else
3108 		reply_queue_count = 1;
3109 
3110 	return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
3111 }
3112 static DEVICE_ATTR_RO(reply_queue_count);
3113 
3114 /**
3115  * BRM_status_show - Backup Rail Monitor Status
3116  * @cdev: pointer to embedded class device
3117  * @attr: ?
3118  * @buf: the buffer returned
3119  *
3120  * This is number of reply queues
3121  *
3122  * A sysfs 'read-only' shost attribute.
3123  */
3124 static ssize_t
3125 BRM_status_show(struct device *cdev, struct device_attribute *attr,
3126 	char *buf)
3127 {
3128 	struct Scsi_Host *shost = class_to_shost(cdev);
3129 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3130 	Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
3131 	Mpi2ConfigReply_t mpi_reply;
3132 	u16 backup_rail_monitor_status = 0;
3133 	u16 ioc_status;
3134 	int sz;
3135 	ssize_t rc = 0;
3136 
3137 	if (!ioc->is_warpdrive) {
3138 		ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
3139 			__func__);
3140 		goto out;
3141 	}
3142 	/* pci_access_mutex lock acquired by sysfs show path */
3143 	mutex_lock(&ioc->pci_access_mutex);
3144 	if (ioc->pci_error_recovery || ioc->remove_host) {
3145 		mutex_unlock(&ioc->pci_access_mutex);
3146 		return 0;
3147 	}
3148 
3149 	/* allocate upto GPIOVal 36 entries */
3150 	sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
3151 	io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
3152 	if (!io_unit_pg3) {
3153 		ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
3154 			__func__, sz);
3155 		goto out;
3156 	}
3157 
3158 	if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
3159 	    0) {
3160 		ioc_err(ioc, "%s: failed reading iounit_pg3\n",
3161 			__func__);
3162 		goto out;
3163 	}
3164 
3165 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3166 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3167 		ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
3168 			__func__, ioc_status);
3169 		goto out;
3170 	}
3171 
3172 	if (io_unit_pg3->GPIOCount < 25) {
3173 		ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
3174 			__func__, io_unit_pg3->GPIOCount);
3175 		goto out;
3176 	}
3177 
3178 	/* BRM status is in bit zero of GPIOVal[24] */
3179 	backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
3180 	rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
3181 
3182  out:
3183 	kfree(io_unit_pg3);
3184 	mutex_unlock(&ioc->pci_access_mutex);
3185 	return rc;
3186 }
3187 static DEVICE_ATTR_RO(BRM_status);
3188 
/* Header layout at the start of an extended (trace) diag buffer. */
struct DIAG_BUFFER_START {
	__le32	Size;		/* total size of the diag buffer */
	__le32	DiagVersion;	/* buffer format version; see
				 * host_trace_buffer_size_show() for the
				 * accepted values */
	u8	BufferType;
	u8	Reserved[3];
	__le32	Reserved1;
	__le32	Reserved2;
	__le32	Reserved3;	/* compared against 0x4742444c ("LDBG")
				 * to validate the header */
};
3198 
3199 /**
3200  * host_trace_buffer_size_show - host buffer size (trace only)
3201  * @cdev: pointer to embedded class device
3202  * @attr: ?
3203  * @buf: the buffer returned
3204  *
3205  * A sysfs 'read-only' shost attribute.
3206  */
3207 static ssize_t
3208 host_trace_buffer_size_show(struct device *cdev,
3209 	struct device_attribute *attr, char *buf)
3210 {
3211 	struct Scsi_Host *shost = class_to_shost(cdev);
3212 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3213 	u32 size = 0;
3214 	struct DIAG_BUFFER_START *request_data;
3215 
3216 	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3217 		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3218 			__func__);
3219 		return 0;
3220 	}
3221 
3222 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3223 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3224 		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3225 			__func__);
3226 		return 0;
3227 	}
3228 
3229 	request_data = (struct DIAG_BUFFER_START *)
3230 	    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
3231 	if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
3232 	    le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
3233 	    le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
3234 	    le32_to_cpu(request_data->Reserved3) == 0x4742444c)
3235 		size = le32_to_cpu(request_data->Size);
3236 
3237 	ioc->ring_buffer_sz = size;
3238 	return snprintf(buf, PAGE_SIZE, "%d\n", size);
3239 }
3240 static DEVICE_ATTR_RO(host_trace_buffer_size);
3241 
3242 /**
3243  * host_trace_buffer_show - firmware ring buffer (trace only)
3244  * @cdev: pointer to embedded class device
3245  * @attr: ?
3246  * @buf: the buffer returned
3247  *
3248  * A sysfs 'read/write' shost attribute.
3249  *
3250  * You will only be able to read 4k bytes of ring buffer at a time.
3251  * In order to read beyond 4k bytes, you will have to write out the
3252  * offset to the same attribute, it will move the pointer.
3253  */
3254 static ssize_t
3255 host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
3256 	char *buf)
3257 {
3258 	struct Scsi_Host *shost = class_to_shost(cdev);
3259 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3260 	void *request_data;
3261 	u32 size;
3262 
3263 	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3264 		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3265 			__func__);
3266 		return 0;
3267 	}
3268 
3269 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3270 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3271 		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3272 			__func__);
3273 		return 0;
3274 	}
3275 
3276 	if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
3277 		return 0;
3278 
3279 	size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
3280 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3281 	request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
3282 	memcpy(buf, request_data, size);
3283 	return size;
3284 }
3285 
3286 static ssize_t
3287 host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
3288 	const char *buf, size_t count)
3289 {
3290 	struct Scsi_Host *shost = class_to_shost(cdev);
3291 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3292 	int val = 0;
3293 
3294 	if (sscanf(buf, "%d", &val) != 1)
3295 		return -EINVAL;
3296 
3297 	ioc->ring_buffer_offset = val;
3298 	return strlen(buf);
3299 }
3300 static DEVICE_ATTR_RW(host_trace_buffer);
3301 
3302 
3303 /*****************************************/
3304 
3305 /**
3306  * host_trace_buffer_enable_show - firmware ring buffer (trace only)
3307  * @cdev: pointer to embedded class device
3308  * @attr: ?
3309  * @buf: the buffer returned
3310  *
3311  * A sysfs 'read/write' shost attribute.
3312  *
3313  * This is a mechnism to post/release host_trace_buffers
3314  */
3315 static ssize_t
3316 host_trace_buffer_enable_show(struct device *cdev,
3317 	struct device_attribute *attr, char *buf)
3318 {
3319 	struct Scsi_Host *shost = class_to_shost(cdev);
3320 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3321 
3322 	if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
3323 	   ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3324 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
3325 		return snprintf(buf, PAGE_SIZE, "off\n");
3326 	else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3327 	    MPT3_DIAG_BUFFER_IS_RELEASED))
3328 		return snprintf(buf, PAGE_SIZE, "release\n");
3329 	else
3330 		return snprintf(buf, PAGE_SIZE, "post\n");
3331 }
3332 
/**
 * host_trace_buffer_enable_store - post/release the host trace buffer
 * @cdev: pointer to embedded class device
 * @attr: device attribute descriptor (unused)
 * @buf: "post" or "release" command string
 * @count: number of bytes written
 *
 * Writing "post" registers a trace diag buffer with the controller;
 * writing "release" asks firmware to release a registered buffer.
 * Any other string is silently ignored (the write still succeeds).
 */
static ssize_t
host_trace_buffer_enable_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	char str[10] = "";
	struct mpt3_diag_register diag_register;
	u8 issue_reset = 0;

	/* don't allow post/release occurr while recovery is active */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery || ioc->is_driver_loading)
		return -EBUSY;

	if (sscanf(buf, "%9s", str) != 1)
		return -EINVAL;

	if (!strcmp(str, "post")) {
		/* exit out if host buffers are already posted */
		if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
		    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
		    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
			goto out;
		memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
		ioc_info(ioc, "posting host trace buffers\n");
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;

		if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
		    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
			/* post the same buffer allocated previously */
			diag_register.requested_buffer_size =
			    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
		} else {
			/*
			 * Free the diag buffer memory which was previously
			 * allocated by an application.
			 */
			if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
			    &&
			    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
			    MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
				pci_free_consistent(ioc->pdev,
				    ioc->diag_buffer_sz[
				    MPI2_DIAG_BUF_TYPE_TRACE],
				    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
				    ioc->diag_buffer_dma[
				    MPI2_DIAG_BUF_TYPE_TRACE]);
				ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
				    NULL;
			}

			/* fall back to a driver-allocated 1 MB buffer */
			diag_register.requested_buffer_size = (1024 * 1024);
		}

		/* unique id differs between MPI2 (SAS2) and MPI2.5+ HBAs */
		diag_register.unique_id =
		    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
		    (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
		ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
		_ctl_diag_register_2(ioc,  &diag_register);
		if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) {
			ioc_info(ioc,
			    "Trace buffer %d KB allocated through sysfs\n",
			    diag_register.requested_buffer_size>>10);
			if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
				ioc->diag_buffer_status[
				    MPI2_DIAG_BUF_TYPE_TRACE] |=
				    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
		}
	} else if (!strcmp(str, "release")) {
		/* exit out if host buffers are already released */
		if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
			goto out;
		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
			goto out;
		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_RELEASED))
			goto out;
		ioc_info(ioc, "releasing host trace buffer\n");
		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
		    &issue_reset);
	}

 out:
	return strlen(buf);
}
static DEVICE_ATTR_RW(host_trace_buffer_enable);
3424 
3425 /*********** diagnostic trigger suppport *********************************/
3426 
3427 /**
3428  * diag_trigger_master_show - show the diag_trigger_master attribute
3429  * @cdev: pointer to embedded class device
3430  * @attr: ?
3431  * @buf: the buffer returned
3432  *
3433  * A sysfs 'read/write' shost attribute.
3434  */
3435 static ssize_t
3436 diag_trigger_master_show(struct device *cdev,
3437 	struct device_attribute *attr, char *buf)
3438 
3439 {
3440 	struct Scsi_Host *shost = class_to_shost(cdev);
3441 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3442 	unsigned long flags;
3443 	ssize_t rc;
3444 
3445 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3446 	rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
3447 	memcpy(buf, &ioc->diag_trigger_master, rc);
3448 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3449 	return rc;
3450 }
3451 
3452 /**
3453  * diag_trigger_master_store - store the diag_trigger_master attribute
3454  * @cdev: pointer to embedded class device
3455  * @attr: ?
3456  * @buf: the buffer returned
3457  * @count: ?
3458  *
3459  * A sysfs 'read/write' shost attribute.
3460  */
3461 static ssize_t
3462 diag_trigger_master_store(struct device *cdev,
3463 	struct device_attribute *attr, const char *buf, size_t count)
3464 
3465 {
3466 	struct Scsi_Host *shost = class_to_shost(cdev);
3467 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3468 	unsigned long flags;
3469 	ssize_t rc;
3470 
3471 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3472 	rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
3473 	memset(&ioc->diag_trigger_master, 0,
3474 	    sizeof(struct SL_WH_MASTER_TRIGGER_T));
3475 	memcpy(&ioc->diag_trigger_master, buf, rc);
3476 	ioc->diag_trigger_master.MasterData |=
3477 	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
3478 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3479 	return rc;
3480 }
3481 static DEVICE_ATTR_RW(diag_trigger_master);
3482 
3483 
3484 /**
3485  * diag_trigger_event_show - show the diag_trigger_event attribute
3486  * @cdev: pointer to embedded class device
3487  * @attr: ?
3488  * @buf: the buffer returned
3489  *
3490  * A sysfs 'read/write' shost attribute.
3491  */
3492 static ssize_t
3493 diag_trigger_event_show(struct device *cdev,
3494 	struct device_attribute *attr, char *buf)
3495 {
3496 	struct Scsi_Host *shost = class_to_shost(cdev);
3497 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3498 	unsigned long flags;
3499 	ssize_t rc;
3500 
3501 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3502 	rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
3503 	memcpy(buf, &ioc->diag_trigger_event, rc);
3504 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3505 	return rc;
3506 }
3507 
3508 /**
3509  * diag_trigger_event_store - store the diag_trigger_event attribute
3510  * @cdev: pointer to embedded class device
3511  * @attr: ?
3512  * @buf: the buffer returned
3513  * @count: ?
3514  *
3515  * A sysfs 'read/write' shost attribute.
3516  */
3517 static ssize_t
3518 diag_trigger_event_store(struct device *cdev,
3519 	struct device_attribute *attr, const char *buf, size_t count)
3520 
3521 {
3522 	struct Scsi_Host *shost = class_to_shost(cdev);
3523 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3524 	unsigned long flags;
3525 	ssize_t sz;
3526 
3527 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3528 	sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3529 	memset(&ioc->diag_trigger_event, 0,
3530 	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3531 	memcpy(&ioc->diag_trigger_event, buf, sz);
3532 	if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
3533 		ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
3534 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3535 	return sz;
3536 }
3537 static DEVICE_ATTR_RW(diag_trigger_event);
3538 
3539 
3540 /**
3541  * diag_trigger_scsi_show - show the diag_trigger_scsi attribute
3542  * @cdev: pointer to embedded class device
3543  * @attr: ?
3544  * @buf: the buffer returned
3545  *
3546  * A sysfs 'read/write' shost attribute.
3547  */
3548 static ssize_t
3549 diag_trigger_scsi_show(struct device *cdev,
3550 	struct device_attribute *attr, char *buf)
3551 {
3552 	struct Scsi_Host *shost = class_to_shost(cdev);
3553 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3554 	unsigned long flags;
3555 	ssize_t rc;
3556 
3557 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3558 	rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
3559 	memcpy(buf, &ioc->diag_trigger_scsi, rc);
3560 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3561 	return rc;
3562 }
3563 
3564 /**
3565  * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
3566  * @cdev: pointer to embedded class device
3567  * @attr: ?
3568  * @buf: the buffer returned
3569  * @count: ?
3570  *
3571  * A sysfs 'read/write' shost attribute.
3572  */
3573 static ssize_t
3574 diag_trigger_scsi_store(struct device *cdev,
3575 	struct device_attribute *attr, const char *buf, size_t count)
3576 {
3577 	struct Scsi_Host *shost = class_to_shost(cdev);
3578 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3579 	unsigned long flags;
3580 	ssize_t sz;
3581 
3582 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3583 	sz = min(sizeof(ioc->diag_trigger_scsi), count);
3584 	memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
3585 	memcpy(&ioc->diag_trigger_scsi, buf, sz);
3586 	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
3587 		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
3588 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3589 	return sz;
3590 }
3591 static DEVICE_ATTR_RW(diag_trigger_scsi);
3592 
3593 
3594 /**
3595  * diag_trigger_scsi_show - show the diag_trigger_mpi attribute
3596  * @cdev: pointer to embedded class device
3597  * @attr: ?
3598  * @buf: the buffer returned
3599  *
3600  * A sysfs 'read/write' shost attribute.
3601  */
3602 static ssize_t
3603 diag_trigger_mpi_show(struct device *cdev,
3604 	struct device_attribute *attr, char *buf)
3605 {
3606 	struct Scsi_Host *shost = class_to_shost(cdev);
3607 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3608 	unsigned long flags;
3609 	ssize_t rc;
3610 
3611 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3612 	rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
3613 	memcpy(buf, &ioc->diag_trigger_mpi, rc);
3614 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3615 	return rc;
3616 }
3617 
3618 /**
3619  * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
3620  * @cdev: pointer to embedded class device
3621  * @attr: ?
3622  * @buf: the buffer returned
3623  * @count: ?
3624  *
3625  * A sysfs 'read/write' shost attribute.
3626  */
3627 static ssize_t
3628 diag_trigger_mpi_store(struct device *cdev,
3629 	struct device_attribute *attr, const char *buf, size_t count)
3630 {
3631 	struct Scsi_Host *shost = class_to_shost(cdev);
3632 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3633 	unsigned long flags;
3634 	ssize_t sz;
3635 
3636 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3637 	sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3638 	memset(&ioc->diag_trigger_mpi, 0,
3639 	    sizeof(ioc->diag_trigger_mpi));
3640 	memcpy(&ioc->diag_trigger_mpi, buf, sz);
3641 	if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
3642 		ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
3643 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3644 	return sz;
3645 }
3646 
3647 static DEVICE_ATTR_RW(diag_trigger_mpi);
3648 
3649 /*********** diagnostic trigger suppport *** END ****************************/
3650 
3651 /*****************************************/
3652 
3653 /**
3654  * drv_support_bitmap_show - driver supported feature bitmap
3655  * @cdev - pointer to embedded class device
3656  * @buf - the buffer returned
3657  *
3658  * A sysfs 'read-only' shost attribute.
3659  */
3660 static ssize_t
3661 drv_support_bitmap_show(struct device *cdev,
3662 	struct device_attribute *attr, char *buf)
3663 {
3664 	struct Scsi_Host *shost = class_to_shost(cdev);
3665 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3666 
3667 	return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
3668 }
3669 static DEVICE_ATTR_RO(drv_support_bitmap);
3670 
3671 /**
3672  * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
3673  * @cdev - pointer to embedded class device
3674  * @buf - the buffer returned
3675  *
3676  * A sysfs read/write shost attribute. This attribute is used to set the
3677  * targets queue depth to HBA IO queue depth if this attribute is enabled.
3678  */
3679 static ssize_t
3680 enable_sdev_max_qd_show(struct device *cdev,
3681 	struct device_attribute *attr, char *buf)
3682 {
3683 	struct Scsi_Host *shost = class_to_shost(cdev);
3684 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3685 
3686 	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
3687 }
3688 
3689 /**
3690  * enable_sdev_max_qd_store - Enable/disable sdev max qd
3691  * @cdev - pointer to embedded class device
3692  * @buf - the buffer returned
3693  *
3694  * A sysfs read/write shost attribute. This attribute is used to set the
3695  * targets queue depth to HBA IO queue depth if this attribute is enabled.
3696  * If this attribute is disabled then targets will have corresponding default
3697  * queue depth.
3698  */
3699 static ssize_t
3700 enable_sdev_max_qd_store(struct device *cdev,
3701 	struct device_attribute *attr, const char *buf, size_t count)
3702 {
3703 	struct Scsi_Host *shost = class_to_shost(cdev);
3704 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3705 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3706 	struct MPT3SAS_TARGET *sas_target_priv_data;
3707 	int val = 0;
3708 	struct scsi_device *sdev;
3709 	struct _raid_device *raid_device;
3710 	int qdepth;
3711 
3712 	if (kstrtoint(buf, 0, &val) != 0)
3713 		return -EINVAL;
3714 
3715 	switch (val) {
3716 	case 0:
3717 		ioc->enable_sdev_max_qd = 0;
3718 		shost_for_each_device(sdev, ioc->shost) {
3719 			sas_device_priv_data = sdev->hostdata;
3720 			if (!sas_device_priv_data)
3721 				continue;
3722 			sas_target_priv_data = sas_device_priv_data->sas_target;
3723 			if (!sas_target_priv_data)
3724 				continue;
3725 
3726 			if (sas_target_priv_data->flags &
3727 			    MPT_TARGET_FLAGS_VOLUME) {
3728 				raid_device =
3729 				    mpt3sas_raid_device_find_by_handle(ioc,
3730 				    sas_target_priv_data->handle);
3731 
3732 				switch (raid_device->volume_type) {
3733 				case MPI2_RAID_VOL_TYPE_RAID0:
3734 					if (raid_device->device_info &
3735 					    MPI2_SAS_DEVICE_INFO_SSP_TARGET)
3736 						qdepth =
3737 						    MPT3SAS_SAS_QUEUE_DEPTH;
3738 					else
3739 						qdepth =
3740 						    MPT3SAS_SATA_QUEUE_DEPTH;
3741 					break;
3742 				case MPI2_RAID_VOL_TYPE_RAID1E:
3743 				case MPI2_RAID_VOL_TYPE_RAID1:
3744 				case MPI2_RAID_VOL_TYPE_RAID10:
3745 				case MPI2_RAID_VOL_TYPE_UNKNOWN:
3746 				default:
3747 					qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
3748 				}
3749 			} else if (sas_target_priv_data->flags &
3750 			    MPT_TARGET_FLAGS_PCIE_DEVICE)
3751 				qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
3752 			else
3753 				qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
3754 
3755 			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
3756 		}
3757 		break;
3758 	case 1:
3759 		ioc->enable_sdev_max_qd = 1;
3760 		shost_for_each_device(sdev, ioc->shost)
3761 			mpt3sas_scsih_change_queue_depth(sdev,
3762 			    shost->can_queue);
3763 		break;
3764 	default:
3765 		return -EINVAL;
3766 	}
3767 
3768 	return strlen(buf);
3769 }
3770 static DEVICE_ATTR_RW(enable_sdev_max_qd);
3771 
/* shost attributes exported under /sys/class/scsi_host/hostN/ */
struct device_attribute *mpt3sas_host_attrs[] = {
	&dev_attr_version_fw,
	&dev_attr_version_bios,
	&dev_attr_version_mpi,
	&dev_attr_version_product,
	&dev_attr_version_nvdata_persistent,
	&dev_attr_version_nvdata_default,
	&dev_attr_board_name,
	&dev_attr_board_assembly,
	&dev_attr_board_tracer,
	&dev_attr_io_delay,
	&dev_attr_device_delay,
	&dev_attr_logging_level,
	&dev_attr_fwfault_debug,
	&dev_attr_fw_queue_depth,
	&dev_attr_host_sas_address,
	&dev_attr_ioc_reset_count,
	&dev_attr_host_trace_buffer_size,
	&dev_attr_host_trace_buffer,
	&dev_attr_host_trace_buffer_enable,
	&dev_attr_reply_queue_count,
	&dev_attr_diag_trigger_master,
	&dev_attr_diag_trigger_event,
	&dev_attr_diag_trigger_scsi,
	&dev_attr_diag_trigger_mpi,
	&dev_attr_drv_support_bitmap,
	&dev_attr_BRM_status,
	&dev_attr_enable_sdev_max_qd,
	NULL,	/* required terminator */
};
3802 
3803 /* device attributes */
3804 
3805 /**
3806  * sas_address_show - sas address
3807  * @dev: pointer to embedded class device
3808  * @attr: ?
3809  * @buf: the buffer returned
3810  *
3811  * This is the sas address for the target
3812  *
3813  * A sysfs 'read-only' shost attribute.
3814  */
3815 static ssize_t
3816 sas_address_show(struct device *dev, struct device_attribute *attr,
3817 	char *buf)
3818 {
3819 	struct scsi_device *sdev = to_scsi_device(dev);
3820 	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3821 
3822 	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3823 	    (unsigned long long)sas_device_priv_data->sas_target->sas_address);
3824 }
3825 static DEVICE_ATTR_RO(sas_address);
3826 
3827 /**
3828  * sas_device_handle_show - device handle
3829  * @dev: pointer to embedded class device
3830  * @attr: ?
3831  * @buf: the buffer returned
3832  *
3833  * This is the firmware assigned device handle
3834  *
3835  * A sysfs 'read-only' shost attribute.
3836  */
3837 static ssize_t
3838 sas_device_handle_show(struct device *dev, struct device_attribute *attr,
3839 	char *buf)
3840 {
3841 	struct scsi_device *sdev = to_scsi_device(dev);
3842 	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3843 
3844 	return snprintf(buf, PAGE_SIZE, "0x%04x\n",
3845 	    sas_device_priv_data->sas_target->handle);
3846 }
3847 static DEVICE_ATTR_RO(sas_device_handle);
3848 
3849 /**
3850  * sas_ncq_io_prio_show - send prioritized io commands to device
3851  * @dev: pointer to embedded device
3852  * @attr: ?
3853  * @buf: the buffer returned
3854  *
3855  * A sysfs 'read/write' sdev attribute, only works with SATA
3856  */
3857 static ssize_t
3858 sas_ncq_prio_enable_show(struct device *dev,
3859 				 struct device_attribute *attr, char *buf)
3860 {
3861 	struct scsi_device *sdev = to_scsi_device(dev);
3862 	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3863 
3864 	return snprintf(buf, PAGE_SIZE, "%d\n",
3865 			sas_device_priv_data->ncq_prio_enable);
3866 }
3867 
3868 static ssize_t
3869 sas_ncq_prio_enable_store(struct device *dev,
3870 				  struct device_attribute *attr,
3871 				  const char *buf, size_t count)
3872 {
3873 	struct scsi_device *sdev = to_scsi_device(dev);
3874 	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3875 	bool ncq_prio_enable = 0;
3876 
3877 	if (kstrtobool(buf, &ncq_prio_enable))
3878 		return -EINVAL;
3879 
3880 	if (!scsih_ncq_prio_supp(sdev))
3881 		return -EINVAL;
3882 
3883 	sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
3884 	return strlen(buf);
3885 }
3886 static DEVICE_ATTR_RW(sas_ncq_prio_enable);
3887 
/* scsi device (sdev) attributes exported via sysfs; NULL-terminated,
 * consumed by the scsi host template's sdev_attrs field.
 */
struct device_attribute *mpt3sas_dev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_sas_device_handle,
	&dev_attr_sas_ncq_prio_enable,
	NULL,
};
3894 
/* file operations table for mpt3ctl device (gen3 HBAs);
 * no llseek/read/write - the interface is ioctl driven, with poll/fasync
 * used to signal diag-buffer events to userspace.
 */
static const struct file_operations ctl_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	/* 32-bit userspace on a 64-bit kernel */
	.compat_ioctl = _ctl_ioctl_compat,
#endif
};
3905 
/* file operations table for mpt2ctl device (gen2 HBAs); mirrors ctl_fops
 * but routes ioctls through the mpt2-compatible entry points.
 */
static const struct file_operations ctl_gen2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_mpt2_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	/* 32-bit userspace on a 64-bit kernel */
	.compat_ioctl = _ctl_mpt2_ioctl_compat,
#endif
};
3916 
/* misc char device for gen3 HBAs (/dev/mpt3ctl) */
static struct miscdevice ctl_dev = {
	.minor  = MPT3SAS_MINOR,
	.name   = MPT3SAS_DEV_NAME,
	.fops   = &ctl_fops,
};
3922 
/* misc char device for gen2 HBAs (/dev/mpt2ctl) */
static struct miscdevice gen2_ctl_dev = {
	.minor  = MPT2SAS_MINOR,
	.name   = MPT2SAS_DEV_NAME,
	.fops   = &ctl_gen2_fops,
};
3928 
3929 /**
3930  * mpt3sas_ctl_init - main entry point for ctl.
3931  * @hbas_to_enumerate: ?
3932  */
3933 void
3934 mpt3sas_ctl_init(ushort hbas_to_enumerate)
3935 {
3936 	async_queue = NULL;
3937 
3938 	/* Don't register mpt3ctl ioctl device if
3939 	 * hbas_to_enumarate is one.
3940 	 */
3941 	if (hbas_to_enumerate != 1)
3942 		if (misc_register(&ctl_dev) < 0)
3943 			pr_err("%s can't register misc device [minor=%d]\n",
3944 			    MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
3945 
3946 	/* Don't register mpt3ctl ioctl device if
3947 	 * hbas_to_enumarate is two.
3948 	 */
3949 	if (hbas_to_enumerate != 2)
3950 		if (misc_register(&gen2_ctl_dev) < 0)
3951 			pr_err("%s can't register misc device [minor=%d]\n",
3952 			    MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
3953 
3954 	init_waitqueue_head(&ctl_poll_wait);
3955 }
3956 
3957 /**
3958  * mpt3sas_ctl_exit - exit point for ctl
3959  * @hbas_to_enumerate: ?
3960  */
3961 void
3962 mpt3sas_ctl_exit(ushort hbas_to_enumerate)
3963 {
3964 	struct MPT3SAS_ADAPTER *ioc;
3965 	int i;
3966 
3967 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
3968 
3969 		/* free memory associated to diag buffers */
3970 		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
3971 			if (!ioc->diag_buffer[i])
3972 				continue;
3973 			dma_free_coherent(&ioc->pdev->dev,
3974 					  ioc->diag_buffer_sz[i],
3975 					  ioc->diag_buffer[i],
3976 					  ioc->diag_buffer_dma[i]);
3977 			ioc->diag_buffer[i] = NULL;
3978 			ioc->diag_buffer_status[i] = 0;
3979 		}
3980 
3981 		kfree(ioc->event_log);
3982 	}
3983 	if (hbas_to_enumerate != 1)
3984 		misc_deregister(&ctl_dev);
3985 	if (hbas_to_enumerate != 2)
3986 		misc_deregister(&gen2_ctl_dev);
3987 }
3988