1 /*
2  * Management Module Support for MPT (Message Passing Technology) based
3  * controllers
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
6  * Copyright (C) 2012-2014  LSI Corporation
7  * Copyright (C) 2013-2014 Avago Technologies
8  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * NO WARRANTY
21  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25  * solely responsible for determining the appropriateness of using and
26  * distributing the Program and assumes all risks associated with its
27  * exercise of rights under this Agreement, including but not limited to
28  * the risks and costs of program errors, damage to or loss of data,
29  * programs or equipment, and unavailability or interruption of operations.
30  *
31  * DISCLAIMER OF LIABILITY
32  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39  *
40  * You should have received a copy of the GNU General Public License
41  * along with this program; if not, write to the Free Software
42  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
43  * USA.
44  */
45 
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/compat.h>
55 #include <linux/poll.h>
56 
57 #include <linux/io.h>
58 #include <linux/uaccess.h>
59 
60 #include "mpt3sas_base.h"
61 #include "mpt3sas_ctl.h"
62 
63 
64 static struct fasync_struct *async_queue;
65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
66 
67 
68 /**
69  * enum block_state - blocking state
70  * @NON_BLOCKING: non blocking
71  * @BLOCKING: blocking
72  *
73  * These states are for ioctls that need to wait for a response
74  * from firmware, so they probably require sleep.
75  */
76 enum block_state {
77 	NON_BLOCKING,
78 	BLOCKING,
79 };
80 
81 /**
82  * _ctl_display_some_debug - debug routine
83  * @ioc: per adapter object
84  * @smid: system request message index
85  * @calling_function_name: string passed from the calling function
86  * @mpi_reply: reply message frame
87  * Context: none.
88  *
89  * Function for displaying debug info helpful when debugging issues
90  * in this module.
91  */
92 static void
93 _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
94 	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
95 {
96 	Mpi2ConfigRequest_t *mpi_request;
97 	char *desc = NULL;
98 
99 	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
100 		return;
101 
102 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
103 	switch (mpi_request->Function) {
104 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
105 	{
106 		Mpi2SCSIIORequest_t *scsi_request =
107 		    (Mpi2SCSIIORequest_t *)mpi_request;
108 
109 		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
110 		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
111 		    scsi_request->CDB.CDB32[0],
112 		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
113 		desc = ioc->tmp_string;
114 		break;
115 	}
116 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
117 		desc = "task_mgmt";
118 		break;
119 	case MPI2_FUNCTION_IOC_INIT:
120 		desc = "ioc_init";
121 		break;
122 	case MPI2_FUNCTION_IOC_FACTS:
123 		desc = "ioc_facts";
124 		break;
125 	case MPI2_FUNCTION_CONFIG:
126 	{
127 		Mpi2ConfigRequest_t *config_request =
128 		    (Mpi2ConfigRequest_t *)mpi_request;
129 
130 		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
131 		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
132 		    (config_request->Header.PageType &
133 		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
134 		    config_request->Header.PageNumber);
135 		desc = ioc->tmp_string;
136 		break;
137 	}
138 	case MPI2_FUNCTION_PORT_FACTS:
139 		desc = "port_facts";
140 		break;
141 	case MPI2_FUNCTION_PORT_ENABLE:
142 		desc = "port_enable";
143 		break;
144 	case MPI2_FUNCTION_EVENT_NOTIFICATION:
145 		desc = "event_notification";
146 		break;
147 	case MPI2_FUNCTION_FW_DOWNLOAD:
148 		desc = "fw_download";
149 		break;
150 	case MPI2_FUNCTION_FW_UPLOAD:
151 		desc = "fw_upload";
152 		break;
153 	case MPI2_FUNCTION_RAID_ACTION:
154 		desc = "raid_action";
155 		break;
156 	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
157 	{
158 		Mpi2SCSIIORequest_t *scsi_request =
159 		    (Mpi2SCSIIORequest_t *)mpi_request;
160 
161 		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
162 		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
163 		    scsi_request->CDB.CDB32[0],
164 		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
165 		desc = ioc->tmp_string;
166 		break;
167 	}
168 	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
169 		desc = "sas_iounit_cntl";
170 		break;
171 	case MPI2_FUNCTION_SATA_PASSTHROUGH:
172 		desc = "sata_pass";
173 		break;
174 	case MPI2_FUNCTION_DIAG_BUFFER_POST:
175 		desc = "diag_buffer_post";
176 		break;
177 	case MPI2_FUNCTION_DIAG_RELEASE:
178 		desc = "diag_release";
179 		break;
180 	case MPI2_FUNCTION_SMP_PASSTHROUGH:
181 		desc = "smp_passthrough";
182 		break;
183 	}
184 
185 	if (!desc)
186 		return;
187 
188 	pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
189 	    ioc->name, calling_function_name, desc, smid);
190 
191 	if (!mpi_reply)
192 		return;
193 
194 	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
195 		pr_info(MPT3SAS_FMT
196 		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
197 		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
198 		    le32_to_cpu(mpi_reply->IOCLogInfo));
199 
200 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
201 	    mpi_request->Function ==
202 	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
203 		Mpi2SCSIIOReply_t *scsi_reply =
204 		    (Mpi2SCSIIOReply_t *)mpi_reply;
205 		struct _sas_device *sas_device = NULL;
206 		struct _pcie_device *pcie_device = NULL;
207 
208 		sas_device = mpt3sas_get_sdev_by_handle(ioc,
209 		    le16_to_cpu(scsi_reply->DevHandle));
210 		if (sas_device) {
211 			pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
212 				ioc->name, (unsigned long long)
213 			    sas_device->sas_address, sas_device->phy);
214 			pr_warn(MPT3SAS_FMT
215 			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
216 			    ioc->name, (unsigned long long)
217 			    sas_device->enclosure_logical_id, sas_device->slot);
218 			sas_device_put(sas_device);
219 		}
220 		if (!sas_device) {
221 			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
222 				le16_to_cpu(scsi_reply->DevHandle));
223 			if (pcie_device) {
224 				pr_warn(MPT3SAS_FMT
225 				    "\tWWID(0x%016llx), port(%d)\n", ioc->name,
226 				    (unsigned long long)pcie_device->wwid,
227 				    pcie_device->port_num);
228 				if (pcie_device->enclosure_handle != 0)
229 					pr_warn(MPT3SAS_FMT
230 					    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
231 					    ioc->name, (unsigned long long)
232 					    pcie_device->enclosure_logical_id,
233 					    pcie_device->slot);
234 				pcie_device_put(pcie_device);
235 			}
236 		}
237 		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
238 			pr_info(MPT3SAS_FMT
239 			    "\tscsi_state(0x%02x), scsi_status"
240 			    "(0x%02x)\n", ioc->name,
241 			    scsi_reply->SCSIState,
242 			    scsi_reply->SCSIStatus);
243 	}
244 }
245 
246 /**
247  * mpt3sas_ctl_done - ctl module completion routine
248  * @ioc: per adapter object
249  * @smid: system request message index
250  * @msix_index: MSIX table index supplied by the OS
251  * @reply: reply message frame (lower 32bit addr)
252  * Context: none.
253  *
254  * The callback handler when using ioc->ctl_cb_idx.
255  *
256  * Return 1 meaning mf should be freed from _base_interrupt
257  *        0 means the mf is freed from this function.
258  */
259 u8
260 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
261 	u32 reply)
262 {
263 	MPI2DefaultReply_t *mpi_reply;
264 	Mpi2SCSIIOReply_t *scsiio_reply;
265 	Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
266 	const void *sense_data;
267 	u32 sz;
268 
269 	if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
270 		return 1;
271 	if (ioc->ctl_cmds.smid != smid)
272 		return 1;
273 	ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
274 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
275 	if (mpi_reply) {
276 		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
277 		ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
278 		/* get sense data */
279 		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
280 		    mpi_reply->Function ==
281 		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
282 			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
283 			if (scsiio_reply->SCSIState &
284 			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
285 				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
286 				    le32_to_cpu(scsiio_reply->SenseCount));
287 				sense_data = mpt3sas_base_get_sense_buffer(ioc,
288 				    smid);
289 				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
290 			}
291 		}
292 		/*
293 		 * Get Error Response data for NVMe device. The ctl_cmds.sense
294 		 * buffer is used to store the Error Response data.
295 		 */
296 		if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
297 			nvme_error_reply =
298 			    (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
299 			sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
300 			    le32_to_cpu(nvme_error_reply->ErrorResponseCount));
301 			sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
302 			memcpy(ioc->ctl_cmds.sense, sense_data, sz);
303 		}
304 	}
305 
306 	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
307 	ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
308 	complete(&ioc->ctl_cmds.done);
309 	return 1;
310 }
311 
312 /**
313  * _ctl_check_event_type - determines when an event needs logging
314  * @ioc: per adapter object
315  * @event: firmware event
316  *
317  * The bitmask in ioc->event_type[] indicates which events should be
318  * saved in the driver event_log.  This bitmask is set by the application.
319  *
320  * Return 1 when the event should be captured, or 0 when there is no match.
321  */
322 static int
323 _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
324 {
325 	u16 i;
326 	u32 desired_event;
327 
328 	if (event >= 128 || !event || !ioc->event_log)
329 		return 0;
330 
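	/* ioc->event_type[] is an array of 32-bit masks: event / 32 selects
	 * the word and event % 32 selects the bit within it
	 */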
331 	desired_event = (1 << (event % 32));
332 	if (!desired_event)
333 		desired_event = 1;
334 	i = event / 32;
335 	return desired_event & ioc->event_type[i];
336 }
337 
338 /**
339  * mpt3sas_ctl_add_to_event_log - add event
340  * @ioc: per adapter object
341  * @mpi_reply: reply message frame
342  *
343  * Return nothing.
344  */
345 void
346 mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
347 	Mpi2EventNotificationReply_t *mpi_reply)
348 {
349 	struct MPT3_IOCTL_EVENTS *event_log;
350 	u16 event;
351 	int i;
352 	u32 sz, event_data_sz;
353 	u8 send_aen = 0;
354 
355 	if (!ioc->event_log)
356 		return;
357 
358 	event = le16_to_cpu(mpi_reply->Event);
359 
360 	if (_ctl_check_event_type(ioc, event)) {
361 
362 		/* insert entry into circular event_log */
363 		i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
364 		event_log = ioc->event_log;
365 		event_log[i].event = event;
366 		event_log[i].context = ioc->event_context++;
367 
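		/* EventDataLength is in 4-byte words; clamp the copy to the
		 * size of a log entry's data area
		 */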
368 		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
369 		sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
370 		memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
371 		memcpy(event_log[i].data, mpi_reply->EventData, sz);
372 		send_aen = 1;
373 	}
374 
375 	/* The aen_event_read_flag stays set until the application has
376 	 * read the event log; while it is set, no further notification is
377 	 * sent.  For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
378 	 */
379 	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
380 	    (send_aen && !ioc->aen_event_read_flag)) {
381 		ioc->aen_event_read_flag = 1;
382 		wake_up_interruptible(&ctl_poll_wait);
383 		if (async_queue)
384 			kill_fasync(&async_queue, SIGIO, POLL_IN);
385 	}
386 }
387 
388 /**
389  * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
390  * @ioc: per adapter object
391  * @msix_index: MSIX table index supplied by the OS
392  * @reply: reply message frame (lower 32bit addr)
393  * Context: interrupt.
394  *
395  * This function merely copies the event into the driver's event log via
396  * mpt3sas_ctl_add_to_event_log().
397  *
398  * Return 1 meaning mf should be freed from _base_interrupt
399  *        0 means the mf is freed from this function.
400  */
401 u8
402 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
403 	u32 reply)
404 {
405 	Mpi2EventNotificationReply_t *mpi_reply;
406 
407 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
408 	if (mpi_reply)
409 		mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
410 	return 1;
411 }
412 
413 /**
414  * _ctl_verify_adapter - validates ioc_number passed from application
415  * @ioc: per adapter object
416  * @iocpp: The ioc pointer is returned in this.
417  * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
418  * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
419  *
420  * Return -1 on error, else the ioc_number.
421  */
422 static int
423 _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
424 							int mpi_version)
425 {
426 	struct MPT3SAS_ADAPTER *ioc;
427 	int version = 0;
428 	/* global ioc lock to protect controller on list operations */
429 	spin_lock(&gioc_lock);
430 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
431 		if (ioc->id != ioc_number)
432 			continue;
433 		/* Check whether this ioctl command is from the right
434 		 * ioctl device; if not, continue the search.
435 		 */
436 		version = ioc->hba_mpi_version_belonged;
437 		/* MPI25_VERSION and MPI26_VERSION use the same ioctl
438 		 * device.
439 		 */
440 		if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
441 			if ((version == MPI25_VERSION) ||
442 				(version == MPI26_VERSION))
443 				goto out;
444 			else
445 				continue;
446 		} else {
447 			if (version != mpi_version)
448 				continue;
449 		}
450 out:
451 		spin_unlock(&gioc_lock);
452 		*iocpp = ioc;
453 		return ioc_number;
454 	}
455 	spin_unlock(&gioc_lock);
456 	*iocpp = NULL;
457 	return -1;
458 }
459 
460 /**
461  * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
462  * @ioc: per adapter object
463  * @reset_phase: phase
464  *
465  * The handler for doing any required cleanup or initialization.
466  *
467  * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
468  * MPT3_IOC_DONE_RESET
469  */
470 void
471 mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
472 {
473 	int i;
474 	u8 issue_reset;
475 
476 	switch (reset_phase) {
477 	case MPT3_IOC_PRE_RESET:
478 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
479 			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
480 		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
481 			if (!(ioc->diag_buffer_status[i] &
482 			    MPT3_DIAG_BUFFER_IS_REGISTERED))
483 				continue;
484 			if ((ioc->diag_buffer_status[i] &
485 			    MPT3_DIAG_BUFFER_IS_RELEASED))
486 				continue;
487 			mpt3sas_send_diag_release(ioc, i, &issue_reset);
488 		}
489 		break;
490 	case MPT3_IOC_AFTER_RESET:
491 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
492 			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
493 		if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
494 			ioc->ctl_cmds.status |= MPT3_CMD_RESET;
495 			mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
496 			complete(&ioc->ctl_cmds.done);
497 		}
498 		break;
499 	case MPT3_IOC_DONE_RESET:
500 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
501 			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
502 
503 		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
504 			if (!(ioc->diag_buffer_status[i] &
505 			    MPT3_DIAG_BUFFER_IS_REGISTERED))
506 				continue;
507 			if ((ioc->diag_buffer_status[i] &
508 			    MPT3_DIAG_BUFFER_IS_RELEASED))
509 				continue;
510 			ioc->diag_buffer_status[i] |=
511 			    MPT3_DIAG_BUFFER_IS_DIAG_RESET;
512 		}
513 		break;
514 	}
515 }
516 
517 /**
518  * _ctl_fasync - async notification registration
519  * @fd: file descriptor
520  * @filep: file object
521  * @mode: on/off flag
522  *
523  * Called when an application requests the fasync callback handler.
524  */
525 static int
526 _ctl_fasync(int fd, struct file *filep, int mode)
527 {
528 	return fasync_helper(fd, filep, mode, &async_queue);
529 }
530 
531 /**
532  * _ctl_poll - poll handler for pending AEN events
533  * @filep: file object
534  * @wait: poll table
535  *
536  */
537 static __poll_t
538 _ctl_poll(struct file *filep, poll_table *wait)
539 {
540 	struct MPT3SAS_ADAPTER *ioc;
541 
542 	poll_wait(filep, &ctl_poll_wait, wait);
543 
544 	/* global ioc lock to protect controller on list operations */
545 	spin_lock(&gioc_lock);
546 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
547 		if (ioc->aen_event_read_flag) {
548 			spin_unlock(&gioc_lock);
549 			return EPOLLIN | EPOLLRDNORM;
550 		}
551 	}
552 	spin_unlock(&gioc_lock);
553 	return 0;
554 }
555 
556 /**
557  * _ctl_set_task_mid - assign an active smid to tm request
558  * @ioc: per adapter object
559  * @karg - (struct mpt3_ioctl_command)
560  * @tm_request - pointer to mf from user space
561  *
562  * Return 0 when an smid is found, else fail.
563  * On failure, the reply frame is filled.
564  */
565 static int
566 _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
567 	Mpi2SCSITaskManagementRequest_t *tm_request)
568 {
569 	u8 found = 0;
570 	u16 smid;
571 	u16 handle;
572 	struct scsi_cmnd *scmd;
573 	struct MPT3SAS_DEVICE *priv_data;
574 	Mpi2SCSITaskManagementReply_t *tm_reply;
575 	u32 sz;
576 	u32 lun;
577 	char *desc = NULL;
578 
579 	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
580 		desc = "abort_task";
581 	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
582 		desc = "query_task";
583 	else
584 		return 0;
585 
586 	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
587 
588 	handle = le16_to_cpu(tm_request->DevHandle);
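	/* scan outstanding SCSI commands, highest smid first, for one that
	 * matches this LUN and device handle
	 */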
589 	for (smid = ioc->scsiio_depth; smid && !found; smid--) {
590 		struct scsiio_tracker *st;
591 
592 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
593 		if (!scmd)
594 			continue;
595 		if (lun != scmd->device->lun)
596 			continue;
597 		priv_data = scmd->device->hostdata;
598 		if (priv_data->sas_target == NULL)
599 			continue;
600 		if (priv_data->sas_target->handle != handle)
601 			continue;
602 		st = scsi_cmd_priv(scmd);
603 		tm_request->TaskMID = cpu_to_le16(st->smid);
604 		found = 1;
605 	}
606 
607 	if (!found) {
608 		dctlprintk(ioc, pr_info(MPT3SAS_FMT
609 			"%s: handle(0x%04x), lun(%d), no active mid!!\n",
610 			ioc->name,
611 		    desc, le16_to_cpu(tm_request->DevHandle), lun));
612 		tm_reply = ioc->ctl_cmds.reply;
613 		tm_reply->DevHandle = tm_request->DevHandle;
614 		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
615 		tm_reply->TaskType = tm_request->TaskType;
616 		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
617 		tm_reply->VP_ID = tm_request->VP_ID;
618 		tm_reply->VF_ID = tm_request->VF_ID;
619 		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
620 		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
621 		    sz))
622 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
623 			    __LINE__, __func__);
624 		return 1;
625 	}
626 
627 	dctlprintk(ioc, pr_info(MPT3SAS_FMT
628 		"%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
629 	    desc, le16_to_cpu(tm_request->DevHandle), lun,
630 	     le16_to_cpu(tm_request->TaskMID)));
631 	return 0;
632 }
633 
634 /**
635  * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
636  * @ioc: per adapter object
637  * @karg - (struct mpt3_ioctl_command)
638  * @mf - pointer to mf in user space
639  */
640 static long
641 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
642 	void __user *mf)
643 {
644 	MPI2RequestHeader_t *mpi_request = NULL, *request;
645 	MPI2DefaultReply_t *mpi_reply;
646 	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
647 	u32 ioc_state;
648 	u16 smid;
649 	unsigned long timeout;
650 	u8 issue_reset;
651 	u32 sz, sz_arg;
652 	void *psge;
653 	void *data_out = NULL;
654 	dma_addr_t data_out_dma = 0;
655 	size_t data_out_sz = 0;
656 	void *data_in = NULL;
657 	dma_addr_t data_in_dma = 0;
658 	size_t data_in_sz = 0;
659 	long ret;
660 	u16 wait_state_count;
661 	u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
662 
663 	issue_reset = 0;
664 
665 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
666 		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
667 		    ioc->name, __func__);
668 		ret = -EAGAIN;
669 		goto out;
670 	}
671 
672 	wait_state_count = 0;
673 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
674 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
675 		if (wait_state_count++ == 10) {
676 			pr_err(MPT3SAS_FMT
677 			    "%s: failed due to ioc not operational\n",
678 			    ioc->name, __func__);
679 			ret = -EFAULT;
680 			goto out;
681 		}
682 		ssleep(1);
683 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
684 		pr_info(MPT3SAS_FMT
685 			"%s: waiting for operational state(count=%d)\n",
686 			ioc->name,
687 		    __func__, wait_state_count);
688 	}
689 	if (wait_state_count)
690 		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
691 		    ioc->name, __func__);
692 
693 	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
694 	if (!mpi_request) {
695 		pr_err(MPT3SAS_FMT
696 			"%s: failed obtaining memory for mpi_request\n",
697 			ioc->name, __func__);
698 		ret = -ENOMEM;
699 		goto out;
700 	}
701 
702 	/* Check for overflow and wraparound */
703 	if (karg.data_sge_offset * 4 > ioc->request_sz ||
704 	    karg.data_sge_offset > (UINT_MAX / 4)) {
705 		ret = -EINVAL;
706 		goto out;
707 	}
708 
709 	/* copy in request message frame from user */
710 	if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
711 		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
712 		    __func__);
713 		ret = -EFAULT;
714 		goto out;
715 	}
716 
717 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
718 		smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
719 		if (!smid) {
720 			pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
721 			    ioc->name, __func__);
722 			ret = -EAGAIN;
723 			goto out;
724 		}
725 	} else {
726 		/* Use first reserved smid for passthrough ioctls */
727 		smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
728 	}
729 
730 	ret = 0;
731 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
732 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
733 	request = mpt3sas_base_get_msg_frame(ioc, smid);
734 	memcpy(request, mpi_request, karg.data_sge_offset*4);
735 	ioc->ctl_cmds.smid = smid;
736 	data_out_sz = karg.data_out_size;
737 	data_in_sz = karg.data_in_size;
738 
739 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
740 	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
741 	    mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
742 	    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
743 	    mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
744 
745 		device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
746 		if (!device_handle || (device_handle >
747 		    ioc->facts.MaxDevHandle)) {
748 			ret = -EINVAL;
749 			mpt3sas_base_free_smid(ioc, smid);
750 			goto out;
751 		}
752 	}
753 
754 	/* obtain dma-able memory for data transfer */
755 	if (data_out_sz) /* WRITE */ {
756 		data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
757 		    &data_out_dma);
758 		if (!data_out) {
759 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
760 			    __LINE__, __func__);
761 			ret = -ENOMEM;
762 			mpt3sas_base_free_smid(ioc, smid);
763 			goto out;
764 		}
765 		if (copy_from_user(data_out, karg.data_out_buf_ptr,
766 			data_out_sz)) {
767 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
768 			    __LINE__, __func__);
769 			ret =  -EFAULT;
770 			mpt3sas_base_free_smid(ioc, smid);
771 			goto out;
772 		}
773 	}
774 
775 	if (data_in_sz) /* READ */ {
776 		data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
777 		    &data_in_dma);
778 		if (!data_in) {
779 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
780 			    __LINE__, __func__);
781 			ret = -ENOMEM;
782 			mpt3sas_base_free_smid(ioc, smid);
783 			goto out;
784 		}
785 	}
786 
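	/* data_sge_offset is in 4-byte words; psge points at the location of
	 * the SGL/PRP within the request frame
	 */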
787 	psge = (void *)request + (karg.data_sge_offset*4);
788 
789 	/* send command to firmware */
790 	_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
791 
792 	init_completion(&ioc->ctl_cmds.done);
793 	switch (mpi_request->Function) {
794 	case MPI2_FUNCTION_NVME_ENCAPSULATED:
795 	{
796 		nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
797 		/*
798 		 * Get the Physical Address of the sense buffer.
799 		 * Use Error Response buffer address field to hold the sense
800 		 * buffer address.
801 		 * Clear the internal sense buffer, which will potentially hold
802 		 * the Completion Queue Entry on return, or 0 if no Entry.
803 		 * Build the PRPs and set direction bits.
804 		 * Send the request.
805 		 */
806 		nvme_encap_request->ErrorResponseBaseAddress = ioc->sense_dma &
807 		    0xFFFFFFFF00000000;
808 		nvme_encap_request->ErrorResponseBaseAddress |=
809 		    (U64)mpt3sas_base_get_sense_buffer_dma(ioc, smid);
810 		nvme_encap_request->ErrorResponseAllocationLength =
811 						NVME_ERROR_RESPONSE_SIZE;
812 		memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
813 		ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
814 		    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
815 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
816 			dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
817 			    "ioctl failed due to device removal in progress\n",
818 			    ioc->name, device_handle));
819 			mpt3sas_base_free_smid(ioc, smid);
820 			ret = -EINVAL;
821 			goto out;
822 		}
823 		ioc->put_smid_nvme_encap(ioc, smid);
824 		break;
825 	}
826 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
827 	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
828 	{
829 		Mpi2SCSIIORequest_t *scsiio_request =
830 		    (Mpi2SCSIIORequest_t *)request;
831 		scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
832 		scsiio_request->SenseBufferLowAddress =
833 		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
834 		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
835 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
836 			dtmprintk(ioc, pr_info(MPT3SAS_FMT
837 				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
838 				ioc->name, device_handle));
839 			mpt3sas_base_free_smid(ioc, smid);
840 			ret = -EINVAL;
841 			goto out;
842 		}
843 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
844 		    data_in_dma, data_in_sz);
845 		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
846 			ioc->put_smid_scsi_io(ioc, smid, device_handle);
847 		else
848 			ioc->put_smid_default(ioc, smid);
849 		break;
850 	}
851 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
852 	{
853 		Mpi2SCSITaskManagementRequest_t *tm_request =
854 		    (Mpi2SCSITaskManagementRequest_t *)request;
855 
856 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
857 			"TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
858 			ioc->name,
859 		    le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
860 		ioc->got_task_abort_from_ioctl = 1;
861 		if (tm_request->TaskType ==
862 		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
863 		    tm_request->TaskType ==
864 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
865 			if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
866 				mpt3sas_base_free_smid(ioc, smid);
867 				ioc->got_task_abort_from_ioctl = 0;
868 				goto out;
869 			}
870 		}
871 		ioc->got_task_abort_from_ioctl = 0;
872 
873 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
874 			dtmprintk(ioc, pr_info(MPT3SAS_FMT
875 				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
876 				ioc->name, device_handle));
877 			mpt3sas_base_free_smid(ioc, smid);
878 			ret = -EINVAL;
879 			goto out;
880 		}
881 		mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
882 		    tm_request->DevHandle));
883 		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
884 		    data_in_dma, data_in_sz);
885 		ioc->put_smid_hi_priority(ioc, smid, 0);
886 		break;
887 	}
888 	case MPI2_FUNCTION_SMP_PASSTHROUGH:
889 	{
890 		Mpi2SmpPassthroughRequest_t *smp_request =
891 		    (Mpi2SmpPassthroughRequest_t *)mpi_request;
892 		u8 *data;
893 
894 		/* ioc determines which port to use */
895 		smp_request->PhysicalPort = 0xFF;
896 		if (smp_request->PassthroughFlags &
897 		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
898 			data = (u8 *)&smp_request->SGL;
899 		else {
900 			if (unlikely(data_out == NULL)) {
901 				pr_err("failure at %s:%d/%s()!\n",
902 				    __FILE__, __LINE__, __func__);
903 				mpt3sas_base_free_smid(ioc, smid);
904 				ret = -EINVAL;
905 				goto out;
906 			}
907 			data = data_out;
908 		}
909 
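		/* SMP function 0x91 (PHY CONTROL) with operation 1 (link
		 * reset) or 2 (hard reset) will bounce the link, so note the
		 * reset in progress and suppress loginfos until it completes
		 */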
910 		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
911 			ioc->ioc_link_reset_in_progress = 1;
912 			ioc->ignore_loginfos = 1;
913 		}
914 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
915 		    data_in_sz);
916 		ioc->put_smid_default(ioc, smid);
917 		break;
918 	}
919 	case MPI2_FUNCTION_SATA_PASSTHROUGH:
920 	{
921 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
922 			dtmprintk(ioc, pr_info(MPT3SAS_FMT
923 				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
924 				ioc->name, device_handle));
925 			mpt3sas_base_free_smid(ioc, smid);
926 			ret = -EINVAL;
927 			goto out;
928 		}
929 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
930 		    data_in_sz);
931 		ioc->put_smid_default(ioc, smid);
932 		break;
933 	}
934 	case MPI2_FUNCTION_FW_DOWNLOAD:
935 	case MPI2_FUNCTION_FW_UPLOAD:
936 	{
937 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
938 		    data_in_sz);
939 		ioc->put_smid_default(ioc, smid);
940 		break;
941 	}
942 	case MPI2_FUNCTION_TOOLBOX:
943 	{
944 		Mpi2ToolboxCleanRequest_t *toolbox_request =
945 			(Mpi2ToolboxCleanRequest_t *)mpi_request;
946 
947 		if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
948 			ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
949 				data_in_dma, data_in_sz);
950 		} else {
951 			ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
952 				data_in_dma, data_in_sz);
953 		}
954 		ioc->put_smid_default(ioc, smid);
955 		break;
956 	}
957 	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
958 	{
959 		Mpi2SasIoUnitControlRequest_t *sasiounit_request =
960 		    (Mpi2SasIoUnitControlRequest_t *)mpi_request;
961 
962 		if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
963 		    || sasiounit_request->Operation ==
964 		    MPI2_SAS_OP_PHY_LINK_RESET) {
965 			ioc->ioc_link_reset_in_progress = 1;
966 			ioc->ignore_loginfos = 1;
967 		}
968 		/* fall through to the default case to post the request */
969 	}
970 	default:
971 		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
972 		    data_in_dma, data_in_sz);
973 		ioc->put_smid_default(ioc, smid);
974 		break;
975 	}
976 
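	/* enforce a minimum wait of MPT3_IOCTL_DEFAULT_TIMEOUT seconds */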
977 	if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
978 		timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
979 	else
980 		timeout = karg.timeout;
981 	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
982 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
983 		Mpi2SCSITaskManagementRequest_t *tm_request =
984 		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
985 		mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
986 		    tm_request->DevHandle));
987 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
988 	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
989 	    mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
990 		ioc->ioc_link_reset_in_progress) {
991 		ioc->ioc_link_reset_in_progress = 0;
992 		ioc->ignore_loginfos = 0;
993 	}
994 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
995 		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
996 		    __func__);
997 		_debug_dump_mf(mpi_request, karg.data_sge_offset);
998 		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
999 			issue_reset = 1;
1000 		goto issue_host_reset;
1001 	}
1002 
1003 	mpi_reply = ioc->ctl_cmds.reply;
1004 
1005 	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
1006 	    (ioc->logging_level & MPT_DEBUG_TM)) {
1007 		Mpi2SCSITaskManagementReply_t *tm_reply =
1008 		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;
1009 
1010 		pr_info(MPT3SAS_FMT "TASK_MGMT: " \
1011 		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
1012 		    "TerminationCount(0x%08x)\n", ioc->name,
1013 		    le16_to_cpu(tm_reply->IOCStatus),
1014 		    le32_to_cpu(tm_reply->IOCLogInfo),
1015 		    le32_to_cpu(tm_reply->TerminationCount));
1016 	}
1017 
1018 	/* copy out xdata to user */
1019 	if (data_in_sz) {
1020 		if (copy_to_user(karg.data_in_buf_ptr, data_in,
1021 		    data_in_sz)) {
1022 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
1023 			    __LINE__, __func__);
1024 			ret = -ENODATA;
1025 			goto out;
1026 		}
1027 	}
1028 
1029 	/* copy out reply message frame to user */
1030 	if (karg.max_reply_bytes) {
1031 		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
1032 		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
1033 		    sz)) {
1034 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
1035 			    __LINE__, __func__);
1036 			ret = -ENODATA;
1037 			goto out;
1038 		}
1039 	}
1040 
1041 	/* copy out sense/NVMe Error Response to user */
1042 	if (karg.max_sense_bytes && (mpi_request->Function ==
1043 	    MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1044 	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
1045 	    MPI2_FUNCTION_NVME_ENCAPSULATED)) {
1046 		if (karg.sense_data_ptr == NULL) {
1047 			pr_info(MPT3SAS_FMT "Response buffer provided"
1048 			    " by application is NULL; Response data will"
1049 			    " not be returned.\n", ioc->name);
1050 			goto out;
1051 		}
1052 		sz_arg = (mpi_request->Function ==
1053 		MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
1054 							SCSI_SENSE_BUFFERSIZE;
1055 		sz = min_t(u32, karg.max_sense_bytes, sz_arg);
1056 		if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1057 		    sz)) {
1058 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
1059 				__LINE__, __func__);
1060 			ret = -ENODATA;
1061 			goto out;
1062 		}
1063 	}
1064 
1065  issue_host_reset:
1066 	if (issue_reset) {
1067 		ret = -ENODATA;
1068 		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
1069 		    mpi_request->Function ==
1070 		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
1071 		    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
1072 			pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
1073 				ioc->name,
1074 				le16_to_cpu(mpi_request->FunctionDependent1));
1075 			mpt3sas_halt_firmware(ioc);
1076 			mpt3sas_scsih_issue_locked_tm(ioc,
1077 			    le16_to_cpu(mpi_request->FunctionDependent1), 0,
1078 			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
1079 		} else
1080 			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1081 	}
1082 
1083  out:
1084 
1085 	/* free memory associated with sg buffers */
1086 	if (data_in)
1087 		pci_free_consistent(ioc->pdev, data_in_sz, data_in,
1088 		    data_in_dma);
1089 
1090 	if (data_out)
1091 		pci_free_consistent(ioc->pdev, data_out_sz, data_out,
1092 		    data_out_dma);
1093 
1094 	kfree(mpi_request);
1095 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1096 	return ret;
1097 }
1098 
1099 /**
1100  * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
1101  * @ioc: per adapter object
1102  * @arg - user space buffer containing ioctl content
1103  */
1104 static long
1105 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1106 {
1107 	struct mpt3_ioctl_iocinfo karg;
1108 
1109 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1110 	    __func__));
1111 
1112 	memset(&karg, 0 , sizeof(karg));
1113 	if (ioc->pfacts)
1114 		karg.port_number = ioc->pfacts[0].PortNumber;
1115 	karg.hw_rev = ioc->pdev->revision;
1116 	karg.pci_id = ioc->pdev->device;
1117 	karg.subsystem_device = ioc->pdev->subsystem_device;
1118 	karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1119 	karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1120 	karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1121 	karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1122 	karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1123 	karg.firmware_version = ioc->facts.FWVersion.Word;
1124 	strcpy(karg.driver_version, ioc->driver_name);
1125 	strcat(karg.driver_version, "-");
1126 	switch  (ioc->hba_mpi_version_belonged) {
1127 	case MPI2_VERSION:
1128 		if (ioc->is_warpdrive)
1129 			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1130 		else
1131 			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1132 		strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
1133 		break;
1134 	case MPI25_VERSION:
1135 	case MPI26_VERSION:
1136 		if (ioc->is_gen35_ioc)
1137 			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1138 		else
1139 			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1140 		strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1141 		break;
1142 	}
1143 	karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1144 
1145 	if (copy_to_user(arg, &karg, sizeof(karg))) {
1146 		pr_err("failure at %s:%d/%s()!\n",
1147 		    __FILE__, __LINE__, __func__);
1148 		return -EFAULT;
1149 	}
1150 	return 0;
1151 }
1152 
1153 /**
1154  * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
1155  * @ioc: per adapter object
1156  * @arg - user space buffer containing ioctl content
1157  */
1158 static long
1159 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1160 {
1161 	struct mpt3_ioctl_eventquery karg;
1162 
1163 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1164 		pr_err("failure at %s:%d/%s()!\n",
1165 		    __FILE__, __LINE__, __func__);
1166 		return -EFAULT;
1167 	}
1168 
1169 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1170 	    __func__));
1171 
1172 	karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
1173 	memcpy(karg.event_types, ioc->event_type,
1174 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1175 
1176 	if (copy_to_user(arg, &karg, sizeof(karg))) {
1177 		pr_err("failure at %s:%d/%s()!\n",
1178 		    __FILE__, __LINE__, __func__);
1179 		return -EFAULT;
1180 	}
1181 	return 0;
1182 }
1183 
1184 /**
1185  * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
1186  * @ioc: per adapter object
1187  * @arg - user space buffer containing ioctl content
1188  */
1189 static long
1190 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1191 {
1192 	struct mpt3_ioctl_eventenable karg;
1193 
1194 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1195 		pr_err("failure at %s:%d/%s()!\n",
1196 		    __FILE__, __LINE__, __func__);
1197 		return -EFAULT;
1198 	}
1199 
1200 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1201 	    __func__));
1202 
1203 	memcpy(ioc->event_type, karg.event_types,
1204 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1205 	mpt3sas_base_validate_event_type(ioc, ioc->event_type);
1206 
1207 	if (ioc->event_log)
1208 		return 0;
1209 	/* initialize event_log */
1210 	ioc->event_context = 0;
1211 	ioc->aen_event_read_flag = 0;
1212 	ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
1213 	    sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
1214 	if (!ioc->event_log) {
1215 		pr_err("failure at %s:%d/%s()!\n",
1216 		    __FILE__, __LINE__, __func__);
1217 		return -ENOMEM;
1218 	}
1219 	return 0;
1220 }
1221 
1222 /**
1223  * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
1224  * @ioc: per adapter object
1225  * @arg - user space buffer containing ioctl content
1226  */
1227 static long
1228 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1229 {
1230 	struct mpt3_ioctl_eventreport karg;
1231 	u32 number_bytes, max_events, max;
1232 	struct mpt3_ioctl_eventreport __user *uarg = arg;
1233 
1234 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1235 		pr_err("failure at %s:%d/%s()!\n",
1236 		    __FILE__, __LINE__, __func__);
1237 		return -EFAULT;
1238 	}
1239 
1240 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1241 	    __func__));
1242 
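	/* work out how many event log entries fit in the user buffer */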
1243 	number_bytes = karg.hdr.max_data_size -
1244 	    sizeof(struct mpt3_ioctl_header);
1245 	max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
1246 	max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
1247 
1248 	/* If fewer than 1 event is requested, there must have
1249 	 * been some type of error.
1250 	 */
1251 	if (!max || !ioc->event_log)
1252 		return -ENODATA;
1253 
1254 	number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
1255 	if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1256 		pr_err("failure at %s:%d/%s()!\n",
1257 		    __FILE__, __LINE__, __func__);
1258 		return -EFAULT;
1259 	}
1260 
1261 	/* reset flag so SIGIO can restart */
1262 	ioc->aen_event_read_flag = 0;
1263 	return 0;
1264 }
1265 
1266 /**
1267  * _ctl_do_reset - main handler for MPT3HARDRESET opcode
1268  * @ioc: per adapter object
1269  * @arg - user space buffer containing ioctl content
1270  */
1271 static long
1272 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1273 {
1274 	struct mpt3_ioctl_diag_reset karg;
1275 	int retval;
1276 
1277 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1278 		pr_err("failure at %s:%d/%s()!\n",
1279 		    __FILE__, __LINE__, __func__);
1280 		return -EFAULT;
1281 	}
1282 
1283 	if (ioc->shost_recovery || ioc->pci_error_recovery ||
1284 	    ioc->is_driver_loading)
1285 		return -EAGAIN;
1286 
1287 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1288 	    __func__));
1289 
1290 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1291 	pr_info(MPT3SAS_FMT "host reset: %s\n",
1292 	    ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
1293 	return 0;
1294 }
1295 
1296 /**
1297  * _ctl_btdh_search_sas_device - searching for sas device
1298  * @ioc: per adapter object
1299  * @btdh: btdh ioctl payload
1300  */
1301 static int
1302 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1303 	struct mpt3_ioctl_btdh_mapping *btdh)
1304 {
1305 	struct _sas_device *sas_device;
1306 	unsigned long flags;
1307 	int rc = 0;
1308 
1309 	if (list_empty(&ioc->sas_device_list))
1310 		return rc;
1311 
1312 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1313 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1314 		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1315 		    btdh->handle == sas_device->handle) {
1316 			btdh->bus = sas_device->channel;
1317 			btdh->id = sas_device->id;
1318 			rc = 1;
1319 			goto out;
1320 		} else if (btdh->bus == sas_device->channel && btdh->id ==
1321 		    sas_device->id && btdh->handle == 0xFFFF) {
1322 			btdh->handle = sas_device->handle;
1323 			rc = 1;
1324 			goto out;
1325 		}
1326 	}
1327  out:
1328 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1329 	return rc;
1330 }
1331 
1332 /**
1333  * _ctl_btdh_search_pcie_device - searching for pcie device
1334  * @ioc: per adapter object
1335  * @btdh: btdh ioctl payload
1336  */
1337 static int
1338 _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
1339 	struct mpt3_ioctl_btdh_mapping *btdh)
1340 {
1341 	struct _pcie_device *pcie_device;
1342 	unsigned long flags;
1343 	int rc = 0;
1344 
1345 	if (list_empty(&ioc->pcie_device_list))
1346 		return rc;
1347 
1348 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1349 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1350 		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1351 			   btdh->handle == pcie_device->handle) {
1352 			btdh->bus = pcie_device->channel;
1353 			btdh->id = pcie_device->id;
1354 			rc = 1;
1355 			goto out;
1356 		} else if (btdh->bus == pcie_device->channel && btdh->id ==
1357 			   pcie_device->id && btdh->handle == 0xFFFF) {
1358 			btdh->handle = pcie_device->handle;
1359 			rc = 1;
1360 			goto out;
1361 		}
1362 	}
1363  out:
1364 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1365 	return rc;
1366 }
1367 
1368 /**
1369  * _ctl_btdh_search_raid_device - searching for raid device
1370  * @ioc: per adapter object
1371  * @btdh: btdh ioctl payload
1372  */
1373 static int
1374 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
1375 	struct mpt3_ioctl_btdh_mapping *btdh)
1376 {
1377 	struct _raid_device *raid_device;
1378 	unsigned long flags;
1379 	int rc = 0;
1380 
1381 	if (list_empty(&ioc->raid_device_list))
1382 		return rc;
1383 
1384 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1385 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1386 		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1387 		    btdh->handle == raid_device->handle) {
1388 			btdh->bus = raid_device->channel;
1389 			btdh->id = raid_device->id;
1390 			rc = 1;
1391 			goto out;
1392 		} else if (btdh->bus == raid_device->channel && btdh->id ==
1393 		    raid_device->id && btdh->handle == 0xFFFF) {
1394 			btdh->handle = raid_device->handle;
1395 			rc = 1;
1396 			goto out;
1397 		}
1398 	}
1399  out:
1400 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1401 	return rc;
1402 }
1403 
1404 /**
1405  * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
1406  * @ioc: per adapter object
1407  * @arg - user space buffer containing ioctl content
1408  */
1409 static long
1410 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1411 {
1412 	struct mpt3_ioctl_btdh_mapping karg;
1413 	int rc;
1414 
1415 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1416 		pr_err("failure at %s:%d/%s()!\n",
1417 		    __FILE__, __LINE__, __func__);
1418 		return -EFAULT;
1419 	}
1420 
1421 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1422 	    __func__));
1423 
1424 	rc = _ctl_btdh_search_sas_device(ioc, &karg);
1425 	if (!rc)
1426 		rc = _ctl_btdh_search_pcie_device(ioc, &karg);
1427 	if (!rc)
1428 		_ctl_btdh_search_raid_device(ioc, &karg);
1429 
1430 	if (copy_to_user(arg, &karg, sizeof(karg))) {
1431 		pr_err("failure at %s:%d/%s()!\n",
1432 		    __FILE__, __LINE__, __func__);
1433 		return -EFAULT;
1434 	}
1435 	return 0;
1436 }
1437 
1438 /**
1439  * _ctl_diag_capability - return diag buffer capability
1440  * @ioc: per adapter object
1441  * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1442  *
1443  * returns 1 when diag buffer support is enabled in firmware
1444  */
1445 static u8
1446 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
1447 {
1448 	u8 rc = 0;
1449 
1450 	switch (buffer_type) {
1451 	case MPI2_DIAG_BUF_TYPE_TRACE:
1452 		if (ioc->facts.IOCCapabilities &
1453 		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1454 			rc = 1;
1455 		break;
1456 	case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1457 		if (ioc->facts.IOCCapabilities &
1458 		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1459 			rc = 1;
1460 		break;
1461 	case MPI2_DIAG_BUF_TYPE_EXTENDED:
1462 		if (ioc->facts.IOCCapabilities &
1463 		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
1464 			rc = 1;
1465 	}
1466 
1467 	return rc;
1468 }
1469 
1470 
1471 /**
1472  * _ctl_diag_register_2 - wrapper for registering diag buffer support
1473  * @ioc: per adapter object
1474  * @diag_register: the diag_register struct passed in from user space
1475  *
1476  */
1477 static long
1478 _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1479 	struct mpt3_diag_register *diag_register)
1480 {
1481 	int rc, i;
1482 	void *request_data = NULL;
1483 	dma_addr_t request_data_dma;
1484 	u32 request_data_sz = 0;
1485 	Mpi2DiagBufferPostRequest_t *mpi_request;
1486 	Mpi2DiagBufferPostReply_t *mpi_reply;
1487 	u8 buffer_type;
1488 	u16 smid;
1489 	u16 ioc_status;
1490 	u32 ioc_state;
1491 	u8 issue_reset = 0;
1492 
1493 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1494 	    __func__));
1495 
1496 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1497 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1498 		pr_err(MPT3SAS_FMT
1499 		    "%s: failed due to ioc not operational\n",
1500 		    ioc->name, __func__);
1501 		rc = -EAGAIN;
1502 		goto out;
1503 	}
1504 
1505 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1506 		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
1507 		    ioc->name, __func__);
1508 		rc = -EAGAIN;
1509 		goto out;
1510 	}
1511 
1512 	buffer_type = diag_register->buffer_type;
1513 	if (!_ctl_diag_capability(ioc, buffer_type)) {
1514 		pr_err(MPT3SAS_FMT
1515 			"%s: doesn't have capability for buffer_type(0x%02x)\n",
1516 			ioc->name, __func__, buffer_type);
1517 		return -EPERM;
1518 	}
1519 
1520 	if (ioc->diag_buffer_status[buffer_type] &
1521 	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
1522 		pr_err(MPT3SAS_FMT
1523 			"%s: already has a registered buffer for buffer_type(0x%02x)\n",
1524 			ioc->name, __func__,
1525 		    buffer_type);
1526 		return -EINVAL;
1527 	}
1528 
1529 	if (diag_register->requested_buffer_size % 4)  {
1530 		pr_err(MPT3SAS_FMT
1531 			"%s: the requested_buffer_size is not 4 byte aligned\n",
1532 			ioc->name, __func__);
1533 		return -EINVAL;
1534 	}
1535 
1536 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1537 	if (!smid) {
1538 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
1539 		    ioc->name, __func__);
1540 		rc = -EAGAIN;
1541 		goto out;
1542 	}
1543 
1544 	rc = 0;
1545 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1546 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1547 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1548 	ioc->ctl_cmds.smid = smid;
1549 
1550 	request_data = ioc->diag_buffer[buffer_type];
1551 	request_data_sz = diag_register->requested_buffer_size;
1552 	ioc->unique_id[buffer_type] = diag_register->unique_id;
1553 	ioc->diag_buffer_status[buffer_type] = 0;
1554 	memcpy(ioc->product_specific[buffer_type],
1555 	    diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
1556 	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
1557 
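	/* if a buffer of a different size was previously allocated, free it
	 * so a correctly sized one can be allocated below
	 */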
1558 	if (request_data) {
1559 		request_data_dma = ioc->diag_buffer_dma[buffer_type];
1560 		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1561 			pci_free_consistent(ioc->pdev,
1562 			    ioc->diag_buffer_sz[buffer_type],
1563 			    request_data, request_data_dma);
1564 			request_data = NULL;
1565 		}
1566 	}
1567 
1568 	if (request_data == NULL) {
1569 		ioc->diag_buffer_sz[buffer_type] = 0;
1570 		ioc->diag_buffer_dma[buffer_type] = 0;
1571 		request_data = pci_alloc_consistent(
1572 			ioc->pdev, request_data_sz, &request_data_dma);
1573 		if (request_data == NULL) {
1574 			pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
1575 			    " for diag buffers, requested size(%d)\n",
1576 			    ioc->name, __func__, request_data_sz);
1577 			mpt3sas_base_free_smid(ioc, smid);
1578 			return -ENOMEM;
1579 		}
1580 		ioc->diag_buffer[buffer_type] = request_data;
1581 		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1582 		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
1583 	}
1584 
1585 	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1586 	mpi_request->BufferType = diag_register->buffer_type;
1587 	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
1588 	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1589 	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1590 	mpi_request->VF_ID = 0; /* TODO */
1591 	mpi_request->VP_ID = 0;
1592 
1593 	dctlprintk(ioc, pr_info(MPT3SAS_FMT
1594 		"%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
1595 		ioc->name, __func__, request_data,
1596 	    (unsigned long long)request_data_dma,
1597 	    le32_to_cpu(mpi_request->BufferLength)));
1598 
1599 	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1600 		mpi_request->ProductSpecific[i] =
1601 			cpu_to_le32(ioc->product_specific[buffer_type][i]);
1602 
1603 	init_completion(&ioc->ctl_cmds.done);
1604 	ioc->put_smid_default(ioc, smid);
1605 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
1606 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1607 
1608 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1609 		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
1610 		    __func__);
1611 		_debug_dump_mf(mpi_request,
1612 		    sizeof(Mpi2DiagBufferPostRequest_t)/4);
1613 		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
1614 			issue_reset = 1;
1615 		goto issue_host_reset;
1616 	}
1617 
1618 	/* process the completed Reply Message Frame */
1619 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1620 		pr_err(MPT3SAS_FMT "%s: no reply message\n",
1621 		    ioc->name, __func__);
1622 		rc = -EFAULT;
1623 		goto out;
1624 	}
1625 
1626 	mpi_reply = ioc->ctl_cmds.reply;
1627 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1628 
1629 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1630 		ioc->diag_buffer_status[buffer_type] |=
1631 			MPT3_DIAG_BUFFER_IS_REGISTERED;
1632 		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
1633 		    ioc->name, __func__));
1634 	} else {
1635 		pr_info(MPT3SAS_FMT
1636 			"%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1637 			ioc->name, __func__,
1638 		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1639 		rc = -EFAULT;
1640 	}
1641 
1642  issue_host_reset:
1643 	if (issue_reset)
1644 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1645 
1646  out:
1647 
1648 	if (rc && request_data)
1649 		pci_free_consistent(ioc->pdev, request_data_sz,
1650 		    request_data, request_data_dma);
1651 
1652 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1653 	return rc;
1654 }
1655 
1656 /**
1657  * mpt3sas_enable_diag_buffer - enable diag buffer support at driver load time
1658  * @ioc: per adapter object
1659  * @bits_to_register: bitwise field where trace is bit 0, snapshot is bit 1,
1660  *	and extended is bit 2
1661  *
1662  * Called at driver load time when the command line option diag_buffer_enable is set.
1663  */
1664 void
1665 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
1666 {
1667 	struct mpt3_diag_register diag_register;
1668 
1669 	memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
1670 
1671 	if (bits_to_register & 1) {
1672 		pr_info(MPT3SAS_FMT "registering trace buffer support\n",
1673 		    ioc->name);
1674 		ioc->diag_trigger_master.MasterData =
1675 		    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
1676 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
1677 		/* register for 2MB buffers  */
1678 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
1679 		diag_register.unique_id = 0x7075900;
1680 		_ctl_diag_register_2(ioc,  &diag_register);
1681 	}
1682 
1683 	if (bits_to_register & 2) {
1684 		pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
1685 		    ioc->name);
1686 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
1687 		/* register for 2MB buffers  */
1688 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
1689 		diag_register.unique_id = 0x7075901;
1690 		_ctl_diag_register_2(ioc,  &diag_register);
1691 	}
1692 
1693 	if (bits_to_register & 4) {
1694 		pr_info(MPT3SAS_FMT "registering extended buffer support\n",
1695 		    ioc->name);
1696 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
1697 		/* register for 2MB buffers  */
1698 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
1699 		diag_register.unique_id = 0x7075901;
1700 		_ctl_diag_register_2(ioc,  &diag_register);
1701 	}
1702 }
1703 
1704 /**
1705  * _ctl_diag_register - application registers with the driver
1706  * @ioc: per adapter object
1707  * @arg - user space buffer containing ioctl content
1708  *
1709  * This will allow the driver to setup any required buffers that will be
1710  * needed by firmware to communicate with the driver.
1711  */
1712 static long
1713 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1714 {
1715 	struct mpt3_diag_register karg;
1716 	long rc;
1717 
1718 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1719 		pr_err("failure at %s:%d/%s()!\n",
1720 		    __FILE__, __LINE__, __func__);
1721 		return -EFAULT;
1722 	}
1723 
1724 	rc = _ctl_diag_register_2(ioc, &karg);
1725 	return rc;
1726 }
1727 
1728 /**
 * _ctl_diag_unregister - application unregisters a diag buffer with the driver
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 *
 * This allows the driver to clean up any memory allocated for diag
 * messages and to free up any resources.
1735  */
1736 static long
1737 _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1738 {
1739 	struct mpt3_diag_unregister karg;
1740 	void *request_data;
1741 	dma_addr_t request_data_dma;
1742 	u32 request_data_sz;
1743 	u8 buffer_type;
1744 
1745 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1746 		pr_err("failure at %s:%d/%s()!\n",
1747 		    __FILE__, __LINE__, __func__);
1748 		return -EFAULT;
1749 	}
1750 
1751 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1752 	    __func__));
1753 
1754 	buffer_type = karg.unique_id & 0x000000ff;
1755 	if (!_ctl_diag_capability(ioc, buffer_type)) {
1756 		pr_err(MPT3SAS_FMT
1757 			"%s: doesn't have capability for buffer_type(0x%02x)\n",
1758 			ioc->name, __func__, buffer_type);
1759 		return -EPERM;
1760 	}
1761 
1762 	if ((ioc->diag_buffer_status[buffer_type] &
1763 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1764 		pr_err(MPT3SAS_FMT
1765 			"%s: buffer_type(0x%02x) is not registered\n",
1766 			ioc->name, __func__, buffer_type);
1767 		return -EINVAL;
1768 	}
1769 	if ((ioc->diag_buffer_status[buffer_type] &
1770 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
1771 		pr_err(MPT3SAS_FMT
1772 			"%s: buffer_type(0x%02x) has not been released\n",
1773 			ioc->name, __func__, buffer_type);
1774 		return -EINVAL;
1775 	}
1776 
1777 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
1778 		pr_err(MPT3SAS_FMT
1779 			"%s: unique_id(0x%08x) is not registered\n",
1780 			ioc->name, __func__, karg.unique_id);
1781 		return -EINVAL;
1782 	}
1783 
1784 	request_data = ioc->diag_buffer[buffer_type];
1785 	if (!request_data) {
1786 		pr_err(MPT3SAS_FMT
1787 			"%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
1788 			ioc->name, __func__, buffer_type);
1789 		return -ENOMEM;
1790 	}
1791 
1792 	request_data_sz = ioc->diag_buffer_sz[buffer_type];
1793 	request_data_dma = ioc->diag_buffer_dma[buffer_type];
1794 	pci_free_consistent(ioc->pdev, request_data_sz,
1795 	    request_data, request_data_dma);
1796 	ioc->diag_buffer[buffer_type] = NULL;
1797 	ioc->diag_buffer_status[buffer_type] = 0;
1798 	return 0;
1799 }
1800 
1801 /**
1802  * _ctl_diag_query - query relevant info associated with diag buffers
1803  * @ioc: per adapter object
1804  * @arg - user space buffer containing ioctl content
1805  *
 * The application will send only buffer_type and unique_id.  The driver
 * inspects unique_id first; if it is valid, it fills in all the info.  If
 * unique_id is 0x00, the driver returns the info for the specified
 * buffer_type.
1809  */
1810 static long
1811 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1812 {
1813 	struct mpt3_diag_query karg;
1814 	void *request_data;
1815 	int i;
1816 	u8 buffer_type;
1817 
1818 	if (copy_from_user(&karg, arg, sizeof(karg))) {
1819 		pr_err("failure at %s:%d/%s()!\n",
1820 		    __FILE__, __LINE__, __func__);
1821 		return -EFAULT;
1822 	}
1823 
1824 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1825 	    __func__));
1826 
1827 	karg.application_flags = 0;
1828 	buffer_type = karg.buffer_type;
1829 
1830 	if (!_ctl_diag_capability(ioc, buffer_type)) {
1831 		pr_err(MPT3SAS_FMT
1832 			"%s: doesn't have capability for buffer_type(0x%02x)\n",
1833 			ioc->name, __func__, buffer_type);
1834 		return -EPERM;
1835 	}
1836 
1837 	if ((ioc->diag_buffer_status[buffer_type] &
1838 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1839 		pr_err(MPT3SAS_FMT
1840 			"%s: buffer_type(0x%02x) is not registered\n",
1841 			ioc->name, __func__, buffer_type);
1842 		return -EINVAL;
1843 	}
1844 
1845 	if (karg.unique_id & 0xffffff00) {
1846 		if (karg.unique_id != ioc->unique_id[buffer_type]) {
1847 			pr_err(MPT3SAS_FMT
1848 				"%s: unique_id(0x%08x) is not registered\n",
1849 				ioc->name, __func__, karg.unique_id);
1850 			return -EINVAL;
1851 		}
1852 	}
1853 
1854 	request_data = ioc->diag_buffer[buffer_type];
1855 	if (!request_data) {
1856 		pr_err(MPT3SAS_FMT
1857 			"%s: doesn't have buffer for buffer_type(0x%02x)\n",
1858 			ioc->name, __func__, buffer_type);
1859 		return -ENOMEM;
1860 	}
1861 
1862 	if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
1863 		karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1864 		    MPT3_APP_FLAGS_BUFFER_VALID);
1865 	else
1866 		karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1867 		    MPT3_APP_FLAGS_BUFFER_VALID |
1868 		    MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
1869 
1870 	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1871 		karg.product_specific[i] =
1872 		    ioc->product_specific[buffer_type][i];
1873 
1874 	karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
1875 	karg.driver_added_buffer_size = 0;
1876 	karg.unique_id = ioc->unique_id[buffer_type];
1877 	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
1878 
1879 	if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
1880 		pr_err(MPT3SAS_FMT
1881 			"%s: unable to write mpt3_diag_query data @ %p\n",
1882 			ioc->name, __func__, arg);
1883 		return -EFAULT;
1884 	}
1885 	return 0;
1886 }
1887 
1888 /**
1889  * mpt3sas_send_diag_release - Diag Release Message
1890  * @ioc: per adapter object
1891  * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
1892  * @issue_reset - specifies whether host reset is required.
1893  *
1894  */
1895 int
1896 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1897 	u8 *issue_reset)
1898 {
1899 	Mpi2DiagReleaseRequest_t *mpi_request;
1900 	Mpi2DiagReleaseReply_t *mpi_reply;
1901 	u16 smid;
1902 	u16 ioc_status;
1903 	u32 ioc_state;
1904 	int rc;
1905 
1906 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1907 	    __func__));
1908 
1909 	rc = 0;
1910 	*issue_reset = 0;
1911 
1912 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1913 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1914 		if (ioc->diag_buffer_status[buffer_type] &
1915 		    MPT3_DIAG_BUFFER_IS_REGISTERED)
1916 			ioc->diag_buffer_status[buffer_type] |=
1917 			    MPT3_DIAG_BUFFER_IS_RELEASED;
1918 		dctlprintk(ioc, pr_info(MPT3SAS_FMT
1919 			"%s: skipping due to FAULT state\n", ioc->name,
1920 		    __func__));
1921 		rc = -EAGAIN;
1922 		goto out;
1923 	}
1924 
1925 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1926 		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
1927 		    ioc->name, __func__);
1928 		rc = -EAGAIN;
1929 		goto out;
1930 	}
1931 
1932 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1933 	if (!smid) {
1934 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
1935 		    ioc->name, __func__);
1936 		rc = -EAGAIN;
1937 		goto out;
1938 	}
1939 
1940 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1941 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1942 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1943 	ioc->ctl_cmds.smid = smid;
1944 
1945 	mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
1946 	mpi_request->BufferType = buffer_type;
1947 	mpi_request->VF_ID = 0; /* TODO */
1948 	mpi_request->VP_ID = 0;
1949 
1950 	init_completion(&ioc->ctl_cmds.done);
1951 	ioc->put_smid_default(ioc, smid);
1952 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
1953 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1954 
1955 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1956 		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
1957 		    __func__);
1958 		_debug_dump_mf(mpi_request,
1959 		    sizeof(Mpi2DiagReleaseRequest_t)/4);
1960 		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
1961 			*issue_reset = 1;
1962 		rc = -EFAULT;
1963 		goto out;
1964 	}
1965 
1966 	/* process the completed Reply Message Frame */
1967 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1968 		pr_err(MPT3SAS_FMT "%s: no reply message\n",
1969 		    ioc->name, __func__);
1970 		rc = -EFAULT;
1971 		goto out;
1972 	}
1973 
1974 	mpi_reply = ioc->ctl_cmds.reply;
1975 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1976 
1977 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1978 		ioc->diag_buffer_status[buffer_type] |=
1979 		    MPT3_DIAG_BUFFER_IS_RELEASED;
1980 		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
1981 		    ioc->name, __func__));
1982 	} else {
1983 		pr_info(MPT3SAS_FMT
1984 			"%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1985 			ioc->name, __func__,
1986 		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1987 		rc = -EFAULT;
1988 	}
1989 
1990  out:
1991 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1992 	return rc;
1993 }
1994 
1995 /**
 * _ctl_diag_release - request to send Diag Release Message to firmware
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 *
 * This allows ownership of the specified buffer to be returned to the driver,
 * allowing an application to read the buffer without fear that firmware is
 * overwriting information in the buffer.
2002  */
2003 static long
2004 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2005 {
2006 	struct mpt3_diag_release karg;
2007 	void *request_data;
2008 	int rc;
2009 	u8 buffer_type;
2010 	u8 issue_reset = 0;
2011 
2012 	if (copy_from_user(&karg, arg, sizeof(karg))) {
2013 		pr_err("failure at %s:%d/%s()!\n",
2014 		    __FILE__, __LINE__, __func__);
2015 		return -EFAULT;
2016 	}
2017 
2018 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2019 	    __func__));
2020 
2021 	buffer_type = karg.unique_id & 0x000000ff;
2022 	if (!_ctl_diag_capability(ioc, buffer_type)) {
2023 		pr_err(MPT3SAS_FMT
2024 			"%s: doesn't have capability for buffer_type(0x%02x)\n",
2025 			ioc->name, __func__, buffer_type);
2026 		return -EPERM;
2027 	}
2028 
2029 	if ((ioc->diag_buffer_status[buffer_type] &
2030 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2031 		pr_err(MPT3SAS_FMT
2032 			"%s: buffer_type(0x%02x) is not registered\n",
2033 			ioc->name, __func__, buffer_type);
2034 		return -EINVAL;
2035 	}
2036 
2037 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
2038 		pr_err(MPT3SAS_FMT
2039 			"%s: unique_id(0x%08x) is not registered\n",
2040 			ioc->name, __func__, karg.unique_id);
2041 		return -EINVAL;
2042 	}
2043 
2044 	if (ioc->diag_buffer_status[buffer_type] &
2045 	    MPT3_DIAG_BUFFER_IS_RELEASED) {
2046 		pr_err(MPT3SAS_FMT
2047 			"%s: buffer_type(0x%02x) is already released\n",
2048 			ioc->name, __func__,
2049 		    buffer_type);
2050 		return 0;
2051 	}
2052 
2053 	request_data = ioc->diag_buffer[buffer_type];
2054 
2055 	if (!request_data) {
2056 		pr_err(MPT3SAS_FMT
2057 			"%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
2058 			ioc->name, __func__, buffer_type);
2059 		return -ENOMEM;
2060 	}
2061 
	/* buffers were released due to host reset */
2063 	if ((ioc->diag_buffer_status[buffer_type] &
2064 	    MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
2065 		ioc->diag_buffer_status[buffer_type] |=
2066 		    MPT3_DIAG_BUFFER_IS_RELEASED;
2067 		ioc->diag_buffer_status[buffer_type] &=
2068 		    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
2069 		pr_err(MPT3SAS_FMT
2070 			"%s: buffer_type(0x%02x) was released due to host reset\n",
2071 			ioc->name, __func__, buffer_type);
2072 		return 0;
2073 	}
2074 
2075 	rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
2076 
2077 	if (issue_reset)
2078 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2079 
2080 	return rc;
2081 }
2082 
2083 /**
2084  * _ctl_diag_read_buffer - request for copy of the diag buffer
2085  * @ioc: per adapter object
2086  * @arg - user space buffer containing ioctl content
2087  */
2088 static long
2089 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2090 {
2091 	struct mpt3_diag_read_buffer karg;
2092 	struct mpt3_diag_read_buffer __user *uarg = arg;
2093 	void *request_data, *diag_data;
2094 	Mpi2DiagBufferPostRequest_t *mpi_request;
2095 	Mpi2DiagBufferPostReply_t *mpi_reply;
2096 	int rc, i;
2097 	u8 buffer_type;
2098 	unsigned long request_size, copy_size;
2099 	u16 smid;
2100 	u16 ioc_status;
2101 	u8 issue_reset = 0;
2102 
2103 	if (copy_from_user(&karg, arg, sizeof(karg))) {
2104 		pr_err("failure at %s:%d/%s()!\n",
2105 		    __FILE__, __LINE__, __func__);
2106 		return -EFAULT;
2107 	}
2108 
2109 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2110 	    __func__));
2111 
2112 	buffer_type = karg.unique_id & 0x000000ff;
2113 	if (!_ctl_diag_capability(ioc, buffer_type)) {
2114 		pr_err(MPT3SAS_FMT
2115 			"%s: doesn't have capability for buffer_type(0x%02x)\n",
2116 			ioc->name, __func__, buffer_type);
2117 		return -EPERM;
2118 	}
2119 
2120 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
2121 		pr_err(MPT3SAS_FMT
2122 			"%s: unique_id(0x%08x) is not registered\n",
2123 			ioc->name, __func__, karg.unique_id);
2124 		return -EINVAL;
2125 	}
2126 
2127 	request_data = ioc->diag_buffer[buffer_type];
2128 	if (!request_data) {
2129 		pr_err(MPT3SAS_FMT
2130 			"%s: doesn't have buffer for buffer_type(0x%02x)\n",
2131 			ioc->name, __func__, buffer_type);
2132 		return -ENOMEM;
2133 	}
2134 
2135 	request_size = ioc->diag_buffer_sz[buffer_type];
2136 
2137 	if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
2138 		pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
2139 		    "or bytes_to_read are not 4 byte aligned\n", ioc->name,
2140 		    __func__);
2141 		return -EINVAL;
2142 	}
2143 
2144 	if (karg.starting_offset > request_size)
2145 		return -EINVAL;
2146 
2147 	diag_data = (void *)(request_data + karg.starting_offset);
2148 	dctlprintk(ioc, pr_info(MPT3SAS_FMT
2149 		"%s: diag_buffer(%p), offset(%d), sz(%d)\n",
2150 		ioc->name, __func__,
2151 	    diag_data, karg.starting_offset, karg.bytes_to_read));
2152 
2153 	/* Truncate data on requests that are too large */
2154 	if ((diag_data + karg.bytes_to_read < diag_data) ||
2155 	    (diag_data + karg.bytes_to_read > request_data + request_size))
2156 		copy_size = request_size - karg.starting_offset;
2157 	else
2158 		copy_size = karg.bytes_to_read;
2159 
2160 	if (copy_to_user((void __user *)uarg->diagnostic_data,
2161 	    diag_data, copy_size)) {
2162 		pr_err(MPT3SAS_FMT
2163 			"%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
2164 			ioc->name, __func__, diag_data);
2165 		return -EFAULT;
2166 	}
2167 
2168 	if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
2169 		return 0;
2170 
2171 	dctlprintk(ioc, pr_info(MPT3SAS_FMT
2172 		"%s: Reregister buffer_type(0x%02x)\n",
2173 		ioc->name, __func__, buffer_type));
2174 	if ((ioc->diag_buffer_status[buffer_type] &
2175 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
2176 		dctlprintk(ioc, pr_info(MPT3SAS_FMT
2177 			"%s: buffer_type(0x%02x) is still registered\n",
2178 			ioc->name, __func__, buffer_type));
2179 		return 0;
2180 	}
	/* Get a free request frame and save the message context. */
2183 
2184 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2185 		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
2186 		    ioc->name, __func__);
2187 		rc = -EAGAIN;
2188 		goto out;
2189 	}
2190 
2191 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2192 	if (!smid) {
2193 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
2194 		    ioc->name, __func__);
2195 		rc = -EAGAIN;
2196 		goto out;
2197 	}
2198 
2199 	rc = 0;
2200 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2201 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2202 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2203 	ioc->ctl_cmds.smid = smid;
2204 
2205 	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
2206 	mpi_request->BufferType = buffer_type;
2207 	mpi_request->BufferLength =
2208 	    cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
2209 	mpi_request->BufferAddress =
2210 	    cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
2211 	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2212 		mpi_request->ProductSpecific[i] =
2213 			cpu_to_le32(ioc->product_specific[buffer_type][i]);
2214 	mpi_request->VF_ID = 0; /* TODO */
2215 	mpi_request->VP_ID = 0;
2216 
2217 	init_completion(&ioc->ctl_cmds.done);
2218 	ioc->put_smid_default(ioc, smid);
2219 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
2220 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2221 
2222 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2223 		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
2224 		    __func__);
2225 		_debug_dump_mf(mpi_request,
2226 		    sizeof(Mpi2DiagBufferPostRequest_t)/4);
2227 		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
2228 			issue_reset = 1;
2229 		goto issue_host_reset;
2230 	}
2231 
2232 	/* process the completed Reply Message Frame */
2233 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2234 		pr_err(MPT3SAS_FMT "%s: no reply message\n",
2235 		    ioc->name, __func__);
2236 		rc = -EFAULT;
2237 		goto out;
2238 	}
2239 
2240 	mpi_reply = ioc->ctl_cmds.reply;
2241 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2242 
2243 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2244 		ioc->diag_buffer_status[buffer_type] |=
2245 		    MPT3_DIAG_BUFFER_IS_REGISTERED;
2246 		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
2247 		    ioc->name, __func__));
2248 	} else {
2249 		pr_info(MPT3SAS_FMT
2250 			"%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2251 			ioc->name, __func__,
2252 		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2253 		rc = -EFAULT;
2254 	}
2255 
2256  issue_host_reset:
2257 	if (issue_reset)
2258 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2259 
2260  out:
2261 
2262 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2263 	return rc;
2264 }
2265 
2266 
2267 
2268 #ifdef CONFIG_COMPAT
2269 /**
2270  * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2271  * @ioc: per adapter object
2272  * @cmd - ioctl opcode
2273  * @arg - (struct mpt3_ioctl_command32)
2274  *
2275  * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
2276  */
2277 static long
2278 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
2279 	void __user *arg)
2280 {
2281 	struct mpt3_ioctl_command32 karg32;
2282 	struct mpt3_ioctl_command32 __user *uarg;
2283 	struct mpt3_ioctl_command karg;
2284 
2285 	if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
2286 		return -EINVAL;
2287 
2288 	uarg = (struct mpt3_ioctl_command32 __user *) arg;
2289 
2290 	if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2291 		pr_err("failure at %s:%d/%s()!\n",
2292 		    __FILE__, __LINE__, __func__);
2293 		return -EFAULT;
2294 	}
2295 
2296 	memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
2297 	karg.hdr.ioc_number = karg32.hdr.ioc_number;
2298 	karg.hdr.port_number = karg32.hdr.port_number;
2299 	karg.hdr.max_data_size = karg32.hdr.max_data_size;
2300 	karg.timeout = karg32.timeout;
2301 	karg.max_reply_bytes = karg32.max_reply_bytes;
2302 	karg.data_in_size = karg32.data_in_size;
2303 	karg.data_out_size = karg32.data_out_size;
2304 	karg.max_sense_bytes = karg32.max_sense_bytes;
2305 	karg.data_sge_offset = karg32.data_sge_offset;
2306 	karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2307 	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2308 	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2309 	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2310 	return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2311 }
2312 #endif
2313 
2314 /**
2315  * _ctl_ioctl_main - main ioctl entry point
2316  * @file - (struct file)
2317  * @cmd - ioctl opcode
2318  * @arg - user space data buffer
2319  * @compat - handles 32 bit applications in 64bit os
2320  * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
2321  * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
2322  */
2323 static long
2324 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2325 	u8 compat, u16 mpi_version)
2326 {
2327 	struct MPT3SAS_ADAPTER *ioc;
2328 	struct mpt3_ioctl_header ioctl_header;
2329 	enum block_state state;
2330 	long ret = -EINVAL;
2331 
2332 	/* get IOCTL header */
2333 	if (copy_from_user(&ioctl_header, (char __user *)arg,
2334 	    sizeof(struct mpt3_ioctl_header))) {
2335 		pr_err("failure at %s:%d/%s()!\n",
2336 		    __FILE__, __LINE__, __func__);
2337 		return -EFAULT;
2338 	}
2339 
2340 	if (_ctl_verify_adapter(ioctl_header.ioc_number,
2341 				&ioc, mpi_version) == -1 || !ioc)
2342 		return -ENODEV;
2343 
2344 	/* pci_access_mutex lock acquired by ioctl path */
2345 	mutex_lock(&ioc->pci_access_mutex);
2346 
2347 	if (ioc->shost_recovery || ioc->pci_error_recovery ||
2348 	    ioc->is_driver_loading || ioc->remove_host) {
2349 		ret = -EAGAIN;
2350 		goto out_unlock_pciaccess;
2351 	}
2352 
2353 	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2354 	if (state == NON_BLOCKING) {
2355 		if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
2356 			ret = -EAGAIN;
2357 			goto out_unlock_pciaccess;
2358 		}
2359 	} else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2360 		ret = -ERESTARTSYS;
2361 		goto out_unlock_pciaccess;
2362 	}
2363 
2364 
2365 	switch (cmd) {
2366 	case MPT3IOCINFO:
2367 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
2368 			ret = _ctl_getiocinfo(ioc, arg);
2369 		break;
2370 #ifdef CONFIG_COMPAT
2371 	case MPT3COMMAND32:
2372 #endif
2373 	case MPT3COMMAND:
2374 	{
2375 		struct mpt3_ioctl_command __user *uarg;
2376 		struct mpt3_ioctl_command karg;
2377 
2378 #ifdef CONFIG_COMPAT
2379 		if (compat) {
2380 			ret = _ctl_compat_mpt_command(ioc, cmd, arg);
2381 			break;
2382 		}
2383 #endif
2384 		if (copy_from_user(&karg, arg, sizeof(karg))) {
2385 			pr_err("failure at %s:%d/%s()!\n",
2386 			    __FILE__, __LINE__, __func__);
2387 			ret = -EFAULT;
2388 			break;
2389 		}
2390 
2391 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
2392 			uarg = arg;
2393 			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2394 		}
2395 		break;
2396 	}
2397 	case MPT3EVENTQUERY:
2398 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
2399 			ret = _ctl_eventquery(ioc, arg);
2400 		break;
2401 	case MPT3EVENTENABLE:
2402 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
2403 			ret = _ctl_eventenable(ioc, arg);
2404 		break;
2405 	case MPT3EVENTREPORT:
2406 		ret = _ctl_eventreport(ioc, arg);
2407 		break;
2408 	case MPT3HARDRESET:
2409 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
2410 			ret = _ctl_do_reset(ioc, arg);
2411 		break;
2412 	case MPT3BTDHMAPPING:
2413 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
2414 			ret = _ctl_btdh_mapping(ioc, arg);
2415 		break;
2416 	case MPT3DIAGREGISTER:
2417 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
2418 			ret = _ctl_diag_register(ioc, arg);
2419 		break;
2420 	case MPT3DIAGUNREGISTER:
2421 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
2422 			ret = _ctl_diag_unregister(ioc, arg);
2423 		break;
2424 	case MPT3DIAGQUERY:
2425 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
2426 			ret = _ctl_diag_query(ioc, arg);
2427 		break;
2428 	case MPT3DIAGRELEASE:
2429 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
2430 			ret = _ctl_diag_release(ioc, arg);
2431 		break;
2432 	case MPT3DIAGREADBUFFER:
2433 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
2434 			ret = _ctl_diag_read_buffer(ioc, arg);
2435 		break;
2436 	default:
2437 		dctlprintk(ioc, pr_info(MPT3SAS_FMT
2438 		    "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
2439 		break;
2440 	}
2441 
2442 	mutex_unlock(&ioc->ctl_cmds.mutex);
2443 out_unlock_pciaccess:
2444 	mutex_unlock(&ioc->pci_access_mutex);
2445 	return ret;
2446 }
2447 
2448 /**
2449  * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
2450  * @file - (struct file)
2451  * @cmd - ioctl opcode
2452  * @arg -
2453  */
2454 static long
2455 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2456 {
2457 	long ret;
2458 
2459 	/* pass MPI25_VERSION | MPI26_VERSION value,
2460 	 * to indicate that this ioctl cmd
2461 	 * came from mpt3ctl ioctl device.
2462 	 */
2463 	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
2464 		MPI25_VERSION | MPI26_VERSION);
2465 	return ret;
2466 }
2467 
2468 /**
2469  * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
2470  * @file - (struct file)
2471  * @cmd - ioctl opcode
2472  * @arg -
2473  */
2474 static long
2475 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2476 {
2477 	long ret;
2478 
2479 	/* pass MPI2_VERSION value, to indicate that this ioctl cmd
2480 	 * came from mpt2ctl ioctl device.
2481 	 */
2482 	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
2483 	return ret;
2484 }
2485 #ifdef CONFIG_COMPAT
2486 /**
 * _ctl_ioctl_compat - main ioctl entry point (compat)
2488  * @file -
2489  * @cmd -
2490  * @arg -
2491  *
2492  * This routine handles 32 bit applications in 64bit os.
2493  */
2494 static long
2495 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2496 {
2497 	long ret;
2498 
2499 	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
2500 		MPI25_VERSION | MPI26_VERSION);
2501 	return ret;
2502 }
2503 
2504 /**
 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
2506  * @file -
2507  * @cmd -
2508  * @arg -
2509  *
2510  * This routine handles 32 bit applications in 64bit os.
2511  */
2512 static long
2513 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2514 {
2515 	long ret;
2516 
2517 	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
2518 	return ret;
2519 }
2520 #endif
2521 
2522 /* scsi host attributes */
2523 /**
2524  * _ctl_version_fw_show - firmware version
2525  * @cdev - pointer to embedded class device
2526  * @buf - the buffer returned
2527  *
2528  * A sysfs 'read-only' shost attribute.
2529  */
2530 static ssize_t
2531 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
2532 	char *buf)
2533 {
2534 	struct Scsi_Host *shost = class_to_shost(cdev);
2535 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2536 
2537 	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2538 	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2539 	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2540 	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2541 	    ioc->facts.FWVersion.Word & 0x000000FF);
2542 }
2543 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
2544 
2545 /**
2546  * _ctl_version_bios_show - bios version
2547  * @cdev - pointer to embedded class device
2548  * @buf - the buffer returned
2549  *
2550  * A sysfs 'read-only' shost attribute.
2551  */
2552 static ssize_t
2553 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
2554 	char *buf)
2555 {
2556 	struct Scsi_Host *shost = class_to_shost(cdev);
2557 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2558 
2559 	u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2560 
2561 	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2562 	    (version & 0xFF000000) >> 24,
2563 	    (version & 0x00FF0000) >> 16,
2564 	    (version & 0x0000FF00) >> 8,
2565 	    version & 0x000000FF);
2566 }
2567 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
2568 
2569 /**
2570  * _ctl_version_mpi_show - MPI (message passing interface) version
2571  * @cdev - pointer to embedded class device
2572  * @buf - the buffer returned
2573  *
2574  * A sysfs 'read-only' shost attribute.
2575  */
2576 static ssize_t
2577 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
2578 	char *buf)
2579 {
2580 	struct Scsi_Host *shost = class_to_shost(cdev);
2581 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2582 
2583 	return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
2584 	    ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2585 }
2586 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
2587 
2588 /**
2589  * _ctl_version_product_show - product name
2590  * @cdev - pointer to embedded class device
2591  * @buf - the buffer returned
2592  *
2593  * A sysfs 'read-only' shost attribute.
2594  */
2595 static ssize_t
2596 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
2597 	char *buf)
2598 {
2599 	struct Scsi_Host *shost = class_to_shost(cdev);
2600 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2601 
2602 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2603 }
2604 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
2605 
2606 /**
 * _ctl_version_nvdata_persistent_show - nvdata persistent version
2608  * @cdev - pointer to embedded class device
2609  * @buf - the buffer returned
2610  *
2611  * A sysfs 'read-only' shost attribute.
2612  */
2613 static ssize_t
2614 _ctl_version_nvdata_persistent_show(struct device *cdev,
2615 	struct device_attribute *attr, char *buf)
2616 {
2617 	struct Scsi_Host *shost = class_to_shost(cdev);
2618 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2619 
2620 	return snprintf(buf, PAGE_SIZE, "%08xh\n",
2621 	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2622 }
2623 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
2624 	_ctl_version_nvdata_persistent_show, NULL);
2625 
2626 /**
2627  * _ctl_version_nvdata_default_show - nvdata default version
2628  * @cdev - pointer to embedded class device
2629  * @buf - the buffer returned
2630  *
2631  * A sysfs 'read-only' shost attribute.
2632  */
2633 static ssize_t
2634 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
2635 	*attr, char *buf)
2636 {
2637 	struct Scsi_Host *shost = class_to_shost(cdev);
2638 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2639 
2640 	return snprintf(buf, PAGE_SIZE, "%08xh\n",
2641 	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2642 }
2643 static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
2644 	_ctl_version_nvdata_default_show, NULL);
2645 
2646 /**
2647  * _ctl_board_name_show - board name
2648  * @cdev - pointer to embedded class device
2649  * @buf - the buffer returned
2650  *
2651  * A sysfs 'read-only' shost attribute.
2652  */
2653 static ssize_t
2654 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
2655 	char *buf)
2656 {
2657 	struct Scsi_Host *shost = class_to_shost(cdev);
2658 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2659 
2660 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2661 }
2662 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
2663 
2664 /**
2665  * _ctl_board_assembly_show - board assembly name
2666  * @cdev - pointer to embedded class device
2667  * @buf - the buffer returned
2668  *
2669  * A sysfs 'read-only' shost attribute.
2670  */
2671 static ssize_t
2672 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
2673 	char *buf)
2674 {
2675 	struct Scsi_Host *shost = class_to_shost(cdev);
2676 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2677 
2678 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2679 }
2680 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
2681 
2682 /**
2683  * _ctl_board_tracer_show - board tracer number
2684  * @cdev - pointer to embedded class device
2685  * @buf - the buffer returned
2686  *
2687  * A sysfs 'read-only' shost attribute.
2688  */
2689 static ssize_t
2690 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
2691 	char *buf)
2692 {
2693 	struct Scsi_Host *shost = class_to_shost(cdev);
2694 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2695 
2696 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2697 }
2698 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
2699 
2700 /**
2701  * _ctl_io_delay_show - io missing delay
2702  * @cdev - pointer to embedded class device
2703  * @buf - the buffer returned
2704  *
 * This is the firmware implementation for debouncing device
 * removal events.
2707  *
2708  * A sysfs 'read-only' shost attribute.
2709  */
2710 static ssize_t
2711 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
2712 	char *buf)
2713 {
2714 	struct Scsi_Host *shost = class_to_shost(cdev);
2715 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2716 
2717 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
2718 }
2719 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
2720 
2721 /**
2722  * _ctl_device_delay_show - device missing delay
2723  * @cdev - pointer to embedded class device
2724  * @buf - the buffer returned
2725  *
 * This is the firmware implementation for debouncing device
 * removal events.
2728  *
2729  * A sysfs 'read-only' shost attribute.
2730  */
2731 static ssize_t
2732 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
2733 	char *buf)
2734 {
2735 	struct Scsi_Host *shost = class_to_shost(cdev);
2736 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2737 
2738 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
2739 }
2740 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
2741 
2742 /**
2743  * _ctl_fw_queue_depth_show - global credits
2744  * @cdev - pointer to embedded class device
2745  * @buf - the buffer returned
2746  *
 * This is the firmware queue depth limit
2748  *
2749  * A sysfs 'read-only' shost attribute.
2750  */
2751 static ssize_t
2752 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
2753 	char *buf)
2754 {
2755 	struct Scsi_Host *shost = class_to_shost(cdev);
2756 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2757 
2758 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
2759 }
2760 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
2761 
2762 /**
 * _ctl_host_sas_address_show - sas address
2764  * @cdev - pointer to embedded class device
2765  * @buf - the buffer returned
2766  *
2767  * This is the controller sas address
2768  *
2769  * A sysfs 'read-only' shost attribute.
2770  */
2771 static ssize_t
2772 _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
2773 	char *buf)
2774 
2775 {
2776 	struct Scsi_Host *shost = class_to_shost(cdev);
2777 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2778 
2779 	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
2780 	    (unsigned long long)ioc->sas_hba.sas_address);
2781 }
2782 static DEVICE_ATTR(host_sas_address, S_IRUGO,
2783 	_ctl_host_sas_address_show, NULL);
2784 
2785 /**
2786  * _ctl_logging_level_show - logging level
2787  * @cdev - pointer to embedded class device
2788  * @buf - the buffer returned
2789  *
2790  * A sysfs 'read/write' shost attribute.
2791  */
2792 static ssize_t
2793 _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
2794 	char *buf)
2795 {
2796 	struct Scsi_Host *shost = class_to_shost(cdev);
2797 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2798 
2799 	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
2800 }
2801 static ssize_t
2802 _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
2803 	const char *buf, size_t count)
2804 {
2805 	struct Scsi_Host *shost = class_to_shost(cdev);
2806 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2807 	int val = 0;
2808 
2809 	if (sscanf(buf, "%x", &val) != 1)
2810 		return -EINVAL;
2811 
2812 	ioc->logging_level = val;
2813 	pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
2814 	    ioc->logging_level);
2815 	return strlen(buf);
2816 }
2817 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
2818 	_ctl_logging_level_store);
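
/*
 * Illustrative note (assumption about the sysfs path): the store routine
 * above parses the value as hexadecimal, so for example
 *
 *	echo 3f8 > /sys/class/scsi_host/host0/logging_level
 *
 * sets logging_level to 0x3f8, and reading the attribute back reports the
 * value in the same "%08xh" format.
 */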
2819 
2820 /**
2821  * _ctl_fwfault_debug_show - show/store fwfault_debug
2822  * @cdev - pointer to embedded class device
2823  * @buf - the buffer returned
2824  *
 * mpt3sas_fwfault_debug is a command line option.
2826  * A sysfs 'read/write' shost attribute.
2827  */
2828 static ssize_t
2829 _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
2830 	char *buf)
2831 {
2832 	struct Scsi_Host *shost = class_to_shost(cdev);
2833 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2834 
2835 	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
2836 }
2837 static ssize_t
2838 _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
2839 	const char *buf, size_t count)
2840 {
2841 	struct Scsi_Host *shost = class_to_shost(cdev);
2842 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2843 	int val = 0;
2844 
2845 	if (sscanf(buf, "%d", &val) != 1)
2846 		return -EINVAL;
2847 
2848 	ioc->fwfault_debug = val;
2849 	pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
2850 	    ioc->fwfault_debug);
2851 	return strlen(buf);
2852 }
2853 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
2854 	_ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
2855 
2856 /**
2857  * _ctl_ioc_reset_count_show - ioc reset count
2858  * @cdev - pointer to embedded class device
2859  * @buf - the buffer returned
2860  *
 * This is the number of times the IOC has been reset
2862  *
2863  * A sysfs 'read-only' shost attribute.
2864  */
2865 static ssize_t
2866 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
2867 	char *buf)
2868 {
2869 	struct Scsi_Host *shost = class_to_shost(cdev);
2870 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2871 
2872 	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
2873 }
2874 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
2875 
2876 /**
2877  * _ctl_ioc_reply_queue_count_show - number of reply queues
2878  * @cdev - pointer to embedded class device
2879  * @buf - the buffer returned
2880  *
 * This is the number of reply queues
2882  *
2883  * A sysfs 'read-only' shost attribute.
2884  */
2885 static ssize_t
2886 _ctl_ioc_reply_queue_count_show(struct device *cdev,
2887 	struct device_attribute *attr, char *buf)
2888 {
2889 	u8 reply_queue_count;
2890 	struct Scsi_Host *shost = class_to_shost(cdev);
2891 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2892 
2893 	if ((ioc->facts.IOCCapabilities &
2894 	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
2895 		reply_queue_count = ioc->reply_queue_count;
2896 	else
2897 		reply_queue_count = 1;
2898 
2899 	return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
2900 }
2901 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
2902 	NULL);
2903 
2904 /**
2905  * _ctl_BRM_status_show - Backup Rail Monitor Status
2906  * @cdev - pointer to embedded class device
2907  * @buf - the buffer returned
2908  *
 * This is the Backup Rail Monitor (BRM) status, applicable only to
 * warpdrive controllers
2910  *
2911  * A sysfs 'read-only' shost attribute.
2912  */
2913 static ssize_t
2914 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
2915 	char *buf)
2916 {
2917 	struct Scsi_Host *shost = class_to_shost(cdev);
2918 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2919 	Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
2920 	Mpi2ConfigReply_t mpi_reply;
2921 	u16 backup_rail_monitor_status = 0;
2922 	u16 ioc_status;
2923 	int sz;
2924 	ssize_t rc = 0;
2925 
2926 	if (!ioc->is_warpdrive) {
2927 		pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
2928 		    " warpdrive\n", ioc->name, __func__);
2929 		goto out;
2930 	}
2931 	/* pci_access_mutex lock acquired by sysfs show path */
2932 	mutex_lock(&ioc->pci_access_mutex);
2933 	if (ioc->pci_error_recovery || ioc->remove_host) {
2934 		mutex_unlock(&ioc->pci_access_mutex);
2935 		return 0;
2936 	}
2937 
	/* allocate up to GPIOVal 36 entries */
2939 	sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
2940 	io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
2941 	if (!io_unit_pg3) {
2942 		pr_err(MPT3SAS_FMT "%s: failed allocating memory "
2943 		    "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
2944 		goto out;
2945 	}
2946 
2947 	if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
2948 	    0) {
2949 		pr_err(MPT3SAS_FMT
2950 		    "%s: failed reading iounit_pg3\n", ioc->name,
2951 		    __func__);
2952 		goto out;
2953 	}
2954 
2955 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
2956 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2957 		pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
2958 		    "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
2959 		goto out;
2960 	}
2961 
2962 	if (io_unit_pg3->GPIOCount < 25) {
2963 		pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
2964 		     "25 entries, detected (%d) entries\n", ioc->name, __func__,
2965 		    io_unit_pg3->GPIOCount);
2966 		goto out;
2967 	}
2968 
2969 	/* BRM status is in bit zero of GPIOVal[24] */
2970 	backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
2971 	rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
2972 
2973  out:
2974 	kfree(io_unit_pg3);
2975 	mutex_unlock(&ioc->pci_access_mutex);
2976 	return rc;
2977 }
2978 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
2979 
2980 struct DIAG_BUFFER_START {
2981 	__le32	Size;
2982 	__le32	DiagVersion;
2983 	u8	BufferType;
2984 	u8	Reserved[3];
2985 	__le32	Reserved1;
2986 	__le32	Reserved2;
2987 	__le32	Reserved3;
2988 };
2989 
2990 /**
2991  * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
2992  * @cdev - pointer to embedded class device
2993  * @buf - the buffer returned
2994  *
2995  * A sysfs 'read-only' shost attribute.
2996  */
2997 static ssize_t
2998 _ctl_host_trace_buffer_size_show(struct device *cdev,
2999 	struct device_attribute *attr, char *buf)
3000 {
3001 	struct Scsi_Host *shost = class_to_shost(cdev);
3002 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3003 	u32 size = 0;
3004 	struct DIAG_BUFFER_START *request_data;
3005 
3006 	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3007 		pr_err(MPT3SAS_FMT
3008 			"%s: host_trace_buffer is not registered\n",
3009 			ioc->name, __func__);
3010 		return 0;
3011 	}
3012 
3013 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3014 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3015 		pr_err(MPT3SAS_FMT
3016 			"%s: host_trace_buffer is not registered\n",
3017 			ioc->name, __func__);
3018 		return 0;
3019 	}
3020 
3021 	request_data = (struct DIAG_BUFFER_START *)
3022 	    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
3023 	if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
3024 	    le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
3025 	    le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
3026 	    le32_to_cpu(request_data->Reserved3) == 0x4742444c)
3027 		size = le32_to_cpu(request_data->Size);
3028 
3029 	ioc->ring_buffer_sz = size;
3030 	return snprintf(buf, PAGE_SIZE, "%d\n", size);
3031 }
3032 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
3033 	_ctl_host_trace_buffer_size_show, NULL);
3034 
3035 /**
3036  * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
3037  * @cdev - pointer to embedded class device
3038  * @buf - the buffer returned
3039  *
3040  * A sysfs 'read/write' shost attribute.
3041  *
 * You will only be able to read 4k bytes of the ring buffer at a time.
 * In order to read beyond 4k bytes, write the desired offset to this
 * same attribute; doing so moves the read pointer.
3045  */
3046 static ssize_t
3047 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
3048 	char *buf)
3049 {
3050 	struct Scsi_Host *shost = class_to_shost(cdev);
3051 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3052 	void *request_data;
3053 	u32 size;
3054 
3055 	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3056 		pr_err(MPT3SAS_FMT
3057 			"%s: host_trace_buffer is not registered\n",
3058 			ioc->name, __func__);
3059 		return 0;
3060 	}
3061 
3062 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3063 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3064 		pr_err(MPT3SAS_FMT
3065 			"%s: host_trace_buffer is not registered\n",
3066 			ioc->name, __func__);
3067 		return 0;
3068 	}
3069 
3070 	if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
3071 		return 0;
3072 
3073 	size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
3074 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3075 	request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
3076 	memcpy(buf, request_data, size);
3077 	return size;
3078 }
3079 
3080 static ssize_t
3081 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
3082 	const char *buf, size_t count)
3083 {
3084 	struct Scsi_Host *shost = class_to_shost(cdev);
3085 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3086 	int val = 0;
3087 
3088 	if (sscanf(buf, "%d", &val) != 1)
3089 		return -EINVAL;
3090 
3091 	ioc->ring_buffer_offset = val;
3092 	return strlen(buf);
3093 }
3094 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
3095 	_ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
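
/*
 * Illustrative note (assumption about the sysfs path): because the show
 * routine above returns at most one page per read, a larger trace buffer is
 * walked by alternately writing the next offset and re-reading the
 * attribute, e.g. from a shell:
 *
 *	echo 0    > /sys/class/scsi_host/host0/host_trace_buffer
 *	cat /sys/class/scsi_host/host0/host_trace_buffer > chunk0
 *	echo 4096 > /sys/class/scsi_host/host0/host_trace_buffer
 *	cat /sys/class/scsi_host/host0/host_trace_buffer > chunk1
 *
 * continuing until the offset reaches host_trace_buffer_size.
 */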
3096 
3097 
3098 /*****************************************/
3099 
3100 /**
3101  * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
3102  * @cdev - pointer to embedded class device
3103  * @buf - the buffer returned
3104  *
3105  * A sysfs 'read/write' shost attribute.
3106  *
 * This is a mechanism to post/release host_trace_buffers
3108  */
3109 static ssize_t
3110 _ctl_host_trace_buffer_enable_show(struct device *cdev,
3111 	struct device_attribute *attr, char *buf)
3112 {
3113 	struct Scsi_Host *shost = class_to_shost(cdev);
3114 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3115 
3116 	if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
3117 	   ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3118 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
3119 		return snprintf(buf, PAGE_SIZE, "off\n");
3120 	else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3121 	    MPT3_DIAG_BUFFER_IS_RELEASED))
3122 		return snprintf(buf, PAGE_SIZE, "release\n");
3123 	else
3124 		return snprintf(buf, PAGE_SIZE, "post\n");
3125 }
3126 
3127 static ssize_t
3128 _ctl_host_trace_buffer_enable_store(struct device *cdev,
3129 	struct device_attribute *attr, const char *buf, size_t count)
3130 {
3131 	struct Scsi_Host *shost = class_to_shost(cdev);
3132 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3133 	char str[10] = "";
3134 	struct mpt3_diag_register diag_register;
3135 	u8 issue_reset = 0;
3136 
	/* don't allow post/release to occur while recovery is active */
3138 	if (ioc->shost_recovery || ioc->remove_host ||
3139 	    ioc->pci_error_recovery || ioc->is_driver_loading)
3140 		return -EBUSY;
3141 
3142 	if (sscanf(buf, "%9s", str) != 1)
3143 		return -EINVAL;
3144 
3145 	if (!strcmp(str, "post")) {
3146 		/* exit out if host buffers are already posted */
3147 		if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
3148 		    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3149 		    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
3150 		    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3151 		    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
3152 			goto out;
3153 		memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
3154 		pr_info(MPT3SAS_FMT "posting host trace buffers\n",
3155 		    ioc->name);
3156 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
3157 		diag_register.requested_buffer_size = (1024 * 1024);
3158 		diag_register.unique_id = 0x7075900;
3159 		ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
3160 		_ctl_diag_register_2(ioc,  &diag_register);
3161 	} else if (!strcmp(str, "release")) {
3162 		/* exit out if host buffers are already released */
3163 		if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
3164 			goto out;
3165 		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3166 		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
3167 			goto out;
3168 		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3169 		    MPT3_DIAG_BUFFER_IS_RELEASED))
3170 			goto out;
3171 		pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
3172 		    ioc->name);
3173 		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
3174 		    &issue_reset);
3175 	}
3176 
3177  out:
3178 	return strlen(buf);
3179 }
3180 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
3181 	_ctl_host_trace_buffer_enable_show,
3182 	_ctl_host_trace_buffer_enable_store);
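
/*
 * Illustrative note (assumption about the sysfs path): the attribute above
 * accepts the strings "post" and "release", so host trace buffers can be
 * managed at runtime with, for example:
 *
 *	echo post    > /sys/class/scsi_host/host0/host_trace_buffer_enable
 *	echo release > /sys/class/scsi_host/host0/host_trace_buffer_enable
 *
 * Reading the attribute reports "off", "post" or "release" to reflect the
 * current buffer state.
 */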
3183 
/*********** diagnostic trigger support **********************************/
3185 
3186 /**
3187  * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
3188  * @cdev - pointer to embedded class device
3189  * @buf - the buffer returned
3190  *
3191  * A sysfs 'read/write' shost attribute.
3192  */
3193 static ssize_t
3194 _ctl_diag_trigger_master_show(struct device *cdev,
3195 	struct device_attribute *attr, char *buf)
3196 
3197 {
3198 	struct Scsi_Host *shost = class_to_shost(cdev);
3199 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3200 	unsigned long flags;
3201 	ssize_t rc;
3202 
3203 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3204 	rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
3205 	memcpy(buf, &ioc->diag_trigger_master, rc);
3206 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3207 	return rc;
3208 }
3209 
3210 /**
3211  * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
3212  * @cdev - pointer to embedded class device
3213  * @buf - the buffer returned
3214  *
3215  * A sysfs 'read/write' shost attribute.
3216  */
3217 static ssize_t
3218 _ctl_diag_trigger_master_store(struct device *cdev,
3219 	struct device_attribute *attr, const char *buf, size_t count)
3220 
3221 {
3222 	struct Scsi_Host *shost = class_to_shost(cdev);
3223 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3224 	unsigned long flags;
3225 	ssize_t rc;
3226 
3227 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3228 	rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
3229 	memset(&ioc->diag_trigger_master, 0,
3230 	    sizeof(struct SL_WH_MASTER_TRIGGER_T));
3231 	memcpy(&ioc->diag_trigger_master, buf, rc);
3232 	ioc->diag_trigger_master.MasterData |=
3233 	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
3234 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3235 	return rc;
3236 }
3237 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
3238 	_ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
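
/*
 * Illustrative userspace sketch (assumption): the trigger attributes take a
 * raw binary structure rather than text, so a master trigger would be set by
 * writing a struct SL_WH_MASTER_TRIGGER_T (layout from mpt3sas_trigger_diag.h)
 * to the attribute; note that the store routine above always ORs in the FW
 * fault and adapter reset bits:
 *
 *	struct SL_WH_MASTER_TRIGGER_T trig;
 *	int fd = open("/sys/class/scsi_host/host0/diag_trigger_master",
 *	    O_WRONLY);
 *
 *	memset(&trig, 0, sizeof(trig));
 *	trig.MasterData = MASTER_TRIGGER_FW_FAULT;
 *	write(fd, &trig, sizeof(trig));
 */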
3239 
3240 
3241 /**
3242  * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
3243  * @cdev - pointer to embedded class device
3244  * @buf - the buffer returned
3245  *
3246  * A sysfs 'read/write' shost attribute.
3247  */
3248 static ssize_t
3249 _ctl_diag_trigger_event_show(struct device *cdev,
3250 	struct device_attribute *attr, char *buf)
3251 {
3252 	struct Scsi_Host *shost = class_to_shost(cdev);
3253 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3254 	unsigned long flags;
3255 	ssize_t rc;
3256 
3257 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3258 	rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
3259 	memcpy(buf, &ioc->diag_trigger_event, rc);
3260 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3261 	return rc;
3262 }
3263 
3264 /**
3265  * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
3266  * @cdev - pointer to embedded class device
3267  * @buf - the buffer returned
3268  *
3269  * A sysfs 'read/write' shost attribute.
3270  */
3271 static ssize_t
3272 _ctl_diag_trigger_event_store(struct device *cdev,
3273 	struct device_attribute *attr, const char *buf, size_t count)
3274 
3275 {
3276 	struct Scsi_Host *shost = class_to_shost(cdev);
3277 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3278 	unsigned long flags;
3279 	ssize_t sz;
3280 
3281 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3282 	sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3283 	memset(&ioc->diag_trigger_event, 0,
3284 	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3285 	memcpy(&ioc->diag_trigger_event, buf, sz);
3286 	if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
3287 		ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
3288 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3289 	return sz;
3290 }
3291 static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
3292 	_ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
3293 
3294 
3295 /**
3296  * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
3297  * @cdev - pointer to embedded class device
3298  * @buf - the buffer returned
3299  *
3300  * A sysfs 'read/write' shost attribute.
3301  */
3302 static ssize_t
3303 _ctl_diag_trigger_scsi_show(struct device *cdev,
3304 	struct device_attribute *attr, char *buf)
3305 {
3306 	struct Scsi_Host *shost = class_to_shost(cdev);
3307 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3308 	unsigned long flags;
3309 	ssize_t rc;
3310 
3311 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3312 	rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
3313 	memcpy(buf, &ioc->diag_trigger_scsi, rc);
3314 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3315 	return rc;
3316 }
3317 
3318 /**
3319  * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
3320  * @cdev - pointer to embedded class device
3321  * @buf - the buffer returned
3322  *
3323  * A sysfs 'read/write' shost attribute.
3324  */
3325 static ssize_t
3326 _ctl_diag_trigger_scsi_store(struct device *cdev,
3327 	struct device_attribute *attr, const char *buf, size_t count)
3328 {
3329 	struct Scsi_Host *shost = class_to_shost(cdev);
3330 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3331 	unsigned long flags;
3332 	ssize_t sz;
3333 
3334 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3335 	sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
	memset(&ioc->diag_trigger_scsi, 0,
	    sizeof(ioc->diag_trigger_scsi));
3338 	memcpy(&ioc->diag_trigger_scsi, buf, sz);
3339 	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
3340 		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
3341 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3342 	return sz;
3343 }
3344 static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
3345 	_ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
3346 
3347 
3348 /**
 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
3350  * @cdev - pointer to embedded class device
3351  * @buf - the buffer returned
3352  *
3353  * A sysfs 'read/write' shost attribute.
3354  */
3355 static ssize_t
3356 _ctl_diag_trigger_mpi_show(struct device *cdev,
3357 	struct device_attribute *attr, char *buf)
3358 {
3359 	struct Scsi_Host *shost = class_to_shost(cdev);
3360 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3361 	unsigned long flags;
3362 	ssize_t rc;
3363 
3364 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3365 	rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
3366 	memcpy(buf, &ioc->diag_trigger_mpi, rc);
3367 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3368 	return rc;
3369 }
3370 
3371 /**
3372  * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
3373  * @cdev - pointer to embedded class device
3374  * @buf - the buffer returned
3375  *
3376  * A sysfs 'read/write' shost attribute.
3377  */
3378 static ssize_t
3379 _ctl_diag_trigger_mpi_store(struct device *cdev,
3380 	struct device_attribute *attr, const char *buf, size_t count)
3381 {
3382 	struct Scsi_Host *shost = class_to_shost(cdev);
3383 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3384 	unsigned long flags;
3385 	ssize_t sz;
3386 
3387 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3388 	sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3389 	memset(&ioc->diag_trigger_mpi, 0,
3390 	    sizeof(ioc->diag_trigger_mpi));
3391 	memcpy(&ioc->diag_trigger_mpi, buf, sz);
3392 	if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
3393 		ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
3394 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3395 	return sz;
3396 }
3397 
3398 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
3399 	_ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
3400 
/*********** diagnostic trigger support **** END ****************************/
3402 
3403 /*****************************************/
3404 
3405 struct device_attribute *mpt3sas_host_attrs[] = {
3406 	&dev_attr_version_fw,
3407 	&dev_attr_version_bios,
3408 	&dev_attr_version_mpi,
3409 	&dev_attr_version_product,
3410 	&dev_attr_version_nvdata_persistent,
3411 	&dev_attr_version_nvdata_default,
3412 	&dev_attr_board_name,
3413 	&dev_attr_board_assembly,
3414 	&dev_attr_board_tracer,
3415 	&dev_attr_io_delay,
3416 	&dev_attr_device_delay,
3417 	&dev_attr_logging_level,
3418 	&dev_attr_fwfault_debug,
3419 	&dev_attr_fw_queue_depth,
3420 	&dev_attr_host_sas_address,
3421 	&dev_attr_ioc_reset_count,
3422 	&dev_attr_host_trace_buffer_size,
3423 	&dev_attr_host_trace_buffer,
3424 	&dev_attr_host_trace_buffer_enable,
3425 	&dev_attr_reply_queue_count,
3426 	&dev_attr_diag_trigger_master,
3427 	&dev_attr_diag_trigger_event,
3428 	&dev_attr_diag_trigger_scsi,
3429 	&dev_attr_diag_trigger_mpi,
3430 	&dev_attr_BRM_status,
3431 	NULL,
3432 };
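
/*
 * Note: mpt3sas_host_attrs is consumed by the scsih module, which wires it
 * into its scsi_host_template; that is what makes the attributes above
 * visible under /sys/class/scsi_host/hostN/.
 */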
3433 
3434 /* device attributes */
3435 
/**
 * _ctl_device_sas_address_show - sas address
 * @dev: pointer to embedded device
 * @attr: device attribute
 * @buf: the buffer returned
 *
 * This is the sas address for the target.
 *
 * A sysfs 'read-only' sdev attribute.
 */
3445 static ssize_t
3446 _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
3447 	char *buf)
3448 {
3449 	struct scsi_device *sdev = to_scsi_device(dev);
3450 	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3451 
3452 	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3453 	    (unsigned long long)sas_device_priv_data->sas_target->sas_address);
3454 }
3455 static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
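
/*
 * Illustrative only (the H:C:T:L address is a placeholder): as an sdev
 * attribute, sas_address appears in the SCSI device's sysfs directory and
 * prints the target's SAS address in 0x%016llx form, e.g.
 *
 *	cat /sys/bus/scsi/devices/0:0:0:0/sas_address
 */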
3456 
/**
 * _ctl_device_handle_show - device handle
 * @dev: pointer to embedded device
 * @attr: device attribute
 * @buf: the buffer returned
 *
 * This is the firmware assigned device handle.
 *
 * A sysfs 'read-only' sdev attribute.
 */
3466 static ssize_t
3467 _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
3468 	char *buf)
3469 {
3470 	struct scsi_device *sdev = to_scsi_device(dev);
3471 	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3472 
3473 	return snprintf(buf, PAGE_SIZE, "0x%04x\n",
3474 	    sas_device_priv_data->sas_target->handle);
3475 }
3476 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
3477 
/**
 * _ctl_device_ncq_prio_enable_show - send prioritized io commands to device
 * @dev: pointer to embedded device
 * @attr: device attribute
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' sdev attribute, only works with SATA devices.
 */
3485 static ssize_t
3486 _ctl_device_ncq_prio_enable_show(struct device *dev,
3487 				 struct device_attribute *attr, char *buf)
3488 {
3489 	struct scsi_device *sdev = to_scsi_device(dev);
3490 	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3491 
3492 	return snprintf(buf, PAGE_SIZE, "%d\n",
3493 			sas_device_priv_data->ncq_prio_enable);
3494 }
3495 
3496 static ssize_t
3497 _ctl_device_ncq_prio_enable_store(struct device *dev,
3498 				  struct device_attribute *attr,
3499 				  const char *buf, size_t count)
3500 {
3501 	struct scsi_device *sdev = to_scsi_device(dev);
3502 	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
	bool ncq_prio_enable = false;
3504 
3505 	if (kstrtobool(buf, &ncq_prio_enable))
3506 		return -EINVAL;
3507 
3508 	if (!scsih_ncq_prio_supp(sdev))
3509 		return -EINVAL;
3510 
3511 	sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
3512 	return strlen(buf);
3513 }
3514 static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
3515 		   _ctl_device_ncq_prio_enable_show,
3516 		   _ctl_device_ncq_prio_enable_store);
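
/*
 * Illustrative only (sdX is a placeholder): NCQ priority is toggled per
 * device from userspace, and the store handler rejects the write with
 * -EINVAL unless scsih_ncq_prio_supp() reports NCQ priority support:
 *
 *	echo 1 > /sys/block/sdX/device/sas_ncq_prio_enable
 */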
3517 
3518 struct device_attribute *mpt3sas_dev_attrs[] = {
3519 	&dev_attr_sas_address,
3520 	&dev_attr_sas_device_handle,
3521 	&dev_attr_sas_ncq_prio_enable,
3522 	NULL,
3523 };
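
/*
 * Note: mpt3sas_dev_attrs is likewise handed to the scsih module's
 * scsi_host_template as sdev attributes, so the three files above are
 * created for every SCSI device exposed by the HBA.
 */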
3524 
3525 /* file operations table for mpt3ctl device */
3526 static const struct file_operations ctl_fops = {
3527 	.owner = THIS_MODULE,
3528 	.unlocked_ioctl = _ctl_ioctl,
3529 	.poll = _ctl_poll,
3530 	.fasync = _ctl_fasync,
3531 #ifdef CONFIG_COMPAT
3532 	.compat_ioctl = _ctl_ioctl_compat,
3533 #endif
3534 };
3535 
3536 /* file operations table for mpt2ctl device */
3537 static const struct file_operations ctl_gen2_fops = {
3538 	.owner = THIS_MODULE,
3539 	.unlocked_ioctl = _ctl_mpt2_ioctl,
3540 	.poll = _ctl_poll,
3541 	.fasync = _ctl_fasync,
3542 #ifdef CONFIG_COMPAT
3543 	.compat_ioctl = _ctl_mpt2_ioctl_compat,
3544 #endif
3545 };
3546 
3547 static struct miscdevice ctl_dev = {
3548 	.minor  = MPT3SAS_MINOR,
3549 	.name   = MPT3SAS_DEV_NAME,
3550 	.fops   = &ctl_fops,
3551 };
3552 
3553 static struct miscdevice gen2_ctl_dev = {
3554 	.minor  = MPT2SAS_MINOR,
3555 	.name   = MPT2SAS_DEV_NAME,
3556 	.fops   = &ctl_gen2_fops,
3557 };
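
/*
 * misc_register() on the structures above creates the /dev/mpt3ctl and
 * /dev/mpt2ctl character nodes used by management applications.  A rough,
 * untested sketch of the userspace side, using the ioctl definitions from
 * mpt3sas_ctl.h (error handling omitted):
 *
 *	struct mpt3_ioctl_iocinfo info;
 *	int fd;
 *
 *	fd = open("/dev/mpt3ctl", O_RDWR);
 *	memset(&info, 0, sizeof(info));
 *	info.hdr.ioc_number = 0;
 *	ioctl(fd, MPT3IOCINFO, &info);
 */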
3558 
/**
 * mpt3sas_ctl_init - main entry point for ctl.
 * @hbas_to_enumerate: HBA generations to enumerate (1 - SAS 2.0 HBAs only,
 *	2 - SAS 3.0 HBAs only, 0 - both)
 */
3563 void
3564 mpt3sas_ctl_init(ushort hbas_to_enumerate)
3565 {
3566 	async_queue = NULL;
3567 
	/* Don't register mpt3ctl ioctl device if
	 * hbas_to_enumerate is one.
	 */
3571 	if (hbas_to_enumerate != 1)
3572 		if (misc_register(&ctl_dev) < 0)
3573 			pr_err("%s can't register misc device [minor=%d]\n",
3574 			    MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
3575 
	/* Don't register mpt2ctl ioctl device if
	 * hbas_to_enumerate is two.
	 */
3579 	if (hbas_to_enumerate != 2)
3580 		if (misc_register(&gen2_ctl_dev) < 0)
3581 			pr_err("%s can't register misc device [minor=%d]\n",
3582 			    MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
3583 
3584 	init_waitqueue_head(&ctl_poll_wait);
3585 }
3586 
/**
 * mpt3sas_ctl_exit - exit point for ctl
 * @hbas_to_enumerate: HBA generations that were enumerated (1 - SAS 2.0 HBAs
 *	only, 2 - SAS 3.0 HBAs only, 0 - both)
 */
3591 void
3592 mpt3sas_ctl_exit(ushort hbas_to_enumerate)
3593 {
3594 	struct MPT3SAS_ADAPTER *ioc;
3595 	int i;
3596 
3597 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
3598 
3599 		/* free memory associated to diag buffers */
3600 		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
3601 			if (!ioc->diag_buffer[i])
3602 				continue;
3603 			if (!(ioc->diag_buffer_status[i] &
3604 			    MPT3_DIAG_BUFFER_IS_REGISTERED))
3605 				continue;
3606 			if ((ioc->diag_buffer_status[i] &
3607 			    MPT3_DIAG_BUFFER_IS_RELEASED))
3608 				continue;
			pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
			    ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
3611 			ioc->diag_buffer[i] = NULL;
3612 			ioc->diag_buffer_status[i] = 0;
3613 		}
3614 
3615 		kfree(ioc->event_log);
3616 	}
3617 	if (hbas_to_enumerate != 1)
3618 		misc_deregister(&ctl_dev);
3619 	if (hbas_to_enumerate != 2)
3620 		misc_deregister(&gen2_ctl_dev);
3621 }
3622