xref: /openbmc/linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision 87832e937c808a7ebc41254b408362e3255c87c9)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * Driver for Broadcom MPI3 Storage Controllers
4   *
5   * Copyright (C) 2017-2023 Broadcom Inc.
6   *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7   *
8   */
9  
10  #include "mpi3mr.h"
11  #include <linux/io-64-nonatomic-lo-hi.h>
12  
13  static int
14  mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
15  static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16  static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
17  	struct mpi3_ioc_facts_data *facts_data);
18  static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
19  	struct mpi3mr_drv_cmd *drv_cmd);
20  
21  static int poll_queues;
22  module_param(poll_queues, int, 0444);
23  MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24  
25  #if defined(writeq) && defined(CONFIG_64BIT)
26  static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
27  {
28  	writeq(b, addr);
29  }
30  #else
31  static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
32  {
33  	__u64 data_out = b;
34  
35  	writel((u32)(data_out), addr);
36  	writel((u32)(data_out >> 32), (addr + 4));
37  }
38  #endif
39  
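/**
 * mpi3mr_check_req_qfull - Check request queue is full or not
 * @op_req_q: Operational request queue info
 *
 * Compare the producer index against the last known consumer
 * index to detect the queue full condition.
 *
 * Return: true when the queue is full, false otherwise.
 */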
40  static inline bool
41  mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
42  {
43  	u16 pi, ci, max_entries;
44  	bool is_qfull = false;
45  
46  	pi = op_req_q->pi;
47  	ci = READ_ONCE(op_req_q->ci);
48  	max_entries = op_req_q->num_requests;
49  
50  	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
51  		is_qfull = true;
52  
53  	return is_qfull;
54  }
55  
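/**
 * mpi3mr_sync_irqs - Synchronize allocated IRQ vectors
 * @mrioc: Adapter instance reference
 *
 * Wait for any in-flight interrupt handlers on all allocated
 * MSI-X vectors to complete.
 *
 * Return: Nothing.
 */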
56  static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
57  {
58  	u16 i, max_vectors;
59  
60  	max_vectors = mrioc->intr_info_count;
61  
62  	for (i = 0; i < max_vectors; i++)
63  		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
64  }
65  
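/**
 * mpi3mr_ioc_disable_intr - Disable interrupt handling
 * @mrioc: Adapter instance reference
 *
 * Clear the interrupt enabled flag and synchronize all IRQs so
 * that no handler is running once this returns.
 *
 * Return: Nothing.
 */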
66  void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
67  {
68  	mrioc->intr_enabled = 0;
69  	mpi3mr_sync_irqs(mrioc);
70  }
71  
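/**
 * mpi3mr_ioc_enable_intr - Enable interrupt handling
 * @mrioc: Adapter instance reference
 *
 * Set the flag that allows the interrupt handlers to process
 * replies.
 *
 * Return: Nothing.
 */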
72  void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
73  {
74  	mrioc->intr_enabled = 1;
75  }
76  
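/**
 * mpi3mr_cleanup_isr - Free IRQs and interrupt info
 * @mrioc: Adapter instance reference
 *
 * Disable interrupts, free the registered IRQs, release the
 * interrupt info array and free the PCI IRQ vectors.
 *
 * Return: Nothing.
 */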
77  static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
78  {
79  	u16 i;
80  
81  	mpi3mr_ioc_disable_intr(mrioc);
82  
83  	if (!mrioc->intr_info)
84  		return;
85  
86  	for (i = 0; i < mrioc->intr_info_count; i++)
87  		free_irq(pci_irq_vector(mrioc->pdev, i),
88  		    (mrioc->intr_info + i));
89  
90  	kfree(mrioc->intr_info);
91  	mrioc->intr_info = NULL;
92  	mrioc->intr_info_count = 0;
93  	mrioc->is_intr_info_set = false;
94  	pci_free_irq_vectors(mrioc->pdev);
95  }
96  
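/**
 * mpi3mr_add_sg_single - Build a simple SGE
 * @paddr: Virtual address of the SGE to populate
 * @flags: SGE flags
 * @length: Transfer length
 * @dma_addr: DMA address of the data buffer
 *
 * Populate an MPI3 simple scatter-gather element in place.
 *
 * Return: Nothing.
 */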
97  void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
98  	dma_addr_t dma_addr)
99  {
100  	struct mpi3_sge_common *sgel = paddr;
101  
102  	sgel->flags = flags;
103  	sgel->length = cpu_to_le32(length);
104  	sgel->address = cpu_to_le64(dma_addr);
105  }
106  
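/**
 * mpi3mr_build_zero_len_sge - Build a zero length SGE
 * @paddr: Virtual address of the SGE to populate
 *
 * Build an end-of-list simple SGE with zero length and an
 * invalid (-1) address.
 *
 * Return: Nothing.
 */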
107  void mpi3mr_build_zero_len_sge(void *paddr)
108  {
109  	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
110  
111  	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
112  }
113  
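/**
 * mpi3mr_get_reply_virt_addr - Map reply DMA address to virtual address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address of the reply buffer
 *
 * Return: Virtual address of the reply buffer, or NULL if the DMA
 * address is zero or outside the reply buffer pool.
 */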
114  void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
115  	dma_addr_t phys_addr)
116  {
117  	if (!phys_addr)
118  		return NULL;
119  
120  	if ((phys_addr < mrioc->reply_buf_dma) ||
121  	    (phys_addr > mrioc->reply_buf_dma_max_address))
122  		return NULL;
123  
124  	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
125  }
126  
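/**
 * mpi3mr_get_sensebuf_virt_addr - Map sense buffer DMA address to virtual address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address of the sense buffer
 *
 * Return: Virtual address of the sense buffer, or NULL for a zero
 * DMA address.
 */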
127  void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
128  	dma_addr_t phys_addr)
129  {
130  	if (!phys_addr)
131  		return NULL;
132  
133  	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
134  }
135  
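/**
 * mpi3mr_repost_reply_buf - Return a reply buffer to the free queue
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the processed reply buffer
 *
 * Place the reply buffer back on the reply free queue and update
 * the reply free host index register under the queue lock.
 *
 * Return: Nothing.
 */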
136  static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
137  	u64 reply_dma)
138  {
139  	u32 old_idx = 0;
140  	unsigned long flags;
141  
142  	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
143  	old_idx  =  mrioc->reply_free_queue_host_index;
144  	mrioc->reply_free_queue_host_index = (
145  	    (mrioc->reply_free_queue_host_index ==
146  	    (mrioc->reply_free_qsz - 1)) ? 0 :
147  	    (mrioc->reply_free_queue_host_index + 1));
148  	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
149  	writel(mrioc->reply_free_queue_host_index,
150  	    &mrioc->sysif_regs->reply_free_host_index);
151  	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
152  }
153  
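/**
 * mpi3mr_repost_sense_buf - Return a sense buffer to the free queue
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the processed sense buffer
 *
 * Place the sense buffer back on the sense buffer free queue and
 * update the sense buffer free host index register under the lock.
 *
 * Return: Nothing.
 */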
154  void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
155  	u64 sense_buf_dma)
156  {
157  	u32 old_idx = 0;
158  	unsigned long flags;
159  
160  	spin_lock_irqsave(&mrioc->sbq_lock, flags);
161  	old_idx  =  mrioc->sbq_host_index;
162  	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
163  	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
164  	    (mrioc->sbq_host_index + 1));
165  	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
166  	writel(mrioc->sbq_host_index,
167  	    &mrioc->sysif_regs->sense_buffer_free_host_index);
168  	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
169  }
170  
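/**
 * mpi3mr_print_event_data - Display event details
 * @mrioc: Adapter instance reference
 * @event_reply: Event notification reply frame
 *
 * Log a human readable description of the received MPI3 event.
 *
 * Return: Nothing.
 */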
171  static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
172  	struct mpi3_event_notification_reply *event_reply)
173  {
174  	char *desc = NULL;
175  	u16 event;
176  
177  	event = event_reply->event;
178  
179  	switch (event) {
180  	case MPI3_EVENT_LOG_DATA:
181  		desc = "Log Data";
182  		break;
183  	case MPI3_EVENT_CHANGE:
184  		desc = "Event Change";
185  		break;
186  	case MPI3_EVENT_GPIO_INTERRUPT:
187  		desc = "GPIO Interrupt";
188  		break;
189  	case MPI3_EVENT_CABLE_MGMT:
190  		desc = "Cable Management";
191  		break;
192  	case MPI3_EVENT_ENERGY_PACK_CHANGE:
193  		desc = "Energy Pack Change";
194  		break;
195  	case MPI3_EVENT_DEVICE_ADDED:
196  	{
197  		struct mpi3_device_page0 *event_data =
198  		    (struct mpi3_device_page0 *)event_reply->event_data;
199  		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
200  		    event_data->dev_handle, event_data->device_form);
201  		return;
202  	}
203  	case MPI3_EVENT_DEVICE_INFO_CHANGED:
204  	{
205  		struct mpi3_device_page0 *event_data =
206  		    (struct mpi3_device_page0 *)event_reply->event_data;
207  		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
208  		    event_data->dev_handle, event_data->device_form);
209  		return;
210  	}
211  	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
212  	{
213  		struct mpi3_event_data_device_status_change *event_data =
214  		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
215  		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
216  		    event_data->dev_handle, event_data->reason_code);
217  		return;
218  	}
219  	case MPI3_EVENT_SAS_DISCOVERY:
220  	{
221  		struct mpi3_event_data_sas_discovery *event_data =
222  		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
223  		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
224  		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
225  		    "start" : "stop",
226  		    le32_to_cpu(event_data->discovery_status));
227  		return;
228  	}
229  	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
230  		desc = "SAS Broadcast Primitive";
231  		break;
232  	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
233  		desc = "SAS Notify Primitive";
234  		break;
235  	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
236  		desc = "SAS Init Device Status Change";
237  		break;
238  	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
239  		desc = "SAS Init Table Overflow";
240  		break;
241  	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
242  		desc = "SAS Topology Change List";
243  		break;
244  	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
245  		desc = "Enclosure Device Status Change";
246  		break;
247  	case MPI3_EVENT_ENCL_DEVICE_ADDED:
248  		desc = "Enclosure Added";
249  		break;
250  	case MPI3_EVENT_HARD_RESET_RECEIVED:
251  		desc = "Hard Reset Received";
252  		break;
253  	case MPI3_EVENT_SAS_PHY_COUNTER:
254  		desc = "SAS PHY Counter";
255  		break;
256  	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
257  		desc = "SAS Device Discovery Error";
258  		break;
259  	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
260  		desc = "PCIE Topology Change List";
261  		break;
262  	case MPI3_EVENT_PCIE_ENUMERATION:
263  	{
264  		struct mpi3_event_data_pcie_enumeration *event_data =
265  		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
266  		ioc_info(mrioc, "PCIE Enumeration: (%s)",
267  		    (event_data->reason_code ==
268  		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
269  		if (event_data->enumeration_status)
270  			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
271  			    le32_to_cpu(event_data->enumeration_status));
272  		return;
273  	}
274  	case MPI3_EVENT_PREPARE_FOR_RESET:
275  		desc = "Prepare For Reset";
276  		break;
277  	}
278  
279  	if (!desc)
280  		return;
281  
282  	ioc_info(mrioc, "%s\n", desc);
283  }
284  
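/**
 * mpi3mr_handle_events - Top level event handler
 * @mrioc: Adapter instance reference
 * @def_reply: Event notification reply frame
 *
 * Update the IOC change count, print the event and pass it on to
 * the OS specific event handler.
 *
 * Return: Nothing.
 */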
285  static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
286  	struct mpi3_default_reply *def_reply)
287  {
288  	struct mpi3_event_notification_reply *event_reply =
289  	    (struct mpi3_event_notification_reply *)def_reply;
290  
291  	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
292  	mpi3mr_print_event_data(mrioc, event_reply);
293  	mpi3mr_os_handle_events(mrioc, event_reply);
294  }
295  
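/**
 * mpi3mr_get_drv_cmd - Get internal command tracker from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag from the reply descriptor
 * @def_reply: Address reply frame, if any
 *
 * Map a host tag to the corresponding internal driver command
 * tracker. Event notifications reported with an invalid host tag
 * are handled here as well.
 *
 * Return: Driver command tracker pointer or NULL.
 */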
296  static struct mpi3mr_drv_cmd *
297  mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
298  	struct mpi3_default_reply *def_reply)
299  {
300  	u16 idx;
301  
302  	switch (host_tag) {
303  	case MPI3MR_HOSTTAG_INITCMDS:
304  		return &mrioc->init_cmds;
305  	case MPI3MR_HOSTTAG_CFG_CMDS:
306  		return &mrioc->cfg_cmds;
307  	case MPI3MR_HOSTTAG_BSG_CMDS:
308  		return &mrioc->bsg_cmds;
309  	case MPI3MR_HOSTTAG_BLK_TMS:
310  		return &mrioc->host_tm_cmds;
311  	case MPI3MR_HOSTTAG_PEL_ABORT:
312  		return &mrioc->pel_abort_cmd;
313  	case MPI3MR_HOSTTAG_PEL_WAIT:
314  		return &mrioc->pel_cmds;
315  	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
316  		return &mrioc->transport_cmds;
317  	case MPI3MR_HOSTTAG_INVALID:
318  		if (def_reply && def_reply->function ==
319  		    MPI3_FUNCTION_EVENT_NOTIFICATION)
320  			mpi3mr_handle_events(mrioc, def_reply);
321  		return NULL;
322  	default:
323  		break;
324  	}
325  	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
326  	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
327  		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
328  		return &mrioc->dev_rmhs_cmds[idx];
329  	}
330  
331  	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
332  	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
333  		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
334  		return &mrioc->evtack_cmds[idx];
335  	}
336  
337  	return NULL;
338  }
339  
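/**
 * mpi3mr_process_admin_reply_desc - Process an admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Admin reply descriptor
 * @reply_dma: Place holder for the reply frame DMA address
 *
 * Decode the reply descriptor, update the matching driver command
 * tracker with IOC status, log info, reply frame and sense data,
 * then wake up the waiter or invoke its callback.
 *
 * Return: Nothing.
 */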
340  static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
341  	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
342  {
343  	u16 reply_desc_type, host_tag = 0;
344  	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
345  	u32 ioc_loginfo = 0;
346  	struct mpi3_status_reply_descriptor *status_desc;
347  	struct mpi3_address_reply_descriptor *addr_desc;
348  	struct mpi3_success_reply_descriptor *success_desc;
349  	struct mpi3_default_reply *def_reply = NULL;
350  	struct mpi3mr_drv_cmd *cmdptr = NULL;
351  	struct mpi3_scsi_io_reply *scsi_reply;
352  	u8 *sense_buf = NULL;
353  
354  	*reply_dma = 0;
355  	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
356  	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
357  	switch (reply_desc_type) {
358  	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
359  		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
360  		host_tag = le16_to_cpu(status_desc->host_tag);
361  		ioc_status = le16_to_cpu(status_desc->ioc_status);
362  		if (ioc_status &
363  		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
364  			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
365  		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
366  		break;
367  	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
368  		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
369  		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
370  		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
371  		if (!def_reply)
372  			goto out;
373  		host_tag = le16_to_cpu(def_reply->host_tag);
374  		ioc_status = le16_to_cpu(def_reply->ioc_status);
375  		if (ioc_status &
376  		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
377  			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
378  		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
379  		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
380  			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
381  			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
382  			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
383  		}
384  		break;
385  	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
386  		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
387  		host_tag = le16_to_cpu(success_desc->host_tag);
388  		break;
389  	default:
390  		break;
391  	}
392  
393  	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
394  	if (cmdptr) {
395  		if (cmdptr->state & MPI3MR_CMD_PENDING) {
396  			cmdptr->state |= MPI3MR_CMD_COMPLETE;
397  			cmdptr->ioc_loginfo = ioc_loginfo;
398  			cmdptr->ioc_status = ioc_status;
399  			cmdptr->state &= ~MPI3MR_CMD_PENDING;
400  			if (def_reply) {
401  				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
402  				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
403  				    mrioc->reply_sz);
404  			}
405  			if (sense_buf && cmdptr->sensebuf) {
406  				cmdptr->is_sense = 1;
407  				memcpy(cmdptr->sensebuf, sense_buf,
408  				       MPI3MR_SENSE_BUF_SZ);
409  			}
410  			if (cmdptr->is_waiting) {
411  				complete(&cmdptr->done);
412  				cmdptr->is_waiting = 0;
413  			} else if (cmdptr->callback)
414  				cmdptr->callback(mrioc, cmdptr);
415  		}
416  	}
417  out:
418  	if (sense_buf)
419  		mpi3mr_repost_sense_buf(mrioc,
420  		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
421  }
422  
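/**
 * mpi3mr_process_admin_reply_q - Admin reply queue handler
 * @mrioc: Adapter instance reference
 *
 * Drain the admin reply queue, process each reply descriptor and
 * update the consumer index register.
 *
 * Return: Number of reply descriptors processed, or 0 if the queue
 * is already being processed or is empty.
 */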
423  int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
424  {
425  	u32 exp_phase = mrioc->admin_reply_ephase;
426  	u32 admin_reply_ci = mrioc->admin_reply_ci;
427  	u32 num_admin_replies = 0;
428  	u64 reply_dma = 0;
429  	struct mpi3_default_reply_descriptor *reply_desc;
430  
431  	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
432  		return 0;
433  
434  	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
435  	    admin_reply_ci;
436  
437  	if ((le16_to_cpu(reply_desc->reply_flags) &
438  	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
439  		atomic_dec(&mrioc->admin_reply_q_in_use);
440  		return 0;
441  	}
442  
443  	do {
444  		if (mrioc->unrecoverable)
445  			break;
446  
447  		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
448  		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
449  		if (reply_dma)
450  			mpi3mr_repost_reply_buf(mrioc, reply_dma);
451  		num_admin_replies++;
452  		if (++admin_reply_ci == mrioc->num_admin_replies) {
453  			admin_reply_ci = 0;
454  			exp_phase ^= 1;
455  		}
456  		reply_desc =
457  		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
458  		    admin_reply_ci;
459  		if ((le16_to_cpu(reply_desc->reply_flags) &
460  		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
461  			break;
462  	} while (1);
463  
464  	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
465  	mrioc->admin_reply_ci = admin_reply_ci;
466  	mrioc->admin_reply_ephase = exp_phase;
467  	atomic_dec(&mrioc->admin_reply_q_in_use);
468  
469  	return num_admin_replies;
470  }
471  
472  /**
473   * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
474   *	queue's consumer index from operational reply descriptor queue.
475   * @op_reply_q: op_reply_qinfo object
476   * @reply_ci: operational reply descriptor's queue consumer index
477   *
478   * Returns reply descriptor frame address
479   */
480  static inline struct mpi3_default_reply_descriptor *
481  mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
482  {
483  	void *segment_base_addr;
484  	struct segments *segments = op_reply_q->q_segments;
485  	struct mpi3_default_reply_descriptor *reply_desc = NULL;
486  
487  	segment_base_addr =
488  	    segments[reply_ci / op_reply_q->segment_qd].segment;
489  	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
490  	    (reply_ci % op_reply_q->segment_qd);
491  	return reply_desc;
492  }
493  
494  /**
495   * mpi3mr_process_op_reply_q - Operational reply queue handler
496   * @mrioc: Adapter instance reference
497   * @op_reply_q: Operational reply queue info
498   *
499   * Checks the specific operational reply queue and drains the
500   * reply queue entries until the queue is empty and processes the
501   * individual reply descriptors.
502   *
503   * Return: 0 if queue is already processed, or number of reply
504   *	    descriptors processed.
505   */
506  int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
507  	struct op_reply_qinfo *op_reply_q)
508  {
509  	struct op_req_qinfo *op_req_q;
510  	u32 exp_phase;
511  	u32 reply_ci;
512  	u32 num_op_reply = 0;
513  	u64 reply_dma = 0;
514  	struct mpi3_default_reply_descriptor *reply_desc;
515  	u16 req_q_idx = 0, reply_qidx;
516  
517  	reply_qidx = op_reply_q->qid - 1;
518  
519  	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
520  		return 0;
521  
522  	exp_phase = op_reply_q->ephase;
523  	reply_ci = op_reply_q->ci;
524  
525  	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
526  	if ((le16_to_cpu(reply_desc->reply_flags) &
527  	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
528  		atomic_dec(&op_reply_q->in_use);
529  		return 0;
530  	}
531  
532  	do {
533  		if (mrioc->unrecoverable)
534  			break;
535  
536  		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
537  		op_req_q = &mrioc->req_qinfo[req_q_idx];
538  
539  		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
540  		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
541  		    reply_qidx);
542  		atomic_dec(&op_reply_q->pend_ios);
543  		if (reply_dma)
544  			mpi3mr_repost_reply_buf(mrioc, reply_dma);
545  		num_op_reply++;
546  
547  		if (++reply_ci == op_reply_q->num_replies) {
548  			reply_ci = 0;
549  			exp_phase ^= 1;
550  		}
551  
552  		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
553  
554  		if ((le16_to_cpu(reply_desc->reply_flags) &
555  		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
556  			break;
557  #ifndef CONFIG_PREEMPT_RT
558  		/*
559  		 * Exit completion loop to avoid CPU lockup
560  		 * Ensure remaining completion happens from threaded ISR.
561  		 */
562  		if (num_op_reply > mrioc->max_host_ios) {
563  			op_reply_q->enable_irq_poll = true;
564  			break;
565  		}
566  #endif
567  	} while (1);
568  
569  	writel(reply_ci,
570  	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
571  	op_reply_q->ci = reply_ci;
572  	op_reply_q->ephase = exp_phase;
573  
574  	atomic_dec(&op_reply_q->in_use);
575  	return num_op_reply;
576  }
577  
578  /**
579   * mpi3mr_blk_mq_poll - Operational reply queue handler
580   * @shost: SCSI Host reference
581   * @queue_num: Request queue number (w.r.t OS it is hardware context number)
582   *
583   * Checks the specific operational reply queue and drains the
584   * reply queue entries until the queue is empty and processes the
585   * individual reply descriptors.
586   *
587   * Return: 0 if queue is already processed, or number of reply
588   *	    descriptors processed.
589   */
590  int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
591  {
592  	int num_entries = 0;
593  	struct mpi3mr_ioc *mrioc;
594  
595  	mrioc = (struct mpi3mr_ioc *)shost->hostdata;
596  
597  	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
598  	    mrioc->unrecoverable))
599  		return 0;
600  
601  	num_entries = mpi3mr_process_op_reply_q(mrioc,
602  			&mrioc->op_reply_qinfo[queue_num]);
603  
604  	return num_entries;
605  }
606  
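/**
 * mpi3mr_isr_primary - Primary interrupt handler
 * @irq: IRQ number
 * @privdata: Interrupt info
 *
 * Process the admin reply queue on MSI-X index 0 and the
 * operational reply queue associated with this vector.
 *
 * Return: IRQ_HANDLED if any reply was processed, IRQ_NONE otherwise.
 */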
607  static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
608  {
609  	struct mpi3mr_intr_info *intr_info = privdata;
610  	struct mpi3mr_ioc *mrioc;
611  	u16 midx;
612  	u32 num_admin_replies = 0, num_op_reply = 0;
613  
614  	if (!intr_info)
615  		return IRQ_NONE;
616  
617  	mrioc = intr_info->mrioc;
618  
619  	if (!mrioc->intr_enabled)
620  		return IRQ_NONE;
621  
622  	midx = intr_info->msix_index;
623  
624  	if (!midx)
625  		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
626  	if (intr_info->op_reply_q)
627  		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
628  		    intr_info->op_reply_q);
629  
630  	if (num_admin_replies || num_op_reply)
631  		return IRQ_HANDLED;
632  	else
633  		return IRQ_NONE;
634  }
635  
636  #ifndef CONFIG_PREEMPT_RT
637  
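/**
 * mpi3mr_isr - Interrupt handler with IRQ polling support
 * @irq: IRQ number
 * @privdata: Interrupt info
 *
 * Call the primary handler and, when IRQ polling is enabled and
 * I/Os are still pending, disable the IRQ and wake the threaded
 * handler.
 *
 * Return: IRQ_NONE, IRQ_HANDLED or IRQ_WAKE_THREAD.
 */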
638  static irqreturn_t mpi3mr_isr(int irq, void *privdata)
639  {
640  	struct mpi3mr_intr_info *intr_info = privdata;
641  	int ret;
642  
643  	if (!intr_info)
644  		return IRQ_NONE;
645  
646  	/* Call primary ISR routine */
647  	ret = mpi3mr_isr_primary(irq, privdata);
648  
649  	/*
650  	 * If more IOs are expected, schedule IRQ polling thread.
651  	 * Otherwise exit from ISR.
652  	 */
653  	if (!intr_info->op_reply_q)
654  		return ret;
655  
656  	if (!intr_info->op_reply_q->enable_irq_poll ||
657  	    !atomic_read(&intr_info->op_reply_q->pend_ios))
658  		return ret;
659  
660  	disable_irq_nosync(intr_info->os_irq);
661  
662  	return IRQ_WAKE_THREAD;
663  }
664  
665  /**
666   * mpi3mr_isr_poll - Reply queue polling routine
667   * @irq: IRQ
668   * @privdata: Interrupt info
669   *
670   * Poll for pending I/O completions in a loop until no pending I/Os
671   * remain or controller queue depth I/Os are processed.
672   *
673   * Return: IRQ_NONE or IRQ_HANDLED
674   */
675  static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
676  {
677  	struct mpi3mr_intr_info *intr_info = privdata;
678  	struct mpi3mr_ioc *mrioc;
679  	u16 midx;
680  	u32 num_op_reply = 0;
681  
682  	if (!intr_info || !intr_info->op_reply_q)
683  		return IRQ_NONE;
684  
685  	mrioc = intr_info->mrioc;
686  	midx = intr_info->msix_index;
687  
688  	/* Poll for pending IOs completions */
689  	do {
690  		if (!mrioc->intr_enabled || mrioc->unrecoverable)
691  			break;
692  
693  		if (!midx)
694  			mpi3mr_process_admin_reply_q(mrioc);
695  		if (intr_info->op_reply_q)
696  			num_op_reply +=
697  			    mpi3mr_process_op_reply_q(mrioc,
698  				intr_info->op_reply_q);
699  
700  		usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
701  
702  	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
703  	    (num_op_reply < mrioc->max_host_ios));
704  
705  	intr_info->op_reply_q->enable_irq_poll = false;
706  	enable_irq(intr_info->os_irq);
707  
708  	return IRQ_HANDLED;
709  }
710  
711  #endif
712  
713  /**
714   * mpi3mr_request_irq - Request IRQ and register ISR
715   * @mrioc: Adapter instance reference
716   * @index: IRQ vector index
717   *
718   * Request a threaded ISR with primary and secondary (poll) handlers
719   *
720   * Return: 0 on success and non zero on failures.
721   */
722  static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
723  {
724  	struct pci_dev *pdev = mrioc->pdev;
725  	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
726  	int retval = 0;
727  
728  	intr_info->mrioc = mrioc;
729  	intr_info->msix_index = index;
730  	intr_info->op_reply_q = NULL;
731  
732  	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
733  	    mrioc->driver_name, mrioc->id, index);
734  
735  #ifndef CONFIG_PREEMPT_RT
736  	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
737  	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
738  #else
739  	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
740  	    NULL, IRQF_SHARED, intr_info->name, intr_info);
741  #endif
742  	if (retval) {
743  		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
744  		    intr_info->name, pci_irq_vector(pdev, index));
745  		return retval;
746  	}
747  
748  	intr_info->os_irq = pci_irq_vector(pdev, index);
749  	return retval;
750  }
751  
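/**
 * mpi3mr_calc_poll_queues - Validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: Number of MSI-X vectors available
 *
 * Disable polled queues when not enough vectors are left for the
 * admin and default queues.
 *
 * Return: Nothing.
 */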
752  static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
753  {
754  	if (!mrioc->requested_poll_qcount)
755  		return;
756  
757  	/* Reserved for Admin and Default Queue */
758  	if (max_vectors > 2 &&
759  		(mrioc->requested_poll_qcount < max_vectors - 2)) {
760  		ioc_info(mrioc,
761  		    "enabled polled queues (%d) msix (%d)\n",
762  		    mrioc->requested_poll_qcount, max_vectors);
763  	} else {
764  		ioc_info(mrioc,
765  		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
766  		    mrioc->requested_poll_qcount, max_vectors);
767  		mrioc->requested_poll_qcount = 0;
768  	}
769  }
770  
771  /**
772   * mpi3mr_setup_isr - Setup ISR for the controller
773   * @mrioc: Adapter instance reference
774   * @setup_one: Request one IRQ or more
775   *
776   * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
777   *
778   * Return: 0 on success and non zero on failures.
779   */
780  static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
781  {
782  	unsigned int irq_flags = PCI_IRQ_MSIX;
783  	int max_vectors, min_vec;
784  	int retval;
785  	int i;
786  	struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
787  
788  	if (mrioc->is_intr_info_set)
789  		return 0;
790  
791  	mpi3mr_cleanup_isr(mrioc);
792  
793  	if (setup_one || reset_devices) {
794  		max_vectors = 1;
795  		retval = pci_alloc_irq_vectors(mrioc->pdev,
796  		    1, max_vectors, irq_flags);
797  		if (retval < 0) {
798  			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
799  			    retval);
800  			goto out_failed;
801  		}
802  	} else {
803  		max_vectors =
804  		    min_t(int, mrioc->cpu_count + 1 +
805  			mrioc->requested_poll_qcount, mrioc->msix_count);
806  
807  		mpi3mr_calc_poll_queues(mrioc, max_vectors);
808  
809  		ioc_info(mrioc,
810  		    "MSI-X vectors supported: %d, no of cores: %d,",
811  		    mrioc->msix_count, mrioc->cpu_count);
812  		ioc_info(mrioc,
813  		    "MSI-x vectors requested: %d poll_queues %d\n",
814  		    max_vectors, mrioc->requested_poll_qcount);
815  
816  		desc.post_vectors = mrioc->requested_poll_qcount;
817  		min_vec = desc.pre_vectors + desc.post_vectors;
818  		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
819  
820  		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
821  			min_vec, max_vectors, irq_flags, &desc);
822  
823  		if (retval < 0) {
824  			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
825  			    retval);
826  			goto out_failed;
827  		}
828  
829  
830  		/*
831  		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
832  		 * between Admin queue and operational queue
833  		 */
834  		if (retval == min_vec)
835  			mrioc->op_reply_q_offset = 0;
836  		else if (retval != (max_vectors)) {
837  			ioc_info(mrioc,
838  			    "allocated vectors (%d) are less than configured (%d)\n",
839  			    retval, max_vectors);
840  		}
841  
842  		max_vectors = retval;
843  		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
844  
845  		mpi3mr_calc_poll_queues(mrioc, max_vectors);
846  
847  	}
848  
849  	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
850  	    GFP_KERNEL);
851  	if (!mrioc->intr_info) {
852  		retval = -ENOMEM;
853  		pci_free_irq_vectors(mrioc->pdev);
854  		goto out_failed;
855  	}
856  	for (i = 0; i < max_vectors; i++) {
857  		retval = mpi3mr_request_irq(mrioc, i);
858  		if (retval) {
859  			mrioc->intr_info_count = i;
860  			goto out_failed;
861  		}
862  	}
863  	if (reset_devices || !setup_one)
864  		mrioc->is_intr_info_set = true;
865  	mrioc->intr_info_count = max_vectors;
866  	mpi3mr_ioc_enable_intr(mrioc);
867  	return 0;
868  
869  out_failed:
870  	mpi3mr_cleanup_isr(mrioc);
871  
872  	return retval;
873  }
874  
875  static const struct {
876  	enum mpi3mr_iocstate value;
877  	char *name;
878  } mrioc_states[] = {
879  	{ MRIOC_STATE_READY, "ready" },
880  	{ MRIOC_STATE_FAULT, "fault" },
881  	{ MRIOC_STATE_RESET, "reset" },
882  	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
883  	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
884  	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
885  };
886  
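/**
 * mpi3mr_iocstate_name - get IOC state name
 * @mrioc_state: IOC state value
 *
 * Map the IOC state to a NULL terminated ASCII string.
 *
 * Return: name corresponding to the IOC state or NULL.
 */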
887  static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
888  {
889  	int i;
890  	char *name = NULL;
891  
892  	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
893  		if (mrioc_states[i].value == mrioc_state) {
894  			name = mrioc_states[i].name;
895  			break;
896  		}
897  	}
898  	return name;
899  }
900  
901  /* Reset reason to name mapper structure */
902  static const struct {
903  	enum mpi3mr_reset_reason value;
904  	char *name;
905  } mpi3mr_reset_reason_codes[] = {
906  	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
907  	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
908  	{ MPI3MR_RESET_FROM_APP, "application invocation" },
909  	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
910  	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
911  	{ MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
912  	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
913  	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
914  	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
915  	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
916  	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
917  	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
918  	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
919  	{
920  		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
921  		"create reply queue timeout"
922  	},
923  	{
924  		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
925  		"create request queue timeout"
926  	},
927  	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
928  	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
929  	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
930  	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
931  	{
932  		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
933  		"component image activation timeout"
934  	},
935  	{
936  		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
937  		"get package version timeout"
938  	},
939  	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
940  	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
941  	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
942  	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
943  	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
944  };
945  
946  /**
947   * mpi3mr_reset_rc_name - get reset reason code name
948   * @reason_code: reset reason code value
949   *
950   * Map reset reason to a NULL terminated ASCII string
951   *
952   * Return: name corresponding to reset reason value or NULL.
953   */
954  static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
955  {
956  	int i;
957  	char *name = NULL;
958  
959  	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
960  		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
961  			name = mpi3mr_reset_reason_codes[i].name;
962  			break;
963  		}
964  	}
965  	return name;
966  }
967  
968  /* Reset type to name mapper structure */
969  static const struct {
970  	u16 reset_type;
971  	char *name;
972  } mpi3mr_reset_types[] = {
973  	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
974  	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
975  };
976  
977  /**
978   * mpi3mr_reset_type_name - get reset type name
979   * @reset_type: reset type value
980   *
981   * Map reset type to a NULL terminated ASCII string
982   *
983   * Return: name corresponding to reset type value or NULL.
984   */
985  static const char *mpi3mr_reset_type_name(u16 reset_type)
986  {
987  	int i;
988  	char *name = NULL;
989  
990  	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
991  		if (mpi3mr_reset_types[i].reset_type == reset_type) {
992  			name = mpi3mr_reset_types[i].name;
993  			break;
994  		}
995  	}
996  	return name;
997  }
998  
999  /**
1000   * mpi3mr_print_fault_info - Display fault information
1001   * @mrioc: Adapter instance reference
1002   *
1003   * Display the controller fault information if there is a
1004   * controller fault.
1005   *
1006   * Return: Nothing.
1007   */
1008  void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1009  {
1010  	u32 ioc_status, code, code1, code2, code3;
1011  
1012  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1013  
1014  	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1015  		code = readl(&mrioc->sysif_regs->fault);
1016  		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1017  		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1018  		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1019  
1020  		ioc_info(mrioc,
1021  		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1022  		    code, code1, code2, code3);
1023  	}
1024  }
1025  
1026  /**
1027   * mpi3mr_get_iocstate - Get IOC State
1028   * @mrioc: Adapter instance reference
1029   *
1030   * Return a proper IOC state enum based on the IOC status and
1031   * IOC configuration and unrecoverable state of the controller.
1032   *
1033   * Return: Current IOC state.
1034   */
1035  enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1036  {
1037  	u32 ioc_status, ioc_config;
1038  	u8 ready, enabled;
1039  
1040  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1041  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1042  
1043  	if (mrioc->unrecoverable)
1044  		return MRIOC_STATE_UNRECOVERABLE;
1045  	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1046  		return MRIOC_STATE_FAULT;
1047  
1048  	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1049  	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1050  
1051  	if (ready && enabled)
1052  		return MRIOC_STATE_READY;
1053  	if ((!ready) && (!enabled))
1054  		return MRIOC_STATE_RESET;
1055  	if ((!ready) && (enabled))
1056  		return MRIOC_STATE_BECOMING_READY;
1057  
1058  	return MRIOC_STATE_RESET_REQUESTED;
1059  }
1060  
1061  /**
1062   * mpi3mr_clear_reset_history - clear reset history
1063   * @mrioc: Adapter instance reference
1064   *
1065   * Write the reset history bit in IOC status to clear the bit,
1066   * if it is already set.
1067   *
1068   * Return: Nothing.
1069   */
1070  static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1071  {
1072  	u32 ioc_status;
1073  
1074  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1075  	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1076  		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1077  }
1078  
1079  /**
1080   * mpi3mr_issue_and_process_mur - Message unit Reset handler
1081   * @mrioc: Adapter instance reference
1082   * @reset_reason: Reset reason code
1083   *
1084   * Issue Message unit Reset to the controller and wait for it to
1085   * be complete.
1086   *
1087   * Return: 0 on success, -1 on failure.
1088   */
1089  static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1090  	u32 reset_reason)
1091  {
1092  	u32 ioc_config, timeout, ioc_status;
1093  	int retval = -1;
1094  
1095  	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1096  	if (mrioc->unrecoverable) {
1097  		ioc_info(mrioc, "IOC is unrecoverable, MUR not issued\n");
1098  		return retval;
1099  	}
1100  	mpi3mr_clear_reset_history(mrioc);
1101  	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1102  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1103  	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1104  	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1105  
1106  	timeout = MPI3MR_MUR_TIMEOUT * 10;
1107  	do {
1108  		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1109  		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1110  			mpi3mr_clear_reset_history(mrioc);
1111  			break;
1112  		}
1113  		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1114  			mpi3mr_print_fault_info(mrioc);
1115  			break;
1116  		}
1117  		msleep(100);
1118  	} while (--timeout);
1119  
1120  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1121  	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1122  	      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1123  	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1124  		retval = 0;
1125  
1126  	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
1127  	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1128  	return retval;
1129  }
1130  
1131  /**
1132   * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1133   * during reset/resume
1134   * @mrioc: Adapter instance reference
1135   *
1136   * Return zero if the new IOCFacts parameter values are compatible with
1137   * the older values, else return -EPERM.
1138   */
1139  static int
1140  mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1141  {
1142  	unsigned long *removepend_bitmap;
1143  
1144  	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1145  		ioc_err(mrioc,
1146  		    "cannot increase reply size from %d to %d\n",
1147  		    mrioc->reply_sz, mrioc->facts.reply_sz);
1148  		return -EPERM;
1149  	}
1150  
1151  	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1152  		ioc_err(mrioc,
1153  		    "cannot reduce number of operational reply queues from %d to %d\n",
1154  		    mrioc->num_op_reply_q,
1155  		    mrioc->facts.max_op_reply_q);
1156  		return -EPERM;
1157  	}
1158  
1159  	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1160  		ioc_err(mrioc,
1161  		    "cannot reduce number of operational request queues from %d to %d\n",
1162  		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1163  		return -EPERM;
1164  	}
1165  
1166  	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
1167  		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
1168  			    "\tchanged after reset: previous(%d), new(%d),\n"
1169  			    "the driver cannot change this at run time\n",
1170  			    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
1171  
1172  	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
1173  	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
1174  		ioc_err(mrioc,
1175  		    "critical error: multipath capability is enabled at the\n"
1176  		    "\tcontroller while sas transport support is enabled at the\n"
1177  		    "\tdriver, please reboot the system or reload the driver\n");
1178  
1179  	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
1180  		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
1181  						  GFP_KERNEL);
1182  		if (!removepend_bitmap) {
1183  			ioc_err(mrioc,
1184  				"failed to increase removepend_bitmap bits from %d to %d\n",
1185  				mrioc->dev_handle_bitmap_bits,
1186  				mrioc->facts.max_devhandle);
1187  			return -EPERM;
1188  		}
1189  		bitmap_free(mrioc->removepend_bitmap);
1190  		mrioc->removepend_bitmap = removepend_bitmap;
1191  		ioc_info(mrioc,
1192  			 "increased bits of dev_handle_bitmap from %d to %d\n",
1193  			 mrioc->dev_handle_bitmap_bits,
1194  			 mrioc->facts.max_devhandle);
1195  		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
1196  	}
1197  
1198  	return 0;
1199  }
1200  
1201  /**
1202   * mpi3mr_bring_ioc_ready - Bring controller to ready state
1203   * @mrioc: Adapter instance reference
1204   *
1205   * Set Enable IOC bit in IOC configuration register and wait for
1206   * the controller to become ready.
1207   *
1208   * Return: 0 on success, appropriate error on failure.
1209   */
1210  static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1211  {
1212  	u32 ioc_config, ioc_status, timeout, host_diagnostic;
1213  	int retval = 0;
1214  	enum mpi3mr_iocstate ioc_state;
1215  	u64 base_info;
1216  
1217  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1218  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1219  	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1220  	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1221  	    ioc_status, ioc_config, base_info);
1222  
1223  	/* The timeout value is in 2 second units, convert it to seconds */
1224  	mrioc->ready_timeout =
1225  	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1226  	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1227  
1228  	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1229  
1230  	ioc_state = mpi3mr_get_iocstate(mrioc);
1231  	ioc_info(mrioc, "controller is in %s state during detection\n",
1232  	    mpi3mr_iocstate_name(ioc_state));
1233  
1234  	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
1235  	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
1236  		timeout = mrioc->ready_timeout * 10;
1237  		do {
1238  			msleep(100);
1239  		} while (--timeout);
1240  
1241  		if (!pci_device_is_present(mrioc->pdev)) {
1242  			mrioc->unrecoverable = 1;
1243  			ioc_err(mrioc,
1244  			    "controller is not present while waiting to reset\n");
1245  			retval = -1;
1246  			goto out_device_not_present;
1247  		}
1248  
1249  		ioc_state = mpi3mr_get_iocstate(mrioc);
1250  		ioc_info(mrioc,
1251  		    "controller is in %s state after waiting to reset\n",
1252  		    mpi3mr_iocstate_name(ioc_state));
1253  	}
1254  
1255  	if (ioc_state == MRIOC_STATE_READY) {
1256  		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1257  		retval = mpi3mr_issue_and_process_mur(mrioc,
1258  		    MPI3MR_RESET_FROM_BRINGUP);
1259  		ioc_state = mpi3mr_get_iocstate(mrioc);
1260  		if (retval)
1261  			ioc_err(mrioc,
1262  			    "message unit reset failed with error %d current state %s\n",
1263  			    retval, mpi3mr_iocstate_name(ioc_state));
1264  	}
1265  	if (ioc_state != MRIOC_STATE_RESET) {
1266  		if (ioc_state == MRIOC_STATE_FAULT) {
1267  			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1268  			mpi3mr_print_fault_info(mrioc);
1269  			do {
1270  				host_diagnostic =
1271  					readl(&mrioc->sysif_regs->host_diagnostic);
1272  				if (!(host_diagnostic &
1273  				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1274  					break;
1275  				if (!pci_device_is_present(mrioc->pdev)) {
1276  					mrioc->unrecoverable = 1;
1277  					ioc_err(mrioc, "controller is not present at the bringup\n");
1278  					goto out_device_not_present;
1279  				}
1280  				msleep(100);
1281  			} while (--timeout);
1282  		}
1283  		mpi3mr_print_fault_info(mrioc);
1284  		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1285  		retval = mpi3mr_issue_reset(mrioc,
1286  		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1287  		    MPI3MR_RESET_FROM_BRINGUP);
1288  		if (retval) {
1289  			ioc_err(mrioc,
1290  			    "soft reset failed with error %d\n", retval);
1291  			goto out_failed;
1292  		}
1293  	}
1294  	ioc_state = mpi3mr_get_iocstate(mrioc);
1295  	if (ioc_state != MRIOC_STATE_RESET) {
1296  		ioc_err(mrioc,
1297  		    "cannot bring controller to reset state, current state: %s\n",
1298  		    mpi3mr_iocstate_name(ioc_state));
1299  		goto out_failed;
1300  	}
1301  	mpi3mr_clear_reset_history(mrioc);
1302  	retval = mpi3mr_setup_admin_qpair(mrioc);
1303  	if (retval) {
1304  		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1305  		    retval);
1306  		goto out_failed;
1307  	}
1308  
1309  	ioc_info(mrioc, "bringing controller to ready state\n");
1310  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1311  	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1312  	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1313  
1314  	timeout = mrioc->ready_timeout * 10;
1315  	do {
1316  		ioc_state = mpi3mr_get_iocstate(mrioc);
1317  		if (ioc_state == MRIOC_STATE_READY) {
1318  			ioc_info(mrioc,
1319  			    "successfully transitioned to %s state\n",
1320  			    mpi3mr_iocstate_name(ioc_state));
1321  			return 0;
1322  		}
1323  		if (!pci_device_is_present(mrioc->pdev)) {
1324  			mrioc->unrecoverable = 1;
1325  			ioc_err(mrioc,
1326  			    "controller is not present at the bringup\n");
1327  			retval = -1;
1328  			goto out_device_not_present;
1329  		}
1330  		msleep(100);
1331  	} while (--timeout);
1332  
1333  out_failed:
1334  	ioc_state = mpi3mr_get_iocstate(mrioc);
1335  	ioc_err(mrioc,
1336  	    "failed to bring to ready state,  current state: %s\n",
1337  	    mpi3mr_iocstate_name(ioc_state));
1338  out_device_not_present:
1339  	return retval;
1340  }
1341  
1342  /**
1343   * mpi3mr_soft_reset_success - Check whether soft reset succeeded
1344   * @ioc_status: IOC status register value
1345   * @ioc_config: IOC config register value
1346   *
1347   * Check whether the soft reset is successful or not based on
1348   * IOC status and IOC config register values.
1349   *
1350   * Return: True when the soft reset is success, false otherwise.
1351   */
1352  static inline bool
1353  mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1354  {
1355  	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1356  	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1357  		return true;
1358  	return false;
1359  }
1360  
1361  /**
1362   * mpi3mr_diagfault_success - Check whether diag fault reset succeeded
1363   * @mrioc: Adapter reference
1364   * @ioc_status: IOC status register value
1365   *
1366   * Check whether the controller hit diag reset fault code.
1367   *
1368   * Return: True when there is diag fault, false otherwise.
1369   */
1370  static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1371  	u32 ioc_status)
1372  {
1373  	u32 fault;
1374  
1375  	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1376  		return false;
1377  	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1378  	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1379  		mpi3mr_print_fault_info(mrioc);
1380  		return true;
1381  	}
1382  	return false;
1383  }
1384  
1385  /**
1386   * mpi3mr_set_diagsave - Set diag save bit for snapdump
1387   * @mrioc: Adapter reference
1388   *
1389   * Set diag save bit in IOC configuration register to enable
1390   * snapdump.
1391   *
1392   * Return: Nothing.
1393   */
1394  static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1395  {
1396  	u32 ioc_config;
1397  
1398  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1399  	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1400  	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1401  }
1402  
1403  /**
1404   * mpi3mr_issue_reset - Issue reset to the controller
1405   * @mrioc: Adapter reference
1406   * @reset_type: Reset type
1407   * @reset_reason: Reset reason code
1408   *
1409   * Unlock the host diagnostic registers and write the specific
1410   * reset type to that, wait for reset acknowledgment from the
1411   * controller, if the reset is not successful retry for the
1412   * predefined number of times.
1413   *
1414   * Return: 0 on success, non-zero on failure.
1415   */
1416  static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1417  	u32 reset_reason)
1418  {
1419  	int retval = -1;
1420  	u8 unlock_retry_count = 0;
1421  	u32 host_diagnostic, ioc_status, ioc_config;
1422  	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1423  
1424  	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1425  	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1426  		return retval;
1427  	if (mrioc->unrecoverable)
1428  		return retval;
1429  	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1430  		retval = 0;
1431  		return retval;
1432  	}
1433  
1434  	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1435  	    mpi3mr_reset_type_name(reset_type),
1436  	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
1437  
1438  	mpi3mr_clear_reset_history(mrioc);
1439  	do {
1440  		ioc_info(mrioc,
1441  		    "Write magic sequence to unlock host diag register (retry=%d)\n",
1442  		    ++unlock_retry_count);
1443  		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1444  			ioc_err(mrioc,
1445  			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1446  			    mpi3mr_reset_type_name(reset_type),
1447  			    host_diagnostic);
1448  			mrioc->unrecoverable = 1;
1449  			return retval;
1450  		}
1451  
1452  		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1453  		    &mrioc->sysif_regs->write_sequence);
1454  		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1455  		    &mrioc->sysif_regs->write_sequence);
1456  		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1457  		    &mrioc->sysif_regs->write_sequence);
1458  		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1459  		    &mrioc->sysif_regs->write_sequence);
1460  		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1461  		    &mrioc->sysif_regs->write_sequence);
1462  		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1463  		    &mrioc->sysif_regs->write_sequence);
1464  		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1465  		    &mrioc->sysif_regs->write_sequence);
1466  		usleep_range(1000, 1100);
1467  		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1468  		ioc_info(mrioc,
1469  		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1470  		    unlock_retry_count, host_diagnostic);
1471  	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1472  
1473  	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1474  	writel(host_diagnostic | reset_type,
1475  	    &mrioc->sysif_regs->host_diagnostic);
1476  	switch (reset_type) {
1477  	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1478  		do {
1479  			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1480  			ioc_config =
1481  			    readl(&mrioc->sysif_regs->ioc_configuration);
1482  			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1483  			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1484  			    ) {
1485  				mpi3mr_clear_reset_history(mrioc);
1486  				retval = 0;
1487  				break;
1488  			}
1489  			msleep(100);
1490  		} while (--timeout);
1491  		mpi3mr_print_fault_info(mrioc);
1492  		break;
1493  	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1494  		do {
1495  			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1496  			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1497  				retval = 0;
1498  				break;
1499  			}
1500  			msleep(100);
1501  		} while (--timeout);
1502  		break;
1503  	default:
1504  		break;
1505  	}
1506  
1507  	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1508  	    &mrioc->sysif_regs->write_sequence);
1509  
1510  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1511  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1512  	ioc_info(mrioc,
1513  	    "ioc_status/ioc_config after %s reset is (0x%x)/(0x%x)\n",
1514  	    (!retval) ? "successful" : "failed", ioc_status,
1515  	    ioc_config);
1516  	if (retval)
1517  		mrioc->unrecoverable = 1;
1518  	return retval;
1519  }
1520  
1521  /**
1522   * mpi3mr_admin_request_post - Post request to admin queue
1523   * @mrioc: Adapter reference
1524   * @admin_req: MPI3 request
1525   * @admin_req_sz: Request size
1526   * @ignore_reset: Ignore reset in process
1527   *
1528   * Post the MPI3 request into admin request queue and
1529   * inform the controller, if the queue is full return
1530   * appropriate error.
1531   *
1532   * Return: 0 on success, non-zero on failure.
1533   */
1534  int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1535  	u16 admin_req_sz, u8 ignore_reset)
1536  {
1537  	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1538  	int retval = 0;
1539  	unsigned long flags;
1540  	u8 *areq_entry;
1541  
1542  	if (mrioc->unrecoverable) {
1543  		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1544  		return -EFAULT;
1545  	}
1546  
1547  	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1548  	areq_pi = mrioc->admin_req_pi;
1549  	areq_ci = mrioc->admin_req_ci;
1550  	max_entries = mrioc->num_admin_req;
1551  	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1552  	    (areq_pi == (max_entries - 1)))) {
1553  		ioc_err(mrioc, "AdminReqQ full condition detected\n");
1554  		retval = -EAGAIN;
1555  		goto out;
1556  	}
1557  	if (!ignore_reset && mrioc->reset_in_progress) {
1558  		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1559  		retval = -EAGAIN;
1560  		goto out;
1561  	}
1562  	areq_entry = (u8 *)mrioc->admin_req_base +
1563  	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1564  	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1565  	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1566  
1567  	if (++areq_pi == max_entries)
1568  		areq_pi = 0;
1569  	mrioc->admin_req_pi = areq_pi;
1570  
1571  	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1572  
1573  out:
1574  	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1575  
1576  	return retval;
1577  }
1578  
1579  /**
1580   * mpi3mr_free_op_req_q_segments - free request memory segments
1581   * @mrioc: Adapter instance reference
1582   * @q_idx: operational request queue index
1583   *
1584   * Free memory segments allocated for operational request queue
1585   *
1586   * Return: Nothing.
1587   */
1588  static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1589  {
1590  	u16 j;
1591  	int size;
1592  	struct segments *segments;
1593  
1594  	segments = mrioc->req_qinfo[q_idx].q_segments;
1595  	if (!segments)
1596  		return;
1597  
1598  	if (mrioc->enable_segqueue) {
1599  		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1600  		if (mrioc->req_qinfo[q_idx].q_segment_list) {
1601  			dma_free_coherent(&mrioc->pdev->dev,
1602  			    MPI3MR_MAX_SEG_LIST_SIZE,
1603  			    mrioc->req_qinfo[q_idx].q_segment_list,
1604  			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
1605  			mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1606  		}
1607  	} else
1608  		size = mrioc->req_qinfo[q_idx].segment_qd *
1609  		    mrioc->facts.op_req_sz;
1610  
1611  	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1612  		if (!segments[j].segment)
1613  			continue;
1614  		dma_free_coherent(&mrioc->pdev->dev,
1615  		    size, segments[j].segment, segments[j].segment_dma);
1616  		segments[j].segment = NULL;
1617  	}
1618  	kfree(mrioc->req_qinfo[q_idx].q_segments);
1619  	mrioc->req_qinfo[q_idx].q_segments = NULL;
1620  	mrioc->req_qinfo[q_idx].qid = 0;
1621  }
1622  
1623  /**
1624   * mpi3mr_free_op_reply_q_segments - free reply memory segments
1625   * @mrioc: Adapter instance reference
1626   * @q_idx: operational reply queue index
1627   *
1628   * Free memory segments allocated for operational reply queue
1629   *
1630   * Return: Nothing.
1631   */
1632  static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1633  {
1634  	u16 j;
1635  	int size;
1636  	struct segments *segments;
1637  
1638  	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1639  	if (!segments)
1640  		return;
1641  
1642  	if (mrioc->enable_segqueue) {
1643  		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1644  		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1645  			dma_free_coherent(&mrioc->pdev->dev,
1646  			    MPI3MR_MAX_SEG_LIST_SIZE,
1647  			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
1648  			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1649  			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1650  		}
1651  	} else
1652  		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1653  		    mrioc->op_reply_desc_sz;
1654  
1655  	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1656  		if (!segments[j].segment)
1657  			continue;
1658  		dma_free_coherent(&mrioc->pdev->dev,
1659  		    size, segments[j].segment, segments[j].segment_dma);
1660  		segments[j].segment = NULL;
1661  	}
1662  
1663  	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1664  	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1665  	mrioc->op_reply_qinfo[q_idx].qid = 0;
1666  }
1667  
1668  /**
1669   * mpi3mr_delete_op_reply_q - delete operational reply queue
1670   * @mrioc: Adapter instance reference
1671   * @qidx: operational reply queue index
1672   *
1673   * Delete operational reply queue by issuing MPI request
1674   * through admin queue.
1675   *
1676   * Return:  0 on success, non-zero on failure.
1677   */
1678  static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1679  {
1680  	struct mpi3_delete_reply_queue_request delq_req;
1681  	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1682  	int retval = 0;
1683  	u16 reply_qid = 0, midx;
1684  
1685  	reply_qid = op_reply_q->qid;
1686  
1687  	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1688  
1689  	if (!reply_qid)	{
1690  		retval = -1;
1691  		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
1692  		goto out;
1693  	}
1694  
1695  	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
1696  	    mrioc->active_poll_qcount--;
1697  
1698  	memset(&delq_req, 0, sizeof(delq_req));
1699  	mutex_lock(&mrioc->init_cmds.mutex);
1700  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1701  		retval = -1;
1702  		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
1703  		mutex_unlock(&mrioc->init_cmds.mutex);
1704  		goto out;
1705  	}
1706  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1707  	mrioc->init_cmds.is_waiting = 1;
1708  	mrioc->init_cmds.callback = NULL;
1709  	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1710  	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
1711  	delq_req.queue_id = cpu_to_le16(reply_qid);
1712  
1713  	init_completion(&mrioc->init_cmds.done);
1714  	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
1715  	    1);
1716  	if (retval) {
1717  		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
1718  		goto out_unlock;
1719  	}
1720  	wait_for_completion_timeout(&mrioc->init_cmds.done,
1721  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1722  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1723  		ioc_err(mrioc, "delete reply queue timed out\n");
1724  		mpi3mr_check_rh_fault_ioc(mrioc,
1725  		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
1726  		retval = -1;
1727  		goto out_unlock;
1728  	}
1729  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1730  	    != MPI3_IOCSTATUS_SUCCESS) {
1731  		ioc_err(mrioc,
1732  		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1733  		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1734  		    mrioc->init_cmds.ioc_loginfo);
1735  		retval = -1;
1736  		goto out_unlock;
1737  	}
1738  	mrioc->intr_info[midx].op_reply_q = NULL;
1739  
1740  	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1741  out_unlock:
1742  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1743  	mutex_unlock(&mrioc->init_cmds.mutex);
1744  out:
1745  
1746  	return retval;
1747  }
1748  
1749  /**
1750   * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool
1751   * @mrioc: Adapter instance reference
1752   * @qidx: operational reply queue index
1753   *
1754   * Allocate segmented memory pools for operational reply
1755   * queue.
1756   *
1757   * Return: 0 on success, non-zero on failure.
1758   */
1759  static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1760  {
1761  	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1762  	int i, size;
1763  	u64 *q_segment_list_entry = NULL;
1764  	struct segments *segments;
1765  
1766  	if (mrioc->enable_segqueue) {
1767  		op_reply_q->segment_qd =
1768  		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
1769  
1770  		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1771  
1772  		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1773  		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
1774  		    GFP_KERNEL);
1775  		if (!op_reply_q->q_segment_list)
1776  			return -ENOMEM;
1777  		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
1778  	} else {
1779  		op_reply_q->segment_qd = op_reply_q->num_replies;
1780  		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
1781  	}
1782  
1783  	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
1784  	    op_reply_q->segment_qd);
1785  
1786  	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
1787  	    sizeof(struct segments), GFP_KERNEL);
1788  	if (!op_reply_q->q_segments)
1789  		return -ENOMEM;
1790  
1791  	segments = op_reply_q->q_segments;
1792  	for (i = 0; i < op_reply_q->num_segments; i++) {
1793  		segments[i].segment =
1794  		    dma_alloc_coherent(&mrioc->pdev->dev,
1795  		    size, &segments[i].segment_dma, GFP_KERNEL);
1796  		if (!segments[i].segment)
1797  			return -ENOMEM;
1798  		if (mrioc->enable_segqueue)
1799  			q_segment_list_entry[i] =
1800  			    (unsigned long)segments[i].segment_dma;
1801  	}
1802  
1803  	return 0;
1804  }
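
/*
 * Illustrative sketch (not part of the driver): with segmented queues each
 * segment holds (segment size / descriptor size) descriptors, and the total
 * descriptor count is spread over DIV_ROUND_UP(num_replies, segment_qd)
 * segments. The numbers are hypothetical; for instance a 4 KiB segment with
 * 16-byte descriptors gives segment_qd = 256, so 1024 replies need 4
 * segments. The helper name below is made up for illustration.
 */
static u32 __maybe_unused mpi3mr_example_num_segments(u32 num_replies,
	u32 seg_size, u32 desc_sz)
{
	u32 segment_qd = seg_size / desc_sz;

	return DIV_ROUND_UP(num_replies, segment_qd);
}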
1805  
1806  /**
1807   * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
1808   * @mrioc: Adapter instance reference
1809   * @qidx: request queue index
1810   *
1811   * Allocate segmented memory pools for operational request
1812   * queue.
1813   *
1814   * Return: 0 on success, non-zero on failure.
1815   */
1816  static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1817  {
1818  	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
1819  	int i, size;
1820  	u64 *q_segment_list_entry = NULL;
1821  	struct segments *segments;
1822  
1823  	if (mrioc->enable_segqueue) {
1824  		op_req_q->segment_qd =
1825  		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
1826  
1827  		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1828  
1829  		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1830  		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
1831  		    GFP_KERNEL);
1832  		if (!op_req_q->q_segment_list)
1833  			return -ENOMEM;
1834  		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
1835  
1836  	} else {
1837  		op_req_q->segment_qd = op_req_q->num_requests;
1838  		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
1839  	}
1840  
1841  	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
1842  	    op_req_q->segment_qd);
1843  
1844  	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
1845  	    sizeof(struct segments), GFP_KERNEL);
1846  	if (!op_req_q->q_segments)
1847  		return -ENOMEM;
1848  
1849  	segments = op_req_q->q_segments;
1850  	for (i = 0; i < op_req_q->num_segments; i++) {
1851  		segments[i].segment =
1852  		    dma_alloc_coherent(&mrioc->pdev->dev,
1853  		    size, &segments[i].segment_dma, GFP_KERNEL);
1854  		if (!segments[i].segment)
1855  			return -ENOMEM;
1856  		if (mrioc->enable_segqueue)
1857  			q_segment_list_entry[i] =
1858  			    (unsigned long)segments[i].segment_dma;
1859  	}
1860  
1861  	return 0;
1862  }
1863  
1864  /**
1865   * mpi3mr_create_op_reply_q - create operational reply queue
1866   * @mrioc: Adapter instance reference
1867   * @qidx: operational reply queue index
1868   *
1869   * Create operational reply queue by issuing MPI request
1870   * through admin queue.
1871   *
1872   * Return:  0 on success, non-zero on failure.
1873   */
1874  static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1875  {
1876  	struct mpi3_create_reply_queue_request create_req;
1877  	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1878  	int retval = 0;
1879  	u16 reply_qid = 0, midx;
1880  
1881  	reply_qid = op_reply_q->qid;
1882  
1883  	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1884  
1885  	if (reply_qid) {
1886  		retval = -1;
1887  		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
1888  		    reply_qid);
1889  
1890  		return retval;
1891  	}
1892  
1893  	reply_qid = qidx + 1;
1894  	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
1895  	if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
1896  		!mrioc->pdev->revision)
1897  		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
1898  	op_reply_q->ci = 0;
1899  	op_reply_q->ephase = 1;
1900  	atomic_set(&op_reply_q->pend_ios, 0);
1901  	atomic_set(&op_reply_q->in_use, 0);
1902  	op_reply_q->enable_irq_poll = false;
1903  
1904  	if (!op_reply_q->q_segments) {
1905  		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
1906  		if (retval) {
1907  			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1908  			goto out;
1909  		}
1910  	}
1911  
1912  	memset(&create_req, 0, sizeof(create_req));
1913  	mutex_lock(&mrioc->init_cmds.mutex);
1914  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1915  		retval = -1;
1916  		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
1917  		goto out_unlock;
1918  	}
1919  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1920  	mrioc->init_cmds.is_waiting = 1;
1921  	mrioc->init_cmds.callback = NULL;
1922  	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1923  	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
1924  	create_req.queue_id = cpu_to_le16(reply_qid);
1925  
1926  	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
1927  		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
1928  	else
1929  		op_reply_q->qtype = MPI3MR_POLL_QUEUE;
1930  
1931  	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
1932  		create_req.flags =
1933  			MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
1934  		create_req.msix_index =
1935  			cpu_to_le16(mrioc->intr_info[midx].msix_index);
1936  	} else {
1937  		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
1938  		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
1939  			reply_qid, midx);
1940  		if (!mrioc->active_poll_qcount)
1941  			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
1942  			    mrioc->intr_info_count - 1));
1943  	}
1944  
1945  	if (mrioc->enable_segqueue) {
1946  		create_req.flags |=
1947  		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1948  		create_req.base_address = cpu_to_le64(
1949  		    op_reply_q->q_segment_list_dma);
1950  	} else
1951  		create_req.base_address = cpu_to_le64(
1952  		    op_reply_q->q_segments[0].segment_dma);
1953  
1954  	create_req.size = cpu_to_le16(op_reply_q->num_replies);
1955  
1956  	init_completion(&mrioc->init_cmds.done);
1957  	retval = mpi3mr_admin_request_post(mrioc, &create_req,
1958  	    sizeof(create_req), 1);
1959  	if (retval) {
1960  		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
1961  		goto out_unlock;
1962  	}
1963  	wait_for_completion_timeout(&mrioc->init_cmds.done,
1964  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1965  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1966  		ioc_err(mrioc, "create reply queue timed out\n");
1967  		mpi3mr_check_rh_fault_ioc(mrioc,
1968  		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
1969  		retval = -1;
1970  		goto out_unlock;
1971  	}
1972  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1973  	    != MPI3_IOCSTATUS_SUCCESS) {
1974  		ioc_err(mrioc,
1975  		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1976  		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1977  		    mrioc->init_cmds.ioc_loginfo);
1978  		retval = -1;
1979  		goto out_unlock;
1980  	}
1981  	op_reply_q->qid = reply_qid;
1982  	if (midx < mrioc->intr_info_count)
1983  		mrioc->intr_info[midx].op_reply_q = op_reply_q;
1984  
1985  	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
1986  	    mrioc->active_poll_qcount++;
1987  
1988  out_unlock:
1989  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1990  	mutex_unlock(&mrioc->init_cmds.mutex);
1991  out:
1992  
1993  	return retval;
1994  }
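
/*
 * Illustrative sketch (not part of the driver): reply queues whose MSI-x
 * index falls below (intr_info_count - requested_poll_qcount) are created as
 * interrupt-driven (default) queues; the remaining ones are polled queues.
 * For example, with 16 vectors and 4 requested poll queues, midx 0-11 map to
 * default queues and midx 12-15 to poll queues. The helper name below is
 * hypothetical.
 */
static bool __maybe_unused mpi3mr_example_is_poll_queue(u16 midx,
	u16 intr_info_count, u16 requested_poll_qcount)
{
	return midx >= (intr_info_count - requested_poll_qcount);
}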
1995  
1996  /**
1997   * mpi3mr_create_op_req_q - create operational request queue
1998   * @mrioc: Adapter instance reference
1999   * @idx: operational request queue index
2000   * @reply_qid: Reply queue ID
2001   *
2002   * Create operational request queue by issuing MPI request
2003   * through admin queue.
2004   *
2005   * Return:  0 on success, non-zero on failure.
2006   */
2007  static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
2008  	u16 reply_qid)
2009  {
2010  	struct mpi3_create_request_queue_request create_req;
2011  	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
2012  	int retval = 0;
2013  	u16 req_qid = 0;
2014  
2015  	req_qid = op_req_q->qid;
2016  
2017  	if (req_qid) {
2018  		retval = -1;
2019  		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
2020  		    req_qid);
2021  
2022  		return retval;
2023  	}
2024  	req_qid = idx + 1;
2025  
2026  	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
2027  	op_req_q->ci = 0;
2028  	op_req_q->pi = 0;
2029  	op_req_q->reply_qid = reply_qid;
2030  	spin_lock_init(&op_req_q->q_lock);
2031  
2032  	if (!op_req_q->q_segments) {
2033  		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
2034  		if (retval) {
2035  			mpi3mr_free_op_req_q_segments(mrioc, idx);
2036  			goto out;
2037  		}
2038  	}
2039  
2040  	memset(&create_req, 0, sizeof(create_req));
2041  	mutex_lock(&mrioc->init_cmds.mutex);
2042  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2043  		retval = -1;
2044  		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
2045  		goto out_unlock;
2046  	}
2047  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2048  	mrioc->init_cmds.is_waiting = 1;
2049  	mrioc->init_cmds.callback = NULL;
2050  	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2051  	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
2052  	create_req.queue_id = cpu_to_le16(req_qid);
2053  	if (mrioc->enable_segqueue) {
2054  		create_req.flags =
2055  		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2056  		create_req.base_address = cpu_to_le64(
2057  		    op_req_q->q_segment_list_dma);
2058  	} else
2059  		create_req.base_address = cpu_to_le64(
2060  		    op_req_q->q_segments[0].segment_dma);
2061  	create_req.reply_queue_id = cpu_to_le16(reply_qid);
2062  	create_req.size = cpu_to_le16(op_req_q->num_requests);
2063  
2064  	init_completion(&mrioc->init_cmds.done);
2065  	retval = mpi3mr_admin_request_post(mrioc, &create_req,
2066  	    sizeof(create_req), 1);
2067  	if (retval) {
2068  		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
2069  		goto out_unlock;
2070  	}
2071  	wait_for_completion_timeout(&mrioc->init_cmds.done,
2072  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2073  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2074  		ioc_err(mrioc, "create request queue timed out\n");
2075  		mpi3mr_check_rh_fault_ioc(mrioc,
2076  		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
2077  		retval = -1;
2078  		goto out_unlock;
2079  	}
2080  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2081  	    != MPI3_IOCSTATUS_SUCCESS) {
2082  		ioc_err(mrioc,
2083  		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2084  		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2085  		    mrioc->init_cmds.ioc_loginfo);
2086  		retval = -1;
2087  		goto out_unlock;
2088  	}
2089  	op_req_q->qid = req_qid;
2090  
2091  out_unlock:
2092  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2093  	mutex_unlock(&mrioc->init_cmds.mutex);
2094  out:
2095  
2096  	return retval;
2097  }
2098  
2099  /**
2100   * mpi3mr_create_op_queues - create operational queue pairs
2101   * @mrioc: Adapter instance reference
2102   *
2103   * Allocate memory for operational queue metadata and call
2104   * the create request and create reply queue functions.
2105   *
2106   * Return: 0 on success, non-zero on failures.
2107   */
2108  static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2109  {
2110  	int retval = 0;
2111  	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2112  
2113  	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2114  	    mrioc->facts.max_op_req_q);
2115  
2116  	msix_count_op_q =
2117  	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
2118  	if (!mrioc->num_queues)
2119  		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2120  	/*
2121  	 * During a reset, set num_queues to the number of queues
2122  	 * that was in use before the reset.
2123  	 */
2124  	num_queues = mrioc->num_op_reply_q ?
2125  	    mrioc->num_op_reply_q : mrioc->num_queues;
2126  	ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2127  	    num_queues);
2128  
2129  	if (!mrioc->req_qinfo) {
2130  		mrioc->req_qinfo = kcalloc(num_queues,
2131  		    sizeof(struct op_req_qinfo), GFP_KERNEL);
2132  		if (!mrioc->req_qinfo) {
2133  			retval = -1;
2134  			goto out_failed;
2135  		}
2136  
2137  		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2138  		    num_queues, GFP_KERNEL);
2139  		if (!mrioc->op_reply_qinfo) {
2140  			retval = -1;
2141  			goto out_failed;
2142  		}
2143  	}
2144  
2145  	if (mrioc->enable_segqueue)
2146  		ioc_info(mrioc,
2147  		    "allocating operational queues through segmented queues\n");
2148  
2149  	for (i = 0; i < num_queues; i++) {
2150  		if (mpi3mr_create_op_reply_q(mrioc, i)) {
2151  			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2152  			break;
2153  		}
2154  		if (mpi3mr_create_op_req_q(mrioc, i,
2155  		    mrioc->op_reply_qinfo[i].qid)) {
2156  			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2157  			mpi3mr_delete_op_reply_q(mrioc, i);
2158  			break;
2159  		}
2160  	}
2161  
2162  	if (i == 0) {
2163  		/* not even one queue was created successfully */
2164  		retval = -1;
2165  		goto out_failed;
2166  	}
2167  	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2168  	ioc_info(mrioc,
2169  	    "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2170  	    mrioc->num_op_reply_q, mrioc->default_qcount,
2171  	    mrioc->active_poll_qcount);
2172  
2173  	return retval;
2174  out_failed:
2175  	kfree(mrioc->req_qinfo);
2176  	mrioc->req_qinfo = NULL;
2177  
2178  	kfree(mrioc->op_reply_qinfo);
2179  	mrioc->op_reply_qinfo = NULL;
2180  
2181  	return retval;
2182  }
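
/*
 * Illustrative sketch (not part of the driver): the number of operational
 * queue pairs is bounded both by the controller limits reported in IOC facts
 * and by the MSI-x vectors left after the reply queue offset. For example,
 * facts limits of 128/128 with 16 usable vectors would yield 16 queue pairs.
 * The helper name below is hypothetical.
 */
static u16 __maybe_unused mpi3mr_example_num_qpairs(u16 max_op_reply_q,
	u16 max_op_req_q, u16 msix_count_op_q)
{
	u16 num_queues = min_t(int, max_op_reply_q, max_op_req_q);

	return min_t(int, num_queues, msix_count_op_q);
}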
2183  
2184  /**
2185   * mpi3mr_op_request_post - Post request to operational queue
2186   * @mrioc: Adapter reference
2187   * @op_req_q: Operational request queue info
2188   * @req: MPI3 request
2189   *
2190   * Post the MPI3 request into the operational request queue and
2191   * inform the controller; if the queue is full, return an
2192   * appropriate error.
2193   *
2194   * Return: 0 on success, non-zero on failure.
2195   */
2196  int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
2197  	struct op_req_qinfo *op_req_q, u8 *req)
2198  {
2199  	u16 pi = 0, max_entries, reply_qidx = 0, midx;
2200  	int retval = 0;
2201  	unsigned long flags;
2202  	u8 *req_entry;
2203  	void *segment_base_addr;
2204  	u16 req_sz = mrioc->facts.op_req_sz;
2205  	struct segments *segments = op_req_q->q_segments;
2206  
2207  	reply_qidx = op_req_q->reply_qid - 1;
2208  
2209  	if (mrioc->unrecoverable)
2210  		return -EFAULT;
2211  
2212  	spin_lock_irqsave(&op_req_q->q_lock, flags);
2213  	pi = op_req_q->pi;
2214  	max_entries = op_req_q->num_requests;
2215  
2216  	if (mpi3mr_check_req_qfull(op_req_q)) {
2217  		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
2218  		    reply_qidx, mrioc->op_reply_q_offset);
2219  		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
2220  
2221  		if (mpi3mr_check_req_qfull(op_req_q)) {
2222  			retval = -EAGAIN;
2223  			goto out;
2224  		}
2225  	}
2226  
2227  	if (mrioc->reset_in_progress) {
2228  		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
2229  		retval = -EAGAIN;
2230  		goto out;
2231  	}
2232  
2233  	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
2234  	req_entry = (u8 *)segment_base_addr +
2235  	    ((pi % op_req_q->segment_qd) * req_sz);
2236  
2237  	memset(req_entry, 0, req_sz);
2238  	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
2239  
2240  	if (++pi == max_entries)
2241  		pi = 0;
2242  	op_req_q->pi = pi;
2243  
2244  #ifndef CONFIG_PREEMPT_RT
2245  	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
2246  	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
2247  		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
2248  #else
2249  	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
2250  #endif
2251  
2252  	writel(op_req_q->pi,
2253  	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
2254  
2255  out:
2256  	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
2257  	return retval;
2258  }
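
/*
 * Illustrative sketch (not part of the driver): with segmented request
 * queues the producer index is translated into a segment number and a byte
 * offset inside that segment. For example, with segment_qd = 32 and a
 * 128-byte request frame, pi = 70 lands in segment 2 (70 / 32) at offset
 * (70 % 32) * 128 = 768. The helper name below is hypothetical.
 */
static __maybe_unused void *mpi3mr_example_req_slot(struct segments *segments,
	u16 pi, u16 segment_qd, u16 req_sz)
{
	return (u8 *)segments[pi / segment_qd].segment +
	    ((pi % segment_qd) * req_sz);
}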
2259  
2260  /**
2261   * mpi3mr_check_rh_fault_ioc - check reset history and fault
2262   * controller
2263   * @mrioc: Adapter instance reference
2264   * @reason_code: reason code for the fault.
2265   *
2266   * This routine saves a snapdump and faults the controller with
2267   * the given reason code if it is not already in the fault state
2268   * or asynchronously reset. It is used to handle initialization
2269   * time faults/resets/timeouts, as in those cases an immediate
2270   * soft reset invocation is not required.
2271   *
2272   * Return:  None.
2273   */
2274  void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
2275  {
2276  	u32 ioc_status, host_diagnostic, timeout;
2277  
2278  	if (mrioc->unrecoverable) {
2279  		ioc_err(mrioc, "controller is unrecoverable\n");
2280  		return;
2281  	}
2282  
2283  	if (!pci_device_is_present(mrioc->pdev)) {
2284  		mrioc->unrecoverable = 1;
2285  		ioc_err(mrioc, "controller is not present\n");
2286  		return;
2287  	}
2288  
2289  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2290  	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2291  	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
2292  		mpi3mr_print_fault_info(mrioc);
2293  		return;
2294  	}
2295  	mpi3mr_set_diagsave(mrioc);
2296  	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2297  	    reason_code);
2298  	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
2299  	do {
2300  		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2301  		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2302  			break;
2303  		msleep(100);
2304  	} while (--timeout);
2305  }
2306  
2307  /**
2308   * mpi3mr_sync_timestamp - Issue time stamp sync request
2309   * @mrioc: Adapter reference
2310   *
2311   * Issue IO unit control MPI request to synchronize firmware
2312   * timestamp with host time.
2313   *
2314   * Return: 0 on success, non-zero on failure.
2315   */
2316  static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2317  {
2318  	ktime_t current_time;
2319  	struct mpi3_iounit_control_request iou_ctrl;
2320  	int retval = 0;
2321  
2322  	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2323  	mutex_lock(&mrioc->init_cmds.mutex);
2324  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2325  		retval = -1;
2326  		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2327  		mutex_unlock(&mrioc->init_cmds.mutex);
2328  		goto out;
2329  	}
2330  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2331  	mrioc->init_cmds.is_waiting = 1;
2332  	mrioc->init_cmds.callback = NULL;
2333  	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2334  	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2335  	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
2336  	current_time = ktime_get_real();
2337  	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2338  
2339  	init_completion(&mrioc->init_cmds.done);
2340  	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2341  	    sizeof(iou_ctrl), 0);
2342  	if (retval) {
2343  		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2344  		goto out_unlock;
2345  	}
2346  
2347  	wait_for_completion_timeout(&mrioc->init_cmds.done,
2348  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2349  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2350  		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2351  		mrioc->init_cmds.is_waiting = 0;
2352  		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2353  			mpi3mr_check_rh_fault_ioc(mrioc,
2354  			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
2355  		retval = -1;
2356  		goto out_unlock;
2357  	}
2358  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2359  	    != MPI3_IOCSTATUS_SUCCESS) {
2360  		ioc_err(mrioc,
2361  		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2362  		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2363  		    mrioc->init_cmds.ioc_loginfo);
2364  		retval = -1;
2365  		goto out_unlock;
2366  	}
2367  
2368  out_unlock:
2369  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2370  	mutex_unlock(&mrioc->init_cmds.mutex);
2371  
2372  out:
2373  	return retval;
2374  }
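
/*
 * Illustrative sketch (not part of the driver): the timestamp carried in
 * param64[0] of the IO unit control request above is the host wall-clock
 * time converted to milliseconds. The helper name below is hypothetical.
 */
static s64 __maybe_unused mpi3mr_example_timestamp_ms(void)
{
	return ktime_to_ms(ktime_get_real());	/* host time in ms */
}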
2375  
2376  /**
2377   * mpi3mr_print_pkg_ver - display controller fw package version
2378   * @mrioc: Adapter reference
2379   *
2380   * Retrieve firmware package version from the component image
2381   * header of the controller flash and display it.
2382   *
2383   * Return: 0 on success and non-zero on failure.
2384   */
2385  static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2386  {
2387  	struct mpi3_ci_upload_request ci_upload;
2388  	int retval = -1;
2389  	void *data = NULL;
2390  	dma_addr_t data_dma;
2391  	struct mpi3_ci_manifest_mpi *manifest;
2392  	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2393  	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2394  
2395  	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2396  	    GFP_KERNEL);
2397  	if (!data)
2398  		return -ENOMEM;
2399  
2400  	memset(&ci_upload, 0, sizeof(ci_upload));
2401  	mutex_lock(&mrioc->init_cmds.mutex);
2402  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2403  		ioc_err(mrioc, "sending get package version failed due to command in use\n");
2404  		mutex_unlock(&mrioc->init_cmds.mutex);
2405  		goto out;
2406  	}
2407  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2408  	mrioc->init_cmds.is_waiting = 1;
2409  	mrioc->init_cmds.callback = NULL;
2410  	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2411  	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2412  	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2413  	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2414  	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2415  	ci_upload.segment_size = cpu_to_le32(data_len);
2416  
2417  	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2418  	    data_dma);
2419  	init_completion(&mrioc->init_cmds.done);
2420  	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2421  	    sizeof(ci_upload), 1);
2422  	if (retval) {
2423  		ioc_err(mrioc, "posting get package version failed\n");
2424  		goto out_unlock;
2425  	}
2426  	wait_for_completion_timeout(&mrioc->init_cmds.done,
2427  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2428  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2429  		ioc_err(mrioc, "get package version timed out\n");
2430  		mpi3mr_check_rh_fault_ioc(mrioc,
2431  		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2432  		retval = -1;
2433  		goto out_unlock;
2434  	}
2435  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2436  	    == MPI3_IOCSTATUS_SUCCESS) {
2437  		manifest = (struct mpi3_ci_manifest_mpi *) data;
2438  		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2439  			ioc_info(mrioc,
2440  			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2441  			    manifest->package_version.gen_major,
2442  			    manifest->package_version.gen_minor,
2443  			    manifest->package_version.phase_major,
2444  			    manifest->package_version.phase_minor,
2445  			    manifest->package_version.customer_id,
2446  			    manifest->package_version.build_num);
2447  		}
2448  	}
2449  	retval = 0;
2450  out_unlock:
2451  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2452  	mutex_unlock(&mrioc->init_cmds.mutex);
2453  
2454  out:
2455  	if (data)
2456  		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2457  		    data_dma);
2458  	return retval;
2459  }
2460  
2461  /**
2462   * mpi3mr_watchdog_work - watchdog thread to monitor faults
2463   * @work: work struct
2464   *
2465   * Watchdog work, executed periodically (1 second interval) to
2466   * monitor firmware faults and to issue periodic timestamp sync
2467   * to the firmware.
2468   *
2469   * Return: Nothing.
2470   */
2471  static void mpi3mr_watchdog_work(struct work_struct *work)
2472  {
2473  	struct mpi3mr_ioc *mrioc =
2474  	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2475  	unsigned long flags;
2476  	enum mpi3mr_iocstate ioc_state;
2477  	u32 fault, host_diagnostic, ioc_status;
2478  	u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
2479  
2480  	if (mrioc->reset_in_progress)
2481  		return;
2482  
2483  	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
2484  		ioc_err(mrioc, "watchdog could not detect the controller\n");
2485  		mrioc->unrecoverable = 1;
2486  	}
2487  
2488  	if (mrioc->unrecoverable) {
2489  		ioc_err(mrioc,
2490  		    "flush pending commands for unrecoverable controller\n");
2491  		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
2492  		return;
2493  	}
2494  
2495  	if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
2496  		mrioc->ts_update_counter = 0;
2497  		mpi3mr_sync_timestamp(mrioc);
2498  	}
2499  
2500  	if ((mrioc->prepare_for_reset) &&
2501  	    ((mrioc->prepare_for_reset_timeout_counter++) >=
2502  	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
2503  		mpi3mr_soft_reset_handler(mrioc,
2504  		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
2505  		return;
2506  	}
2507  
2508  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2509  	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2510  		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
2511  		return;
2512  	}
2513  
2514  	/* Check for fault state every one second and issue soft reset */
2515  	ioc_state = mpi3mr_get_iocstate(mrioc);
2516  	if (ioc_state != MRIOC_STATE_FAULT)
2517  		goto schedule_work;
2518  
2519  	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
2520  	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2521  	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2522  		if (!mrioc->diagsave_timeout) {
2523  			mpi3mr_print_fault_info(mrioc);
2524  			ioc_warn(mrioc, "diag save in progress\n");
2525  		}
2526  		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2527  			goto schedule_work;
2528  	}
2529  
2530  	mpi3mr_print_fault_info(mrioc);
2531  	mrioc->diagsave_timeout = 0;
2532  
2533  	switch (fault) {
2534  	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
2535  	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
2536  		ioc_warn(mrioc,
2537  		    "controller requires system power cycle, marking controller as unrecoverable\n");
2538  		mrioc->unrecoverable = 1;
2539  		goto schedule_work;
2540  	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
2541  		goto schedule_work;
2542  	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
2543  		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
2544  		break;
2545  	default:
2546  		break;
2547  	}
2548  	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
2549  	return;
2550  
2551  schedule_work:
2552  	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2553  	if (mrioc->watchdog_work_q)
2554  		queue_delayed_work(mrioc->watchdog_work_q,
2555  		    &mrioc->watchdog_work,
2556  		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2557  	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2558  	return;
2559  }
2560  
2561  /**
2562   * mpi3mr_start_watchdog - Start watchdog
2563   * @mrioc: Adapter instance reference
2564   *
2565   * Create and start the watchdog thread to monitor controller
2566   * faults.
2567   *
2568   * Return: Nothing.
2569   */
2570  void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2571  {
2572  	if (mrioc->watchdog_work_q)
2573  		return;
2574  
2575  	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2576  	snprintf(mrioc->watchdog_work_q_name,
2577  	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2578  	    mrioc->id);
2579  	mrioc->watchdog_work_q =
2580  	    create_singlethread_workqueue(mrioc->watchdog_work_q_name);
2581  	if (!mrioc->watchdog_work_q) {
2582  		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2583  		return;
2584  	}
2585  
2586  	if (mrioc->watchdog_work_q)
2587  		queue_delayed_work(mrioc->watchdog_work_q,
2588  		    &mrioc->watchdog_work,
2589  		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2590  }
2591  
2592  /**
2593   * mpi3mr_stop_watchdog - Stop watchdog
2594   * @mrioc: Adapter instance reference
2595   *
2596   * Stop the watchdog thread created to monitor controller
2597   * faults.
2598   *
2599   * Return: Nothing.
2600   */
2601  void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2602  {
2603  	unsigned long flags;
2604  	struct workqueue_struct *wq;
2605  
2606  	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2607  	wq = mrioc->watchdog_work_q;
2608  	mrioc->watchdog_work_q = NULL;
2609  	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2610  	if (wq) {
2611  		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2612  			flush_workqueue(wq);
2613  		destroy_workqueue(wq);
2614  	}
2615  }
2616  
2617  /**
2618   * mpi3mr_setup_admin_qpair - Setup admin queue pair
2619   * @mrioc: Adapter instance reference
2620   *
2621   * Allocate memory for admin queue pair if required and register
2622   * the admin queue with the controller.
2623   *
2624   * Return: 0 on success, non-zero on failures.
2625   */
2626  static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
2627  {
2628  	int retval = 0;
2629  	u32 num_admin_entries = 0;
2630  
2631  	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
2632  	mrioc->num_admin_req = mrioc->admin_req_q_sz /
2633  	    MPI3MR_ADMIN_REQ_FRAME_SZ;
2634  	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
2635  
2636  	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
2637  	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
2638  	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
2639  	mrioc->admin_reply_ci = 0;
2640  	mrioc->admin_reply_ephase = 1;
2641  	atomic_set(&mrioc->admin_reply_q_in_use, 0);
2642  
2643  	if (!mrioc->admin_req_base) {
2644  		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
2645  		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
2646  
2647  		if (!mrioc->admin_req_base) {
2648  			retval = -1;
2649  			goto out_failed;
2650  		}
2651  
2652  		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
2653  		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
2654  		    GFP_KERNEL);
2655  
2656  		if (!mrioc->admin_reply_base) {
2657  			retval = -1;
2658  			goto out_failed;
2659  		}
2660  	}
2661  
2662  	num_admin_entries = (mrioc->num_admin_replies << 16) |
2663  	    (mrioc->num_admin_req);
2664  	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
2665  	mpi3mr_writeq(mrioc->admin_req_dma,
2666  	    &mrioc->sysif_regs->admin_request_queue_address);
2667  	mpi3mr_writeq(mrioc->admin_reply_dma,
2668  	    &mrioc->sysif_regs->admin_reply_queue_address);
2669  	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
2670  	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
2671  	return retval;
2672  
2673  out_failed:
2674  
2675  	if (mrioc->admin_reply_base) {
2676  		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2677  		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
2678  		mrioc->admin_reply_base = NULL;
2679  	}
2680  	if (mrioc->admin_req_base) {
2681  		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2682  		    mrioc->admin_req_base, mrioc->admin_req_dma);
2683  		mrioc->admin_req_base = NULL;
2684  	}
2685  	return retval;
2686  }
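
/*
 * Illustrative sketch (not part of the driver): the admin queue depths are
 * programmed through a single register, with the reply count in the upper
 * 16 bits and the request count in the lower 16 bits. For example, 128
 * replies and 64 requests would be written as 0x00800040. The helper name
 * below is hypothetical.
 */
static u32 __maybe_unused mpi3mr_example_admin_q_entries(u16 num_replies,
	u16 num_reqs)
{
	return ((u32)num_replies << 16) | num_reqs;
}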
2687  
2688  /**
2689   * mpi3mr_issue_iocfacts - Send IOC Facts
2690   * @mrioc: Adapter instance reference
2691   * @facts_data: Cached IOC facts data
2692   *
2693   * Issue IOC Facts MPI request through admin queue and wait for
2694   * the completion of it or time out.
2695   *
2696   * Return: 0 on success, non-zero on failures.
2697   */
2698  static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
2699  	struct mpi3_ioc_facts_data *facts_data)
2700  {
2701  	struct mpi3_ioc_facts_request iocfacts_req;
2702  	void *data = NULL;
2703  	dma_addr_t data_dma;
2704  	u32 data_len = sizeof(*facts_data);
2705  	int retval = 0;
2706  	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2707  
2708  	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2709  	    GFP_KERNEL);
2710  
2711  	if (!data) {
2712  		retval = -1;
2713  		goto out;
2714  	}
2715  
2716  	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
2717  	mutex_lock(&mrioc->init_cmds.mutex);
2718  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2719  		retval = -1;
2720  		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
2721  		mutex_unlock(&mrioc->init_cmds.mutex);
2722  		goto out;
2723  	}
2724  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2725  	mrioc->init_cmds.is_waiting = 1;
2726  	mrioc->init_cmds.callback = NULL;
2727  	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2728  	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
2729  
2730  	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
2731  	    data_dma);
2732  
2733  	init_completion(&mrioc->init_cmds.done);
2734  	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
2735  	    sizeof(iocfacts_req), 1);
2736  	if (retval) {
2737  		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
2738  		goto out_unlock;
2739  	}
2740  	wait_for_completion_timeout(&mrioc->init_cmds.done,
2741  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2742  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2743  		ioc_err(mrioc, "ioc_facts timed out\n");
2744  		mpi3mr_check_rh_fault_ioc(mrioc,
2745  		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
2746  		retval = -1;
2747  		goto out_unlock;
2748  	}
2749  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2750  	    != MPI3_IOCSTATUS_SUCCESS) {
2751  		ioc_err(mrioc,
2752  		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2753  		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2754  		    mrioc->init_cmds.ioc_loginfo);
2755  		retval = -1;
2756  		goto out_unlock;
2757  	}
2758  	memcpy(facts_data, (u8 *)data, data_len);
2759  	mpi3mr_process_factsdata(mrioc, facts_data);
2760  out_unlock:
2761  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2762  	mutex_unlock(&mrioc->init_cmds.mutex);
2763  
2764  out:
2765  	if (data)
2766  		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
2767  
2768  	return retval;
2769  }
2770  
2771  /**
2772   * mpi3mr_check_reset_dma_mask - Check and set DMA mask
2773   * @mrioc: Adapter instance reference
2774   *
2775   * Check whether the new DMA mask requested through IOCFacts by
2776   * the firmware needs to be set; if so, set it.
2777   *
2778   * Return: 0 on success, non-zero on failure.
2779   */
2780  static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
2781  {
2782  	struct pci_dev *pdev = mrioc->pdev;
2783  	int r;
2784  	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
2785  
2786  	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
2787  		return 0;
2788  
2789  	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
2790  	    mrioc->dma_mask, facts_dma_mask);
2791  
2792  	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
2793  	if (r) {
2794  		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
2795  		    facts_dma_mask, r);
2796  		return r;
2797  	}
2798  	mrioc->dma_mask = facts_dma_mask;
2799  	return r;
2800  }
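
/*
 * Illustrative sketch (not part of the driver): the IOC facts report a DMA
 * address width in bits, which is turned into a mask with DMA_BIT_MASK();
 * a reported width of 32 gives 0x00000000ffffffff. The narrower mask is
 * only applied when it is smaller than the one currently in use. The helper
 * name below is hypothetical.
 */
static u64 __maybe_unused mpi3mr_example_dma_mask(u8 width)
{
	return DMA_BIT_MASK(width);	/* e.g. width 32 -> 0xffffffff */
}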
2801  
2802  /**
2803   * mpi3mr_process_factsdata - Process IOC facts data
2804   * @mrioc: Adapter instance reference
2805   * @facts_data: Cached IOC facts data
2806   *
2807   * Convert IOC facts data into CPU endianness and cache it in
2808   * the driver.
2809   *
2810   * Return: Nothing.
2811   */
2812  static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
2813  	struct mpi3_ioc_facts_data *facts_data)
2814  {
2815  	u32 ioc_config, req_sz, facts_flags;
2816  
2817  	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
2818  	    (sizeof(*facts_data) / 4)) {
2819  		ioc_warn(mrioc,
2820  		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
2821  		    sizeof(*facts_data),
2822  		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
2823  	}
2824  
2825  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
2826  	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
2827  	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
2828  	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
2829  		ioc_err(mrioc,
2830  		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
2831  		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
2832  	}
2833  
2834  	memset(&mrioc->facts, 0, sizeof(mrioc->facts));
2835  
2836  	facts_flags = le32_to_cpu(facts_data->flags);
2837  	mrioc->facts.op_req_sz = req_sz;
2838  	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
2839  	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
2840  	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
2841  
2842  	mrioc->facts.ioc_num = facts_data->ioc_number;
2843  	mrioc->facts.who_init = facts_data->who_init;
2844  	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
2845  	mrioc->facts.personality = (facts_flags &
2846  	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
2847  	mrioc->facts.dma_mask = (facts_flags &
2848  	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
2849  	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
2850  	mrioc->facts.protocol_flags = facts_data->protocol_flags;
2851  	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
2852  	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
2853  	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
2854  	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
2855  	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
2856  	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
2857  	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
2858  	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
2859  	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
2860  	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
2861  	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
2862  	mrioc->facts.max_pcie_switches =
2863  	    le16_to_cpu(facts_data->max_pcie_switches);
2864  	mrioc->facts.max_sasexpanders =
2865  	    le16_to_cpu(facts_data->max_sas_expanders);
2866  	mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
2867  	mrioc->facts.max_sasinitiators =
2868  	    le16_to_cpu(facts_data->max_sas_initiators);
2869  	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
2870  	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
2871  	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
2872  	mrioc->facts.max_op_req_q =
2873  	    le16_to_cpu(facts_data->max_operational_request_queues);
2874  	mrioc->facts.max_op_reply_q =
2875  	    le16_to_cpu(facts_data->max_operational_reply_queues);
2876  	mrioc->facts.ioc_capabilities =
2877  	    le32_to_cpu(facts_data->ioc_capabilities);
2878  	mrioc->facts.fw_ver.build_num =
2879  	    le16_to_cpu(facts_data->fw_version.build_num);
2880  	mrioc->facts.fw_ver.cust_id =
2881  	    le16_to_cpu(facts_data->fw_version.customer_id);
2882  	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
2883  	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
2884  	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
2885  	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
2886  	mrioc->msix_count = min_t(int, mrioc->msix_count,
2887  	    mrioc->facts.max_msix_vectors);
2888  	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
2889  	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
2890  	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
2891  	mrioc->facts.shutdown_timeout =
2892  	    le16_to_cpu(facts_data->shutdown_timeout);
2893  
2894  	mrioc->facts.max_dev_per_tg =
2895  	    facts_data->max_devices_per_throttle_group;
2896  	mrioc->facts.io_throttle_data_length =
2897  	    le16_to_cpu(facts_data->io_throttle_data_length);
2898  	mrioc->facts.max_io_throttle_group =
2899  	    le16_to_cpu(facts_data->max_io_throttle_group);
2900  	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
2901  	mrioc->facts.io_throttle_high =
2902  	    le16_to_cpu(facts_data->io_throttle_high);
2903  
2904  	if (mrioc->facts.max_data_length ==
2905  	    MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
2906  		mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
2907  	else
2908  		mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
2909  	/* Store in 512b block count */
2910  	if (mrioc->facts.io_throttle_data_length)
2911  		mrioc->io_throttle_data_length =
2912  		    (mrioc->facts.io_throttle_data_length * 2 * 4);
2913  	else
2914  		/* set the length to 1MB + 1K to disable throttle */
2915  		mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
2916  
2917  	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
2918  	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
2919  
2920  	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
2921  	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
2922  	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
2923  	ioc_info(mrioc,
2924  	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
2925  	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
2926  	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
2927  	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
2928  	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
2929  	    mrioc->facts.sge_mod_shift);
2930  	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
2931  	    mrioc->facts.dma_mask, (facts_flags &
2932  	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
2933  	ioc_info(mrioc,
2934  	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
2935  	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
2936  	ioc_info(mrioc,
2937  	   "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
2938  	   mrioc->facts.io_throttle_data_length * 4,
2939  	   mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
2940  }
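
/*
 * Illustrative sketch (not part of the driver): the throttle lengths taken
 * from IOC facts above are kept in 512-byte block units. The data length is
 * reported in 4 KiB units (1 unit = 8 blocks, hence the * 2 * 4), while the
 * high/low thresholds are reported in MiB (1 MiB = 2048 blocks, hence the
 * * 2 * 1024). For example, a reported data length of 256 becomes 2048
 * blocks, i.e. 1 MiB. The helper name below is hypothetical.
 */
static u32 __maybe_unused mpi3mr_example_throttle_blocks(u16 len_4k_units)
{
	return len_4k_units * 2 * 4;	/* 4 KiB units -> 512-byte blocks */
}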
2941  
2942  /**
2943   * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
2944   * @mrioc: Adapter instance reference
2945   *
2946   * Allocate and initialize the reply free buffers, sense
2947   * buffers, reply free queue and sense buffer queue.
2948   *
2949   * Return: 0 on success, non-zero on failures.
2950   */
2951  static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
2952  {
2953  	int retval = 0;
2954  	u32 sz, i;
2955  
2956  	if (mrioc->init_cmds.reply)
2957  		return retval;
2958  
2959  	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2960  	if (!mrioc->init_cmds.reply)
2961  		goto out_failed;
2962  
2963  	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2964  	if (!mrioc->bsg_cmds.reply)
2965  		goto out_failed;
2966  
2967  	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2968  	if (!mrioc->transport_cmds.reply)
2969  		goto out_failed;
2970  
2971  	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2972  		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
2973  		    GFP_KERNEL);
2974  		if (!mrioc->dev_rmhs_cmds[i].reply)
2975  			goto out_failed;
2976  	}
2977  
2978  	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
2979  		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
2980  		    GFP_KERNEL);
2981  		if (!mrioc->evtack_cmds[i].reply)
2982  			goto out_failed;
2983  	}
2984  
2985  	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2986  	if (!mrioc->host_tm_cmds.reply)
2987  		goto out_failed;
2988  
2989  	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2990  	if (!mrioc->pel_cmds.reply)
2991  		goto out_failed;
2992  
2993  	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2994  	if (!mrioc->pel_abort_cmd.reply)
2995  		goto out_failed;
2996  
2997  	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
2998  	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
2999  						 GFP_KERNEL);
3000  	if (!mrioc->removepend_bitmap)
3001  		goto out_failed;
3002  
3003  	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
3004  	if (!mrioc->devrem_bitmap)
3005  		goto out_failed;
3006  
3007  	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
3008  						  GFP_KERNEL);
3009  	if (!mrioc->evtack_cmds_bitmap)
3010  		goto out_failed;
3011  
3012  	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
3013  	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
3014  	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
3015  	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
3016  
3017  	/* reply buffer pool, 16 byte align */
3018  	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3019  	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
3020  	    &mrioc->pdev->dev, sz, 16, 0);
3021  	if (!mrioc->reply_buf_pool) {
3022  		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
3023  		goto out_failed;
3024  	}
3025  
3026  	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
3027  	    &mrioc->reply_buf_dma);
3028  	if (!mrioc->reply_buf)
3029  		goto out_failed;
3030  
3031  	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
3032  
3033  	/* reply free queue, 8 byte align */
3034  	sz = mrioc->reply_free_qsz * 8;
3035  	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
3036  	    &mrioc->pdev->dev, sz, 8, 0);
3037  	if (!mrioc->reply_free_q_pool) {
3038  		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
3039  		goto out_failed;
3040  	}
3041  	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
3042  	    GFP_KERNEL, &mrioc->reply_free_q_dma);
3043  	if (!mrioc->reply_free_q)
3044  		goto out_failed;
3045  
3046  	/* sense buffer pool,  4 byte align */
3047  	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3048  	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
3049  	    &mrioc->pdev->dev, sz, 4, 0);
3050  	if (!mrioc->sense_buf_pool) {
3051  		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
3052  		goto out_failed;
3053  	}
3054  	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
3055  	    &mrioc->sense_buf_dma);
3056  	if (!mrioc->sense_buf)
3057  		goto out_failed;
3058  
3059  	/* sense buffer queue, 8 byte align */
3060  	sz = mrioc->sense_buf_q_sz * 8;
3061  	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
3062  	    &mrioc->pdev->dev, sz, 8, 0);
3063  	if (!mrioc->sense_buf_q_pool) {
3064  		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
3065  		goto out_failed;
3066  	}
3067  	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
3068  	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
3069  	if (!mrioc->sense_buf_q)
3070  		goto out_failed;
3071  
3072  	return retval;
3073  
3074  out_failed:
3075  	retval = -1;
3076  	return retval;
3077  }
3078  
3079  /**
3080   * mpimr_initialize_reply_sbuf_queues - initialize reply and sense
3081   * buffers
3082   * @mrioc: Adapter instance reference
3083   *
3084   * Helper function to initialize reply and sense buffers along
3085   * with some debug prints.
3086   *
3087   * Return:  None.
3088   */
3089  static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3090  {
3091  	u32 sz, i;
3092  	dma_addr_t phy_addr;
3093  
3094  	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3095  	ioc_info(mrioc,
3096  	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3097  	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3098  	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3099  	sz = mrioc->reply_free_qsz * 8;
3100  	ioc_info(mrioc,
3101  	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3102  	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3103  	    (unsigned long long)mrioc->reply_free_q_dma);
3104  	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3105  	ioc_info(mrioc,
3106  	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3107  	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3108  	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3109  	sz = mrioc->sense_buf_q_sz * 8;
3110  	ioc_info(mrioc,
3111  	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3112  	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3113  	    (unsigned long long)mrioc->sense_buf_q_dma);
3114  
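	/*
	 * Each free queue below is populated with the DMA addresses of its
	 * buffers; the slot after the last buffer is left zeroed, which
	 * appears to rely on the free queue depth being one larger than the
	 * buffer count.
	 */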
3115  	/* initialize Reply buffer Queue */
3116  	for (i = 0, phy_addr = mrioc->reply_buf_dma;
3117  	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3118  		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3119  	mrioc->reply_free_q[i] = cpu_to_le64(0);
3120  
3121  	/* initialize Sense Buffer Queue */
3122  	for (i = 0, phy_addr = mrioc->sense_buf_dma;
3123  	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3124  		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3125  	mrioc->sense_buf_q[i] = cpu_to_le64(0);
3126  }
3127  
3128  /**
3129   * mpi3mr_issue_iocinit - Send IOC Init
3130   * @mrioc: Adapter instance reference
3131   *
3132   * Issue the IOC Init MPI request through the admin queue and
3133   * wait for it to complete or time out.
3134   *
3135   * Return: 0 on success, non-zero on failures.
3136   */
mpi3mr_issue_iocinit(struct mpi3mr_ioc * mrioc)3137  static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
3138  {
3139  	struct mpi3_ioc_init_request iocinit_req;
3140  	struct mpi3_driver_info_layout *drv_info;
3141  	dma_addr_t data_dma;
3142  	u32 data_len = sizeof(*drv_info);
3143  	int retval = 0;
3144  	ktime_t current_time;
3145  
3146  	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
3147  	    GFP_KERNEL);
3148  	if (!drv_info) {
3149  		retval = -1;
3150  		goto out;
3151  	}
3152  	mpimr_initialize_reply_sbuf_queues(mrioc);
3153  
3154  	drv_info->information_length = cpu_to_le32(data_len);
3155  	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
3156  	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
3157  	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
3158  	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
3159  	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
3160  	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
3161  	    sizeof(drv_info->driver_release_date));
3162  	drv_info->driver_capabilities = 0;
3163  	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
3164  	    sizeof(mrioc->driver_info));
3165  
3166  	memset(&iocinit_req, 0, sizeof(iocinit_req));
3167  	mutex_lock(&mrioc->init_cmds.mutex);
3168  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3169  		retval = -1;
3170  		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
3171  		mutex_unlock(&mrioc->init_cmds.mutex);
3172  		goto out;
3173  	}
3174  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3175  	mrioc->init_cmds.is_waiting = 1;
3176  	mrioc->init_cmds.callback = NULL;
3177  	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3178  	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
3179  	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
3180  	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
3181  	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
3182  	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
3183  	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
3184  	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
3185  	iocinit_req.reply_free_queue_address =
3186  	    cpu_to_le64(mrioc->reply_free_q_dma);
3187  	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
3188  	iocinit_req.sense_buffer_free_queue_depth =
3189  	    cpu_to_le16(mrioc->sense_buf_q_sz);
3190  	iocinit_req.sense_buffer_free_queue_address =
3191  	    cpu_to_le64(mrioc->sense_buf_q_dma);
3192  	iocinit_req.driver_information_address = cpu_to_le64(data_dma);
3193  
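	/*
	 * Pass the current host time (in milliseconds since the epoch) in
	 * IOC Init, presumably so the firmware can keep its event
	 * timestamps consistent with the host clock.
	 */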
3194  	current_time = ktime_get_real();
3195  	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
3196  
3197  	init_completion(&mrioc->init_cmds.done);
3198  	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
3199  	    sizeof(iocinit_req), 1);
3200  	if (retval) {
3201  		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
3202  		goto out_unlock;
3203  	}
3204  	wait_for_completion_timeout(&mrioc->init_cmds.done,
3205  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3206  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3207  		mpi3mr_check_rh_fault_ioc(mrioc,
3208  		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
3209  		ioc_err(mrioc, "ioc_init timed out\n");
3210  		retval = -1;
3211  		goto out_unlock;
3212  	}
3213  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3214  	    != MPI3_IOCSTATUS_SUCCESS) {
3215  		ioc_err(mrioc,
3216  		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3217  		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3218  		    mrioc->init_cmds.ioc_loginfo);
3219  		retval = -1;
3220  		goto out_unlock;
3221  	}
3222  
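	/*
	 * IOC Init succeeded: write the host indexes so that every reply
	 * buffer and sense buffer allocated above is handed over to the
	 * firmware.
	 */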
3223  	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
3224  	writel(mrioc->reply_free_queue_host_index,
3225  	    &mrioc->sysif_regs->reply_free_host_index);
3226  
3227  	mrioc->sbq_host_index = mrioc->num_sense_bufs;
3228  	writel(mrioc->sbq_host_index,
3229  	    &mrioc->sysif_regs->sense_buffer_free_host_index);
3230  out_unlock:
3231  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3232  	mutex_unlock(&mrioc->init_cmds.mutex);
3233  
3234  out:
3235  	if (drv_info)
3236  		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
3237  		    data_dma);
3238  
3239  	return retval;
3240  }
3241  
3242  /**
3243   * mpi3mr_unmask_events - Unmask events in event mask bitmap
3244   * @mrioc: Adapter instance reference
3245   * @event: MPI event ID
3246   *
3247   * Unmask the specific event by clearing the corresponding bit
3248   * in the event_mask bitmap.
3249   *
3250   * Return: Nothing.
3251   */
mpi3mr_unmask_events(struct mpi3mr_ioc * mrioc,u16 event)3252  static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3253  {
3254  	u32 desired_event;
3255  	u8 word;
3256  
3257  	if (event >= 128)
3258  		return;
3259  
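	/*
	 * The event mask is an array of 32-bit words: locate the word and
	 * the bit for this event, then clear the bit to unmask it.
	 */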
3260  	desired_event = (1 << (event % 32));
3261  	word = event / 32;
3262  
3263  	mrioc->event_masks[word] &= ~desired_event;
3264  }
3265  
3266  /**
3267   * mpi3mr_issue_event_notification - Send event notification
3268   * @mrioc: Adapter instance reference
3269   *
3270   * Issue the event notification MPI request through the admin
3271   * queue and wait for it to complete or time out.
3272   *
3273   * Return: 0 on success, non-zero on failures.
3274   */
mpi3mr_issue_event_notification(struct mpi3mr_ioc * mrioc)3275  static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
3276  {
3277  	struct mpi3_event_notification_request evtnotify_req;
3278  	int retval = 0;
3279  	u8 i;
3280  
3281  	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
3282  	mutex_lock(&mrioc->init_cmds.mutex);
3283  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3284  		retval = -1;
3285  		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
3286  		mutex_unlock(&mrioc->init_cmds.mutex);
3287  		goto out;
3288  	}
3289  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3290  	mrioc->init_cmds.is_waiting = 1;
3291  	mrioc->init_cmds.callback = NULL;
3292  	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3293  	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
3294  	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3295  		evtnotify_req.event_masks[i] =
3296  		    cpu_to_le32(mrioc->event_masks[i]);
3297  	init_completion(&mrioc->init_cmds.done);
3298  	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
3299  	    sizeof(evtnotify_req), 1);
3300  	if (retval) {
3301  		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
3302  		goto out_unlock;
3303  	}
3304  	wait_for_completion_timeout(&mrioc->init_cmds.done,
3305  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3306  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3307  		ioc_err(mrioc, "event notification timed out\n");
3308  		mpi3mr_check_rh_fault_ioc(mrioc,
3309  		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
3310  		retval = -1;
3311  		goto out_unlock;
3312  	}
3313  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3314  	    != MPI3_IOCSTATUS_SUCCESS) {
3315  		ioc_err(mrioc,
3316  		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3317  		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3318  		    mrioc->init_cmds.ioc_loginfo);
3319  		retval = -1;
3320  		goto out_unlock;
3321  	}
3322  
3323  out_unlock:
3324  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3325  	mutex_unlock(&mrioc->init_cmds.mutex);
3326  out:
3327  	return retval;
3328  }
3329  
3330  /**
3331   * mpi3mr_process_event_ack - Process event acknowledgment
3332   * @mrioc: Adapter instance reference
3333   * @event: MPI3 event ID
3334   * @event_ctx: event context
3335   *
3336   * Send event acknowledgment through admin queue and wait for
3337   * it to complete.
3338   *
3339   * Return: 0 on success, non-zero on failures.
3340   */
mpi3mr_process_event_ack(struct mpi3mr_ioc * mrioc,u8 event,u32 event_ctx)3341  int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3342  	u32 event_ctx)
3343  {
3344  	struct mpi3_event_ack_request evtack_req;
3345  	int retval = 0;
3346  
3347  	memset(&evtack_req, 0, sizeof(evtack_req));
3348  	mutex_lock(&mrioc->init_cmds.mutex);
3349  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3350  		retval = -1;
3351  		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3352  		mutex_unlock(&mrioc->init_cmds.mutex);
3353  		goto out;
3354  	}
3355  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3356  	mrioc->init_cmds.is_waiting = 1;
3357  	mrioc->init_cmds.callback = NULL;
3358  	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3359  	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3360  	evtack_req.event = event;
3361  	evtack_req.event_context = cpu_to_le32(event_ctx);
3362  
3363  	init_completion(&mrioc->init_cmds.done);
3364  	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3365  	    sizeof(evtack_req), 1);
3366  	if (retval) {
3367  		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3368  		goto out_unlock;
3369  	}
3370  	wait_for_completion_timeout(&mrioc->init_cmds.done,
3371  	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3372  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3373  		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3374  		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3375  			mpi3mr_check_rh_fault_ioc(mrioc,
3376  			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
3377  		retval = -1;
3378  		goto out_unlock;
3379  	}
3380  	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3381  	    != MPI3_IOCSTATUS_SUCCESS) {
3382  		ioc_err(mrioc,
3383  		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3384  		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3385  		    mrioc->init_cmds.ioc_loginfo);
3386  		retval = -1;
3387  		goto out_unlock;
3388  	}
3389  
3390  out_unlock:
3391  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3392  	mutex_unlock(&mrioc->init_cmds.mutex);
3393  out:
3394  	return retval;
3395  }
3396  
3397  /**
3398   * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3399   * @mrioc: Adapter instance reference
3400   *
3401   * Allocate chain buffers and set a bitmap to indicate free
3402   * chain buffers. Chain buffers are used to pass the SGE
3403   * information along with MPI3 SCSI IO requests for host I/O.
3404   *
3405   * Return: 0 on success, non-zero on failure
3406   */
mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc * mrioc)3407  static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3408  {
3409  	int retval = 0;
3410  	u32 sz, i;
3411  	u16 num_chains;
3412  
3413  	if (mrioc->chain_sgl_list)
3414  		return retval;
3415  
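	/*
	 * Size the chain buffer pool as a fraction of the maximum host
	 * I/Os; when DIX protection is enabled, add extra chain buffers,
	 * presumably because protected I/O may need an additional chain
	 * frame per request.
	 */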
3416  	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3417  
3418  	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3419  	    | SHOST_DIX_TYPE1_PROTECTION
3420  	    | SHOST_DIX_TYPE2_PROTECTION
3421  	    | SHOST_DIX_TYPE3_PROTECTION))
3422  		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3423  
3424  	mrioc->chain_buf_count = num_chains;
3425  	sz = sizeof(struct chain_element) * num_chains;
3426  	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3427  	if (!mrioc->chain_sgl_list)
3428  		goto out_failed;
3429  
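	/*
	 * Cap max_sgl_entries so that an I/O built from 4K pages (one page
	 * per SGE) cannot exceed the controller's reported max_data_length.
	 */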
3430  	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
3431  		MPI3MR_PAGE_SIZE_4K))
3432  		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
3433  			MPI3MR_PAGE_SIZE_4K;
3434  	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
3435  	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
3436  			mrioc->max_sgl_entries, sz/1024);
3437  
3438  	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3439  	    &mrioc->pdev->dev, sz, 16, 0);
3440  	if (!mrioc->chain_buf_pool) {
3441  		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3442  		goto out_failed;
3443  	}
3444  
3445  	for (i = 0; i < num_chains; i++) {
3446  		mrioc->chain_sgl_list[i].addr =
3447  		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3448  		    &mrioc->chain_sgl_list[i].dma_addr);
3449  
3450  		if (!mrioc->chain_sgl_list[i].addr)
3451  			goto out_failed;
3452  	}
3453  	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
3454  	if (!mrioc->chain_bitmap)
3455  		goto out_failed;
3456  	return retval;
3457  out_failed:
3458  	retval = -1;
3459  	return retval;
3460  }
3461  
3462  /**
3463   * mpi3mr_port_enable_complete - Mark port enable complete
3464   * @mrioc: Adapter instance reference
3465   * @drv_cmd: Internal command tracker
3466   *
3467   * Callback for the asynchronous port enable request; it sets the
3468   * driver command state to indicate the port enable request is complete.
3469   *
3470   * Return: Nothing
3471   */
mpi3mr_port_enable_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)3472  static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3473  	struct mpi3mr_drv_cmd *drv_cmd)
3474  {
3475  	drv_cmd->callback = NULL;
3476  	mrioc->scan_started = 0;
3477  	if (drv_cmd->state & MPI3MR_CMD_RESET)
3478  		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3479  	else
3480  		mrioc->scan_failed = drv_cmd->ioc_status;
3481  	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3482  }
3483  
3484  /**
3485   * mpi3mr_issue_port_enable - Issue Port Enable
3486   * @mrioc: Adapter instance reference
3487   * @async: Flag to wait for completion or not
3488   *
3489   * Issue the Port Enable MPI request through the admin queue and,
3490   * if the async flag is not set, wait for the port enable to
3491   * complete or time out.
3492   *
3493   * Return: 0 on success, non-zero on failures.
3494   */
mpi3mr_issue_port_enable(struct mpi3mr_ioc * mrioc,u8 async)3495  int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
3496  {
3497  	struct mpi3_port_enable_request pe_req;
3498  	int retval = 0;
3499  	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
3500  
3501  	memset(&pe_req, 0, sizeof(pe_req));
3502  	mutex_lock(&mrioc->init_cmds.mutex);
3503  	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3504  		retval = -1;
3505  		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
3506  		mutex_unlock(&mrioc->init_cmds.mutex);
3507  		goto out;
3508  	}
3509  	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3510  	if (async) {
3511  		mrioc->init_cmds.is_waiting = 0;
3512  		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
3513  	} else {
3514  		mrioc->init_cmds.is_waiting = 1;
3515  		mrioc->init_cmds.callback = NULL;
3516  		init_completion(&mrioc->init_cmds.done);
3517  	}
3518  	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3519  	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
3520  
3521  	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
3522  	if (retval) {
3523  		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
3524  		goto out_unlock;
3525  	}
3526  	if (async) {
3527  		mutex_unlock(&mrioc->init_cmds.mutex);
3528  		goto out;
3529  	}
3530  
3531  	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
3532  	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3533  		ioc_err(mrioc, "port enable timed out\n");
3534  		retval = -1;
3535  		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
3536  		goto out_unlock;
3537  	}
3538  	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
3539  
3540  out_unlock:
3541  	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3542  	mutex_unlock(&mrioc->init_cmds.mutex);
3543  out:
3544  	return retval;
3545  }
3546  
3547  /* Protocol type to name mapper structure */
3548  static const struct {
3549  	u8 protocol;
3550  	char *name;
3551  } mpi3mr_protocols[] = {
3552  	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
3553  	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
3554  	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
3555  };
3556  
3557  /* Capability to name mapper structure */
3558  static const struct {
3559  	u32 capability;
3560  	char *name;
3561  } mpi3mr_capabilities[] = {
3562  	{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
3563  	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" },
3564  };
3565  
3566  /**
3567   * mpi3mr_print_ioc_info - Display controller information
3568   * @mrioc: Adapter instance reference
3569   *
3570   * Display controller personality, capabilities and supported
3571   * protocols.
3572   *
3573   * Return: Nothing
3574   */
3575  static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc * mrioc)3576  mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3577  {
3578  	int i = 0, bytes_written = 0;
3579  	char personality[16];
3580  	char protocol[50] = {0};
3581  	char capabilities[100] = {0};
3582  	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3583  
3584  	switch (mrioc->facts.personality) {
3585  	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3586  		strncpy(personality, "Enhanced HBA", sizeof(personality));
3587  		break;
3588  	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3589  		strncpy(personality, "RAID", sizeof(personality));
3590  		break;
3591  	default:
3592  		strncpy(personality, "Unknown", sizeof(personality));
3593  		break;
3594  	}
3595  
3596  	ioc_info(mrioc, "Running in %s Personality", personality);
3597  
3598  	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3599  	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3600  	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
3601  
3602  	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
3603  		if (mrioc->facts.protocol_flags &
3604  		    mpi3mr_protocols[i].protocol) {
3605  			bytes_written += scnprintf(protocol + bytes_written,
3606  				    sizeof(protocol) - bytes_written, "%s%s",
3607  				    bytes_written ? "," : "",
3608  				    mpi3mr_protocols[i].name);
3609  		}
3610  	}
3611  
3612  	bytes_written = 0;
3613  	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
3614  		if (mrioc->facts.ioc_capabilities &
3615  		    mpi3mr_capabilities[i].capability) {
3616  			bytes_written += scnprintf(capabilities + bytes_written,
3617  				    sizeof(capabilities) - bytes_written, "%s%s",
3618  				    bytes_written ? "," : "",
3619  				    mpi3mr_capabilities[i].name);
3620  		}
3621  	}
3622  
3623  	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
3624  		 protocol, capabilities);
3625  }
3626  
3627  /**
3628   * mpi3mr_cleanup_resources - Free PCI resources
3629   * @mrioc: Adapter instance reference
3630   *
3631   * Unmap PCI device memory and disable PCI device.
3632   *
3633   * Return: Nothing.
3634   */
mpi3mr_cleanup_resources(struct mpi3mr_ioc * mrioc)3635  void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
3636  {
3637  	struct pci_dev *pdev = mrioc->pdev;
3638  
3639  	mpi3mr_cleanup_isr(mrioc);
3640  
3641  	if (mrioc->sysif_regs) {
3642  		iounmap((void __iomem *)mrioc->sysif_regs);
3643  		mrioc->sysif_regs = NULL;
3644  	}
3645  
3646  	if (pci_is_enabled(pdev)) {
3647  		if (mrioc->bars)
3648  			pci_release_selected_regions(pdev, mrioc->bars);
3649  		pci_disable_device(pdev);
3650  	}
3651  }
3652  
3653  /**
3654   * mpi3mr_setup_resources - Enable PCI resources
3655   * @mrioc: Adapter instance reference
3656   *
3657   * Enable PCI device memory, MSI-X registers and set DMA mask.
3658   *
3659   * Return: 0 on success and non-zero on failure.
3660   */
mpi3mr_setup_resources(struct mpi3mr_ioc * mrioc)3661  int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
3662  {
3663  	struct pci_dev *pdev = mrioc->pdev;
3664  	u32 memap_sz = 0;
3665  	int i, retval = 0, capb = 0;
3666  	u16 message_control;
3667  	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
3668  	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
3669  
3670  	if (pci_enable_device_mem(pdev)) {
3671  		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
3672  		retval = -ENODEV;
3673  		goto out_failed;
3674  	}
3675  
3676  	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3677  	if (!capb) {
3678  		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
3679  		retval = -ENODEV;
3680  		goto out_failed;
3681  	}
3682  	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3683  
3684  	if (pci_request_selected_regions(pdev, mrioc->bars,
3685  	    mrioc->driver_name)) {
3686  		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
3687  		retval = -ENODEV;
3688  		goto out_failed;
3689  	}
3690  
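	/* map the first memory BAR; it holds the system interface registers */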
3691  	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
3692  		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3693  			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
3694  			memap_sz = pci_resource_len(pdev, i);
3695  			mrioc->sysif_regs =
3696  			    ioremap(mrioc->sysif_regs_phys, memap_sz);
3697  			break;
3698  		}
3699  	}
3700  
3701  	pci_set_master(pdev);
3702  
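	/*
	 * Try the preferred (typically 64-bit) DMA mask first and fall back
	 * to a 32-bit mask if the platform rejects it.
	 */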
3703  	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
3704  	if (retval) {
3705  		if (dma_mask != DMA_BIT_MASK(32)) {
3706  			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
3707  			dma_mask = DMA_BIT_MASK(32);
3708  			retval = dma_set_mask_and_coherent(&pdev->dev,
3709  			    dma_mask);
3710  		}
3711  		if (retval) {
3712  			mrioc->dma_mask = 0;
3713  			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
3714  			goto out_failed;
3715  		}
3716  	}
3717  	mrioc->dma_mask = dma_mask;
3718  
3719  	if (!mrioc->sysif_regs) {
3720  		ioc_err(mrioc,
3721  		    "Unable to map adapter memory or resource not found\n");
3722  		retval = -EINVAL;
3723  		goto out_failed;
3724  	}
3725  
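	/*
	 * Read the MSI-X Message Control word (at offset 2 in the MSI-X
	 * capability); the table size field is zero-based, hence the +1.
	 */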
3726  	pci_read_config_word(pdev, capb + 2, &message_control);
3727  	mrioc->msix_count = (message_control & 0x3FF) + 1;
3728  
3729  	pci_save_state(pdev);
3730  
3731  	pci_set_drvdata(pdev, mrioc->shost);
3732  
3733  	mpi3mr_ioc_disable_intr(mrioc);
3734  
3735  	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
3736  	    (unsigned long long)mrioc->sysif_regs_phys,
3737  	    mrioc->sysif_regs, memap_sz);
3738  	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
3739  	    mrioc->msix_count);
3740  
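	/*
	 * Carve out io_uring poll queues only when not in kdump
	 * (reset_devices) mode, limiting them to msix_count minus two,
	 * presumably to leave vectors for the admin/default interrupt-driven
	 * queues.
	 */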
3741  	if (!reset_devices && poll_queues > 0)
3742  		mrioc->requested_poll_qcount = min_t(int, poll_queues,
3743  				mrioc->msix_count - 2);
3744  	return retval;
3745  
3746  out_failed:
3747  	mpi3mr_cleanup_resources(mrioc);
3748  	return retval;
3749  }
3750  
3751  /**
3752   * mpi3mr_enable_events - Enable required events
3753   * @mrioc: Adapter instance reference
3754   *
3755   * This routine unmasks the events required by the driver by
3756   * sending the appropriate event mask bitmap through an event
3757   * notification request.
3758   *
3759   * Return: 0 on success and non-zero on failure.
3760   */
mpi3mr_enable_events(struct mpi3mr_ioc * mrioc)3761  static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
3762  {
3763  	int retval = 0;
3764  	u32  i;
3765  
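	/*
	 * Start with every event masked (all bits set), then unmask only
	 * the events this driver handles.
	 */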
3766  	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3767  		mrioc->event_masks[i] = -1;
3768  
3769  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
3770  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
3771  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
3772  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
3773  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
3774  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3775  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
3776  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
3777  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
3778  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
3779  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
3780  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
3781  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
3782  	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
3783  
3784  	retval = mpi3mr_issue_event_notification(mrioc);
3785  	if (retval)
3786  		ioc_err(mrioc, "failed to issue event notification %d\n",
3787  		    retval);
3788  	return retval;
3789  }
3790  
3791  /**
3792   * mpi3mr_init_ioc - Initialize the controller
3793   * @mrioc: Adapter instance reference
3794   *
3795   * This is the controller initialization routine, executed either
3796   * after soft reset or from the pci probe callback.
3797   * It sets up the required resources, memory maps the controller
3798   * registers, creates admin and operational reply queue pairs,
3799   * allocates required memory for the reply pool and sense buffer
3800   * pool, issues the IOC init request to the firmware, unmasks the
3801   * events and issues port enable to discover SAS/SATA/NVMe devices
3802   * and RAID volumes.
3803   *
3804   * Return: 0 on success and non-zero on failure.
3805   */
mpi3mr_init_ioc(struct mpi3mr_ioc * mrioc)3806  int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
3807  {
3808  	int retval = 0;
3809  	u8 retry = 0;
3810  	struct mpi3_ioc_facts_data facts_data;
3811  	u32 sz;
3812  
3813  retry_init:
3814  	retval = mpi3mr_bring_ioc_ready(mrioc);
3815  	if (retval) {
3816  		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
3817  		    retval);
3818  		goto out_failed_noretry;
3819  	}
3820  
3821  	retval = mpi3mr_setup_isr(mrioc, 1);
3822  	if (retval) {
3823  		ioc_err(mrioc, "Failed to setup ISR error %d\n",
3824  		    retval);
3825  		goto out_failed_noretry;
3826  	}
3827  
3828  	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3829  	if (retval) {
3830  		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
3831  		    retval);
3832  		goto out_failed;
3833  	}
3834  
3835  	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
3836  	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
3837  	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
3838  	atomic_set(&mrioc->pend_large_data_sz, 0);
3839  
3840  	if (reset_devices)
3841  		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
3842  		    MPI3MR_HOST_IOS_KDUMP);
3843  
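	/*
	 * Enable the SAS transport layer only when firmware multipath is
	 * not enabled; with multipath, the topology is presumably not
	 * suitable for exposure through the SAS transport class.
	 */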
3844  	if (!(mrioc->facts.ioc_capabilities &
3845  	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) {
3846  		mrioc->sas_transport_enabled = 1;
3847  		mrioc->scsi_device_channel = 1;
3848  		mrioc->shost->max_channel = 1;
3849  		mrioc->shost->transportt = mpi3mr_transport_template;
3850  	}
3851  
3852  	mrioc->reply_sz = mrioc->facts.reply_sz;
3853  
3854  	retval = mpi3mr_check_reset_dma_mask(mrioc);
3855  	if (retval) {
3856  		ioc_err(mrioc, "Resetting dma mask failed %d\n",
3857  		    retval);
3858  		goto out_failed_noretry;
3859  	}
3860  
3861  	mpi3mr_print_ioc_info(mrioc);
3862  
3863  	if (!mrioc->cfg_page) {
3864  		dprint_init(mrioc, "allocating config page buffers\n");
3865  		mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
3866  		mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
3867  		    mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
3868  		if (!mrioc->cfg_page) {
3869  			retval = -1;
3870  			goto out_failed_noretry;
3871  		}
3872  	}
3873  
3874  	if (!mrioc->init_cmds.reply) {
3875  		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
3876  		if (retval) {
3877  			ioc_err(mrioc,
3878  			    "%s :Failed to allocated reply sense buffers %d\n",
3879  			    __func__, retval);
3880  			goto out_failed_noretry;
3881  		}
3882  	}
3883  
3884  	if (!mrioc->chain_sgl_list) {
3885  		retval = mpi3mr_alloc_chain_bufs(mrioc);
3886  		if (retval) {
3887  			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
3888  			    retval);
3889  			goto out_failed_noretry;
3890  		}
3891  	}
3892  
3893  	retval = mpi3mr_issue_iocinit(mrioc);
3894  	if (retval) {
3895  		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
3896  		    retval);
3897  		goto out_failed;
3898  	}
3899  
3900  	retval = mpi3mr_print_pkg_ver(mrioc);
3901  	if (retval) {
3902  		ioc_err(mrioc, "failed to get package version\n");
3903  		goto out_failed;
3904  	}
3905  
3906  	retval = mpi3mr_setup_isr(mrioc, 0);
3907  	if (retval) {
3908  		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
3909  		    retval);
3910  		goto out_failed_noretry;
3911  	}
3912  
3913  	retval = mpi3mr_create_op_queues(mrioc);
3914  	if (retval) {
3915  		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
3916  		    retval);
3917  		goto out_failed;
3918  	}
3919  
3920  	if (!mrioc->pel_seqnum_virt) {
3921  		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
3922  		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
3923  		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
3924  		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
3925  		    GFP_KERNEL);
3926  		if (!mrioc->pel_seqnum_virt) {
3927  			retval = -ENOMEM;
3928  			goto out_failed_noretry;
3929  		}
3930  	}
3931  
3932  	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
3933  		dprint_init(mrioc, "allocating memory for throttle groups\n");
3934  		sz = sizeof(struct mpi3mr_throttle_group_info);
3935  		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
3936  		if (!mrioc->throttle_groups) {
3937  			retval = -1;
3938  			goto out_failed_noretry;
3939  		}
3940  	}
3941  
3942  	retval = mpi3mr_enable_events(mrioc);
3943  	if (retval) {
3944  		ioc_err(mrioc, "failed to enable events %d\n",
3945  		    retval);
3946  		goto out_failed;
3947  	}
3948  
3949  	ioc_info(mrioc, "controller initialization completed successfully\n");
3950  	return retval;
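/*
 * Retryable failures re-run the whole init sequence (after clearing the
 * driver buffers) up to two times; anything falling through to
 * out_failed_noretry marks the controller unrecoverable.
 */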
3951  out_failed:
3952  	if (retry < 2) {
3953  		retry++;
3954  		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
3955  		    retry);
3956  		mpi3mr_memset_buffers(mrioc);
3957  		goto retry_init;
3958  	}
3959  	retval = -1;
3960  out_failed_noretry:
3961  	ioc_err(mrioc, "controller initialization failed\n");
3962  	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
3963  	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
3964  	mrioc->unrecoverable = 1;
3965  	return retval;
3966  }
3967  
3968  /**
3969   * mpi3mr_reinit_ioc - Re-Initialize the controller
3970   * @mrioc: Adapter instance reference
3971   * @is_resume: Called from resume or reset path
3972   *
3973   * This is the controller re-initialization routine, executed from
3974   * the soft reset handler or resume callback. It creates the
3975   * operational reply queue pairs, allocates required memory for
3976   * the reply pool and sense buffer pool, issues the IOC init
3977   * request to the firmware, unmasks the events and issues port
3978   * enable to discover SAS/SATA/NVMe devices and RAID volumes.
3979   *
3980   * Return: 0 on success and non-zero on failure.
3981   */
mpi3mr_reinit_ioc(struct mpi3mr_ioc * mrioc,u8 is_resume)3982  int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
3983  {
3984  	int retval = 0;
3985  	u8 retry = 0;
3986  	struct mpi3_ioc_facts_data facts_data;
3987  	u32 pe_timeout, ioc_status;
3988  
3989  retry_init:
3990  	pe_timeout =
3991  	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
3992  
3993  	dprint_reset(mrioc, "bringing up the controller to ready state\n");
3994  	retval = mpi3mr_bring_ioc_ready(mrioc);
3995  	if (retval) {
3996  		ioc_err(mrioc, "failed to bring to ready state\n");
3997  		goto out_failed_noretry;
3998  	}
3999  
4000  	if (is_resume) {
4001  		dprint_reset(mrioc, "setting up single ISR\n");
4002  		retval = mpi3mr_setup_isr(mrioc, 1);
4003  		if (retval) {
4004  			ioc_err(mrioc, "failed to setup ISR\n");
4005  			goto out_failed_noretry;
4006  		}
4007  	} else
4008  		mpi3mr_ioc_enable_intr(mrioc);
4009  
4010  	dprint_reset(mrioc, "getting ioc_facts\n");
4011  	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
4012  	if (retval) {
4013  		ioc_err(mrioc, "failed to get ioc_facts\n");
4014  		goto out_failed;
4015  	}
4016  
4017  	dprint_reset(mrioc, "validating ioc_facts\n");
4018  	retval = mpi3mr_revalidate_factsdata(mrioc);
4019  	if (retval) {
4020  		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
4021  		goto out_failed_noretry;
4022  	}
4023  
4024  	mpi3mr_print_ioc_info(mrioc);
4025  
4026  	dprint_reset(mrioc, "sending ioc_init\n");
4027  	retval = mpi3mr_issue_iocinit(mrioc);
4028  	if (retval) {
4029  		ioc_err(mrioc, "failed to send ioc_init\n");
4030  		goto out_failed;
4031  	}
4032  
4033  	dprint_reset(mrioc, "getting package version\n");
4034  	retval = mpi3mr_print_pkg_ver(mrioc);
4035  	if (retval) {
4036  		ioc_err(mrioc, "failed to get package version\n");
4037  		goto out_failed;
4038  	}
4039  
4040  	if (is_resume) {
4041  		dprint_reset(mrioc, "setting up multiple ISR\n");
4042  		retval = mpi3mr_setup_isr(mrioc, 0);
4043  		if (retval) {
4044  			ioc_err(mrioc, "failed to re-setup ISR\n");
4045  			goto out_failed_noretry;
4046  		}
4047  	}
4048  
4049  	dprint_reset(mrioc, "creating operational queue pairs\n");
4050  	retval = mpi3mr_create_op_queues(mrioc);
4051  	if (retval) {
4052  		ioc_err(mrioc, "failed to create operational queue pairs\n");
4053  		goto out_failed;
4054  	}
4055  
4056  	if (!mrioc->pel_seqnum_virt) {
4057  		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
4058  		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4059  		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4060  		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4061  		    GFP_KERNEL);
4062  		if (!mrioc->pel_seqnum_virt) {
4063  			retval = -ENOMEM;
4064  			goto out_failed_noretry;
4065  		}
4066  	}
4067  
4068  	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
4069  		ioc_err(mrioc,
4070  		    "cannot create minimum number of operational queues expected:%d created:%d\n",
4071  		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
4072  		retval = -1;
4073  		goto out_failed_noretry;
4074  	}
4075  
4076  	dprint_reset(mrioc, "enabling events\n");
4077  	retval = mpi3mr_enable_events(mrioc);
4078  	if (retval) {
4079  		ioc_err(mrioc, "failed to enable events\n");
4080  		goto out_failed;
4081  	}
4082  
4083  	mrioc->device_refresh_on = 1;
4084  	mpi3mr_add_event_wait_for_device_refresh(mrioc);
4085  
4086  	ioc_info(mrioc, "sending port enable\n");
4087  	retval = mpi3mr_issue_port_enable(mrioc, 1);
4088  	if (retval) {
4089  		ioc_err(mrioc, "failed to issue port enable\n");
4090  		goto out_failed;
4091  	}
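	/*
	 * Port enable was issued asynchronously: poll the init command
	 * state until the completion callback releases it, bailing out if
	 * the device disappears, the controller becomes unrecoverable or a
	 * fault/reset-history condition is seen.
	 */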
4092  	do {
4093  		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
4094  		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
4095  			break;
4096  		if (!pci_device_is_present(mrioc->pdev))
4097  			mrioc->unrecoverable = 1;
4098  		if (mrioc->unrecoverable) {
4099  			retval = -1;
4100  			goto out_failed_noretry;
4101  		}
4102  		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4103  		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4104  		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4105  			mpi3mr_print_fault_info(mrioc);
4106  			mrioc->init_cmds.is_waiting = 0;
4107  			mrioc->init_cmds.callback = NULL;
4108  			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4109  			goto out_failed;
4110  		}
4111  	} while (--pe_timeout);
4112  
4113  	if (!pe_timeout) {
4114  		ioc_err(mrioc, "port enable timed out\n");
4115  		mpi3mr_check_rh_fault_ioc(mrioc,
4116  		    MPI3MR_RESET_FROM_PE_TIMEOUT);
4117  		mrioc->init_cmds.is_waiting = 0;
4118  		mrioc->init_cmds.callback = NULL;
4119  		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4120  		goto out_failed;
4121  	} else if (mrioc->scan_failed) {
4122  		ioc_err(mrioc,
4123  		    "port enable failed with status=0x%04x\n",
4124  		    mrioc->scan_failed);
4125  	} else
4126  		ioc_info(mrioc, "port enable completed successfully\n");
4127  
4128  	ioc_info(mrioc, "controller %s completed successfully\n",
4129  	    is_resume ? "resume" : "re-initialization");
4130  	return retval;
4131  out_failed:
4132  	if (retry < 2) {
4133  		retry++;
4134  		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
4135  		    is_resume ? "resume" : "re-initialization", retry);
4136  		mpi3mr_memset_buffers(mrioc);
4137  		goto retry_init;
4138  	}
4139  	retval = -1;
4140  out_failed_noretry:
4141  	ioc_err(mrioc, "controller %s is failed\n",
4142  	    (is_resume)?"resume":"re-initialization");
4143  	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4144  	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
4145  	mrioc->unrecoverable = 1;
4146  	return retval;
4147  }
4148  
4149  /**
4150   * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4151   *					segments
4152   * @mrioc: Adapter instance reference
4153   * @qidx: Operational reply queue index
4154   *
4155   * Return: Nothing.
4156   */
mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4157  static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4158  {
4159  	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
4160  	struct segments *segments;
4161  	int i, size;
4162  
4163  	if (!op_reply_q->q_segments)
4164  		return;
4165  
4166  	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
4167  	segments = op_reply_q->q_segments;
4168  	for (i = 0; i < op_reply_q->num_segments; i++)
4169  		memset(segments[i].segment, 0, size);
4170  }
4171  
4172  /**
4173   * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4174   *					segments
4175   * @mrioc: Adapter instance reference
4176   * @qidx: Operational request queue index
4177   *
4178   * Return: Nothing.
4179   */
mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4180  static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4181  {
4182  	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4183  	struct segments *segments;
4184  	int i, size;
4185  
4186  	if (!op_req_q->q_segments)
4187  		return;
4188  
4189  	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4190  	segments = op_req_q->q_segments;
4191  	for (i = 0; i < op_req_q->num_segments; i++)
4192  		memset(segments[i].segment, 0, size);
4193  }
4194  
4195  /**
4196   * mpi3mr_memset_buffers - memset memory for a controller
4197   * @mrioc: Adapter instance reference
4198   *
4199   * Clear all the memory allocated for a controller, typically
4200   * called post reset to reuse the memory allocated during
4201   * controller init.
4202   *
4203   * Return: Nothing.
4204   */
mpi3mr_memset_buffers(struct mpi3mr_ioc * mrioc)4205  void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4206  {
4207  	u16 i;
4208  	struct mpi3mr_throttle_group_info *tg;
4209  
4210  	mrioc->change_count = 0;
4211  	mrioc->active_poll_qcount = 0;
4212  	mrioc->default_qcount = 0;
4213  	if (mrioc->admin_req_base)
4214  		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4215  	if (mrioc->admin_reply_base)
4216  		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4217  	atomic_set(&mrioc->admin_reply_q_in_use, 0);
4218  
4219  	if (mrioc->init_cmds.reply) {
4220  		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4221  		memset(mrioc->bsg_cmds.reply, 0,
4222  		    sizeof(*mrioc->bsg_cmds.reply));
4223  		memset(mrioc->host_tm_cmds.reply, 0,
4224  		    sizeof(*mrioc->host_tm_cmds.reply));
4225  		memset(mrioc->pel_cmds.reply, 0,
4226  		    sizeof(*mrioc->pel_cmds.reply));
4227  		memset(mrioc->pel_abort_cmd.reply, 0,
4228  		    sizeof(*mrioc->pel_abort_cmd.reply));
4229  		memset(mrioc->transport_cmds.reply, 0,
4230  		    sizeof(*mrioc->transport_cmds.reply));
4231  		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4232  			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4233  			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4234  		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4235  			memset(mrioc->evtack_cmds[i].reply, 0,
4236  			    sizeof(*mrioc->evtack_cmds[i].reply));
4237  		bitmap_clear(mrioc->removepend_bitmap, 0,
4238  			     mrioc->dev_handle_bitmap_bits);
4239  		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
4240  		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
4241  			     MPI3MR_NUM_EVTACKCMD);
4242  	}
4243  
4244  	for (i = 0; i < mrioc->num_queues; i++) {
4245  		mrioc->op_reply_qinfo[i].qid = 0;
4246  		mrioc->op_reply_qinfo[i].ci = 0;
4247  		mrioc->op_reply_qinfo[i].num_replies = 0;
4248  		mrioc->op_reply_qinfo[i].ephase = 0;
4249  		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4250  		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4251  		mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4252  
4253  		mrioc->req_qinfo[i].ci = 0;
4254  		mrioc->req_qinfo[i].pi = 0;
4255  		mrioc->req_qinfo[i].num_requests = 0;
4256  		mrioc->req_qinfo[i].qid = 0;
4257  		mrioc->req_qinfo[i].reply_qid = 0;
4258  		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4259  		mpi3mr_memset_op_req_q_buffers(mrioc, i);
4260  	}
4261  
4262  	atomic_set(&mrioc->pend_large_data_sz, 0);
4263  	if (mrioc->throttle_groups) {
4264  		tg = mrioc->throttle_groups;
4265  		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4266  			tg->id = 0;
4267  			tg->fw_qd = 0;
4268  			tg->modified_qd = 0;
4269  			tg->io_divert = 0;
4270  			tg->need_qd_reduction = 0;
4271  			tg->high = 0;
4272  			tg->low = 0;
4273  			tg->qd_reduction = 0;
4274  			atomic_set(&tg->pend_large_data_sz, 0);
4275  		}
4276  	}
4277  }
4278  
4279  /**
4280   * mpi3mr_free_mem - Free memory allocated for a controller
4281   * @mrioc: Adapter instance reference
4282   *
4283   * Free all the memory allocated for a controller.
4284   *
4285   * Return: Nothing.
4286   */
mpi3mr_free_mem(struct mpi3mr_ioc * mrioc)4287  void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4288  {
4289  	u16 i;
4290  	struct mpi3mr_intr_info *intr_info;
4291  
4292  	mpi3mr_free_enclosure_list(mrioc);
4293  
4294  	if (mrioc->sense_buf_pool) {
4295  		if (mrioc->sense_buf)
4296  			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4297  			    mrioc->sense_buf_dma);
4298  		dma_pool_destroy(mrioc->sense_buf_pool);
4299  		mrioc->sense_buf = NULL;
4300  		mrioc->sense_buf_pool = NULL;
4301  	}
4302  	if (mrioc->sense_buf_q_pool) {
4303  		if (mrioc->sense_buf_q)
4304  			dma_pool_free(mrioc->sense_buf_q_pool,
4305  			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4306  		dma_pool_destroy(mrioc->sense_buf_q_pool);
4307  		mrioc->sense_buf_q = NULL;
4308  		mrioc->sense_buf_q_pool = NULL;
4309  	}
4310  
4311  	if (mrioc->reply_buf_pool) {
4312  		if (mrioc->reply_buf)
4313  			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4314  			    mrioc->reply_buf_dma);
4315  		dma_pool_destroy(mrioc->reply_buf_pool);
4316  		mrioc->reply_buf = NULL;
4317  		mrioc->reply_buf_pool = NULL;
4318  	}
4319  	if (mrioc->reply_free_q_pool) {
4320  		if (mrioc->reply_free_q)
4321  			dma_pool_free(mrioc->reply_free_q_pool,
4322  			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
4323  		dma_pool_destroy(mrioc->reply_free_q_pool);
4324  		mrioc->reply_free_q = NULL;
4325  		mrioc->reply_free_q_pool = NULL;
4326  	}
4327  
4328  	for (i = 0; i < mrioc->num_op_req_q; i++)
4329  		mpi3mr_free_op_req_q_segments(mrioc, i);
4330  
4331  	for (i = 0; i < mrioc->num_op_reply_q; i++)
4332  		mpi3mr_free_op_reply_q_segments(mrioc, i);
4333  
4334  	for (i = 0; i < mrioc->intr_info_count; i++) {
4335  		intr_info = mrioc->intr_info + i;
4336  		intr_info->op_reply_q = NULL;
4337  	}
4338  
4339  	kfree(mrioc->req_qinfo);
4340  	mrioc->req_qinfo = NULL;
4341  	mrioc->num_op_req_q = 0;
4342  
4343  	kfree(mrioc->op_reply_qinfo);
4344  	mrioc->op_reply_qinfo = NULL;
4345  	mrioc->num_op_reply_q = 0;
4346  
4347  	kfree(mrioc->init_cmds.reply);
4348  	mrioc->init_cmds.reply = NULL;
4349  
4350  	kfree(mrioc->bsg_cmds.reply);
4351  	mrioc->bsg_cmds.reply = NULL;
4352  
4353  	kfree(mrioc->host_tm_cmds.reply);
4354  	mrioc->host_tm_cmds.reply = NULL;
4355  
4356  	kfree(mrioc->pel_cmds.reply);
4357  	mrioc->pel_cmds.reply = NULL;
4358  
4359  	kfree(mrioc->pel_abort_cmd.reply);
4360  	mrioc->pel_abort_cmd.reply = NULL;
4361  
4362  	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4363  		kfree(mrioc->evtack_cmds[i].reply);
4364  		mrioc->evtack_cmds[i].reply = NULL;
4365  	}
4366  
4367  	bitmap_free(mrioc->removepend_bitmap);
4368  	mrioc->removepend_bitmap = NULL;
4369  
4370  	bitmap_free(mrioc->devrem_bitmap);
4371  	mrioc->devrem_bitmap = NULL;
4372  
4373  	bitmap_free(mrioc->evtack_cmds_bitmap);
4374  	mrioc->evtack_cmds_bitmap = NULL;
4375  
4376  	bitmap_free(mrioc->chain_bitmap);
4377  	mrioc->chain_bitmap = NULL;
4378  
4379  	kfree(mrioc->transport_cmds.reply);
4380  	mrioc->transport_cmds.reply = NULL;
4381  
4382  	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4383  		kfree(mrioc->dev_rmhs_cmds[i].reply);
4384  		mrioc->dev_rmhs_cmds[i].reply = NULL;
4385  	}
4386  
4387  	if (mrioc->chain_buf_pool) {
4388  		for (i = 0; i < mrioc->chain_buf_count; i++) {
4389  			if (mrioc->chain_sgl_list[i].addr) {
4390  				dma_pool_free(mrioc->chain_buf_pool,
4391  				    mrioc->chain_sgl_list[i].addr,
4392  				    mrioc->chain_sgl_list[i].dma_addr);
4393  				mrioc->chain_sgl_list[i].addr = NULL;
4394  			}
4395  		}
4396  		dma_pool_destroy(mrioc->chain_buf_pool);
4397  		mrioc->chain_buf_pool = NULL;
4398  	}
4399  
4400  	kfree(mrioc->chain_sgl_list);
4401  	mrioc->chain_sgl_list = NULL;
4402  
4403  	if (mrioc->admin_reply_base) {
4404  		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4405  		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
4406  		mrioc->admin_reply_base = NULL;
4407  	}
4408  	if (mrioc->admin_req_base) {
4409  		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4410  		    mrioc->admin_req_base, mrioc->admin_req_dma);
4411  		mrioc->admin_req_base = NULL;
4412  	}
4413  	if (mrioc->cfg_page) {
4414  		dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
4415  		    mrioc->cfg_page, mrioc->cfg_page_dma);
4416  		mrioc->cfg_page = NULL;
4417  	}
4418  	if (mrioc->pel_seqnum_virt) {
4419  		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4420  		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4421  		mrioc->pel_seqnum_virt = NULL;
4422  	}
4423  
4424  	kfree(mrioc->throttle_groups);
4425  	mrioc->throttle_groups = NULL;
4426  
4427  	kfree(mrioc->logdata_buf);
4428  	mrioc->logdata_buf = NULL;
4429  
4430  }
4431  
4432  /**
4433   * mpi3mr_issue_ioc_shutdown - shutdown controller
4434   * @mrioc: Adapter instance reference
4435   *
4436   * Send a shutdown notification to the controller and wait up to
4437   * the shutdown_timeout for it to complete.
4438   *
4439   * Return: Nothing.
4440   */
mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc * mrioc)4441  static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
4442  {
4443  	u32 ioc_config, ioc_status;
4444  	u8 retval = 1;
4445  	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
4446  
4447  	ioc_info(mrioc, "Issuing shutdown Notification\n");
4448  	if (mrioc->unrecoverable) {
4449  		ioc_warn(mrioc,
4450  		    "IOC is unrecoverable shutdown is not issued\n");
4451  		return;
4452  	}
4453  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4454  	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4455  	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
4456  		ioc_info(mrioc, "shutdown already in progress\n");
4457  		return;
4458  	}
4459  
4460  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4461  	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
4462  	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
4463  
4464  	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
4465  
4466  	if (mrioc->facts.shutdown_timeout)
4467  		timeout = mrioc->facts.shutdown_timeout * 10;
4468  
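	/*
	 * The loop below polls every 100ms, which is why the timeout values
	 * above are expressed in units of 100ms (seconds * 10).
	 */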
4469  	do {
4470  		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4471  		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4472  		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
4473  			retval = 0;
4474  			break;
4475  		}
4476  		msleep(100);
4477  	} while (--timeout);
4478  
4479  	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4480  	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4481  
4482  	if (retval) {
4483  		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4484  		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
4485  			ioc_warn(mrioc,
4486  			    "shutdown still in progress after timeout\n");
4487  	}
4488  
4489  	ioc_info(mrioc,
4490  	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
4491  	    (!retval) ? "successful" : "failed", ioc_status,
4492  	    ioc_config);
4493  }
4494  
4495  /**
4496   * mpi3mr_cleanup_ioc - Cleanup controller
4497   * @mrioc: Adapter instance reference
4498   *
4499   * Controller cleanup handler: a message unit reset or soft reset
4500   * and a shutdown notification are issued to the controller.
4501   *
4502   * Return: Nothing.
4503   */
mpi3mr_cleanup_ioc(struct mpi3mr_ioc * mrioc)4504  void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
4505  {
4506  	enum mpi3mr_iocstate ioc_state;
4507  
4508  	dprint_exit(mrioc, "cleaning up the controller\n");
4509  	mpi3mr_ioc_disable_intr(mrioc);
4510  
4511  	ioc_state = mpi3mr_get_iocstate(mrioc);
4512  
4513  	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
4514  	    (ioc_state == MRIOC_STATE_READY)) {
4515  		if (mpi3mr_issue_and_process_mur(mrioc,
4516  		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
4517  			mpi3mr_issue_reset(mrioc,
4518  			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
4519  			    MPI3MR_RESET_FROM_MUR_FAILURE);
4520  		mpi3mr_issue_ioc_shutdown(mrioc);
4521  	}
4522  	dprint_exit(mrioc, "controller cleanup completed\n");
4523  }
4524  
4525  /**
4526   * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
4527   * @mrioc: Adapter instance reference
4528   * @cmdptr: Internal command tracker
4529   *
4530   * Complete an internal driver command with a state indicating it
4531   * was completed due to reset.
4532   *
4533   * Return: Nothing.
4534   */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * cmdptr)4535  static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
4536  	struct mpi3mr_drv_cmd *cmdptr)
4537  {
4538  	if (cmdptr->state & MPI3MR_CMD_PENDING) {
4539  		cmdptr->state |= MPI3MR_CMD_RESET;
4540  		cmdptr->state &= ~MPI3MR_CMD_PENDING;
4541  		if (cmdptr->is_waiting) {
4542  			complete(&cmdptr->done);
4543  			cmdptr->is_waiting = 0;
4544  		} else if (cmdptr->callback)
4545  			cmdptr->callback(mrioc, cmdptr);
4546  	}
4547  }
4548  
4549  /**
4550   * mpi3mr_flush_drv_cmds - Flush internal driver commands
4551   * @mrioc: Adapter instance reference
4552   *
4553   * Flush all internal driver commands post reset
4554   *
4555   * Return: Nothing.
4556   */
mpi3mr_flush_drv_cmds(struct mpi3mr_ioc * mrioc)4557  void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
4558  {
4559  	struct mpi3mr_drv_cmd *cmdptr;
4560  	u8 i;
4561  
4562  	cmdptr = &mrioc->init_cmds;
4563  	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4564  
4565  	cmdptr = &mrioc->cfg_cmds;
4566  	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4567  
4568  	cmdptr = &mrioc->bsg_cmds;
4569  	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4570  	cmdptr = &mrioc->host_tm_cmds;
4571  	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4572  
4573  	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4574  		cmdptr = &mrioc->dev_rmhs_cmds[i];
4575  		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4576  	}
4577  
4578  	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4579  		cmdptr = &mrioc->evtack_cmds[i];
4580  		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4581  	}
4582  
4583  	cmdptr = &mrioc->pel_cmds;
4584  	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4585  
4586  	cmdptr = &mrioc->pel_abort_cmd;
4587  	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4588  
4589  	cmdptr = &mrioc->transport_cmds;
4590  	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4591  }
4592  
4593  /**
4594   * mpi3mr_pel_wait_post - Issue PEL Wait
4595   * @mrioc: Adapter instance reference
4596   * @drv_cmd: Internal command tracker
4597   *
4598   * Issue PEL Wait MPI request through admin queue and return.
4599   *
4600   * Return: Nothing.
4601   */
mpi3mr_pel_wait_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)4602  static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
4603  	struct mpi3mr_drv_cmd *drv_cmd)
4604  {
4605  	struct mpi3_pel_req_action_wait pel_wait;
4606  
4607  	mrioc->pel_abort_requested = false;
4608  
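	/*
	 * Post a PEL Wait with an infinite wait time; the firmware
	 * completes it only when a new persistent event log entry is
	 * available or the wait is aborted.
	 */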
4609  	memset(&pel_wait, 0, sizeof(pel_wait));
4610  	drv_cmd->state = MPI3MR_CMD_PENDING;
4611  	drv_cmd->is_waiting = 0;
4612  	drv_cmd->callback = mpi3mr_pel_wait_complete;
4613  	drv_cmd->ioc_status = 0;
4614  	drv_cmd->ioc_loginfo = 0;
4615  	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4616  	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4617  	pel_wait.action = MPI3_PEL_ACTION_WAIT;
4618  	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
4619  	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
4620  	pel_wait.class = cpu_to_le16(mrioc->pel_class);
4621  	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
4622  	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
4623  	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
4624  
4625  	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
4626  		dprint_bsg_err(mrioc,
4627  			    "Issuing PELWait: Admin post failed\n");
4628  		drv_cmd->state = MPI3MR_CMD_NOTUSED;
4629  		drv_cmd->callback = NULL;
4630  		drv_cmd->retry_count = 0;
4631  		mrioc->pel_enabled = false;
4632  	}
4633  }
4634  
4635  /**
4636   * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
4637   * @mrioc: Adapter instance reference
4638   * @drv_cmd: Internal command tracker
4639   *
4640   * Issue PEL get sequence number MPI request through admin queue
4641   * and return.
4642   *
4643   * Return: 0 on success, non-zero on failure.
4644   */
mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)4645  int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
4646  	struct mpi3mr_drv_cmd *drv_cmd)
4647  {
4648  	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
4649  	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
4650  	int retval = 0;
4651  
4652  	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
4653  	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
4654  	mrioc->pel_cmds.is_waiting = 0;
4655  	mrioc->pel_cmds.ioc_status = 0;
4656  	mrioc->pel_cmds.ioc_loginfo = 0;
4657  	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
4658  	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4659  	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4660  	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
4661  	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
4662  	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
4663  
4664  	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
4665  			sizeof(pel_getseq_req), 0);
4666  	if (retval) {
4667  		if (drv_cmd) {
4668  			drv_cmd->state = MPI3MR_CMD_NOTUSED;
4669  			drv_cmd->callback = NULL;
4670  			drv_cmd->retry_count = 0;
4671  		}
4672  		mrioc->pel_enabled = false;
4673  	}
4674  
4675  	return retval;
4676  }
4677  
4678  /**
4679   * mpi3mr_pel_wait_complete - PELWait Completion callback
4680   * @mrioc: Adapter instance reference
4681   * @drv_cmd: Internal command tracker
4682   *
4683   * This is the callback handler for the PELWait request; the
4684   * firmware completes a PELWait request when it is aborted or a
4685   * new PEL entry is available. This sends an AEN to the
4686   * application and, if the PELWait completion is not due to a
4687   * PELAbort, sends a request for the new PEL sequence number.
4688   *
4689   * Return: Nothing.
4690   */
4691  static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
4692  	struct mpi3mr_drv_cmd *drv_cmd)
4693  {
4694  	struct mpi3_pel_reply *pel_reply = NULL;
4695  	u16 ioc_status, pe_log_status;
4696  	bool do_retry = false;
4697  
4698  	if (drv_cmd->state & MPI3MR_CMD_RESET)
4699  		goto cleanup_drv_cmd;
4700  
4701  	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4702  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
4703  		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
4704  			__func__, ioc_status, drv_cmd->ioc_loginfo);
4705  		dprint_bsg_err(mrioc,
4706  		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
4707  		    ioc_status, drv_cmd->ioc_loginfo);
4708  		do_retry = true;
4709  	}
4710  
4711  	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
4712  		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
4713  
4714  	if (!pel_reply) {
4715  		dprint_bsg_err(mrioc,
4716  		    "pel_wait: failed due to no reply\n");
4717  		goto out_failed;
4718  	}
4719  
4720  	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
4721  	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
4722  	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
4723  		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
4724  			__func__, pe_log_status);
4725  		dprint_bsg_err(mrioc,
4726  		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
4727  		    pe_log_status);
4728  		do_retry = true;
4729  	}
4730  
4731  	if (do_retry) {
4732  		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
4733  			drv_cmd->retry_count++;
4734  			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
4735  			    drv_cmd->retry_count);
4736  			mpi3mr_pel_wait_post(mrioc, drv_cmd);
4737  			return;
4738  		}
4739  		dprint_bsg_err(mrioc,
4740  		    "pel_wait: failed after all retries(%d)\n",
4741  		    drv_cmd->retry_count);
4742  		goto out_failed;
4743  	}
4744  	atomic64_inc(&event_counter);
4745  	if (!mrioc->pel_abort_requested) {
4746  		mrioc->pel_cmds.retry_count = 0;
4747  		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
4748  	}
4749  
4750  	return;
4751  out_failed:
4752  	mrioc->pel_enabled = false;
4753  cleanup_drv_cmd:
4754  	drv_cmd->state = MPI3MR_CMD_NOTUSED;
4755  	drv_cmd->callback = NULL;
4756  	drv_cmd->retry_count = 0;
4757  }
4758  
4759  /**
4760   * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
4761   * @mrioc: Adapter instance reference
4762   * @drv_cmd: Internal command tracker
4763   *
4764   * This is the callback handler for the PEL get sequence number
4765   * request. On successful completion, a new PEL wait request is
4766   * issued to the firmware from this handler.
4767   *
4768   * Return: Nothing.
4769   */
4770  void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
4771  	struct mpi3mr_drv_cmd *drv_cmd)
4772  {
4773  	struct mpi3_pel_reply *pel_reply = NULL;
4774  	struct mpi3_pel_seq *pel_seqnum_virt;
4775  	u16 ioc_status;
4776  	bool do_retry = false;
4777  
4778  	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
4779  
4780  	if (drv_cmd->state & MPI3MR_CMD_RESET)
4781  		goto cleanup_drv_cmd;
4782  
4783  	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4784  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
4785  		dprint_bsg_err(mrioc,
4786  		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
4787  		    ioc_status, drv_cmd->ioc_loginfo);
4788  		do_retry = true;
4789  	}
4790  
4791  	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
4792  		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
4793  	if (!pel_reply) {
4794  		dprint_bsg_err(mrioc,
4795  		    "pel_get_seqnum: failed due to no reply\n");
4796  		goto out_failed;
4797  	}
4798  
4799  	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
4800  		dprint_bsg_err(mrioc,
4801  		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
4802  		    le16_to_cpu(pel_reply->pe_log_status));
4803  		do_retry = true;
4804  	}
4805  
4806  	if (do_retry) {
4807  		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
4808  			drv_cmd->retry_count++;
4809  			dprint_bsg_err(mrioc,
4810  			    "pel_get_seqnum: retrying(%d)\n",
4811  			    drv_cmd->retry_count);
4812  			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
4813  			return;
4814  		}
4815  
4816  		dprint_bsg_err(mrioc,
4817  		    "pel_get_seqnum: failed after all retries(%d)\n",
4818  		    drv_cmd->retry_count);
4819  		goto out_failed;
4820  	}
4821  	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
4822  	drv_cmd->retry_count = 0;
4823  	mpi3mr_pel_wait_post(mrioc, drv_cmd);
4824  
4825  	return;
4826  out_failed:
4827  	mrioc->pel_enabled = false;
4828  cleanup_drv_cmd:
4829  	drv_cmd->state = MPI3MR_CMD_NOTUSED;
4830  	drv_cmd->callback = NULL;
4831  	drv_cmd->retry_count = 0;
4832  }
4833  
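/*
 * Editor's note: an illustrative sketch only, not part of the upstream
 * driver. The two completion handlers above form a loop: a PEL "get
 * sequence number" completion posts a PEL wait, and a PEL wait
 * completion for a new log entry bumps the event counter and posts
 * another "get sequence number". A hypothetical kick-off, assuming
 * pel_class/pel_locale are already chosen and the sequence-number
 * buffer is allocated, might look like this.
 */
#if 0	/* illustrative only, not built */
static void mpi3mr_example_start_pel_monitoring(struct mpi3mr_ioc *mrioc)
{
	mrioc->pel_enabled = true;
	mrioc->pel_cmds.retry_count = 0;

	/*
	 * Fetch the newest sequence number first; its completion
	 * (mpi3mr_pel_get_seqnum_complete) will post the PEL wait.
	 */
	if (mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds))
		ioc_err(mrioc, "failed to start PEL monitoring\n");
}
#endif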
4834  /**
4835   * mpi3mr_soft_reset_handler - Reset the controller
4836   * @mrioc: Adapter instance reference
4837   * @reset_reason: Reset reason code
4838   * @snapdump: Flag to generate snapdump in firmware or not
4839   *
4840   * This is the handler for recovering the controller by issuing a
4841   * soft reset or a diag fault reset. This is a blocking function;
4842   * while one reset is executing, any other reset requests are
4843   * blocked, and all BSG requests are blocked during the reset. If
4844   * the controller reset is successful then the controller is
4845   * reinitialized, otherwise the controller is marked as not
4846   * recoverable.
4847   *
4848   * If the snapdump flag is set, the controller is issued a diag
4849   * fault reset so that the firmware can create a snapdump; after
4850   * that the firmware raises a 0xF000 fault and the driver issues a
4851   * soft reset to recover from it.
4852   *
4853   * Return: 0 on success, non-zero on failure.
4854   */
4855  int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
4856  	u32 reset_reason, u8 snapdump)
4857  {
4858  	int retval = 0, i;
4859  	unsigned long flags;
4860  	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
4861  
4862  	/* Block the reset handler while a diag save is in progress */
4863  	dprint_reset(mrioc,
4864  	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
4865  	    mrioc->diagsave_timeout);
4866  	while (mrioc->diagsave_timeout)
4867  		ssleep(1);
4868  	/*
4869  	 * Block new resets until the currently executing one is finished and
4870  	 * return the status of the existing reset for all blocked resets
4871  	 */
4872  	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
4873  	if (!mutex_trylock(&mrioc->reset_mutex)) {
4874  		ioc_info(mrioc,
4875  		    "controller reset triggered by %s is blocked due to another reset in progress\n",
4876  		    mpi3mr_reset_rc_name(reset_reason));
4877  		do {
4878  			ssleep(1);
4879  		} while (mrioc->reset_in_progress == 1);
4880  		ioc_info(mrioc,
4881  		    "returning previous reset result(%d) for the reset triggered by %s\n",
4882  		    mrioc->prev_reset_result,
4883  		    mpi3mr_reset_rc_name(reset_reason));
4884  		return mrioc->prev_reset_result;
4885  	}
4886  	ioc_info(mrioc, "controller reset is triggered by %s\n",
4887  	    mpi3mr_reset_rc_name(reset_reason));
4888  
4889  	mrioc->device_refresh_on = 0;
4890  	mrioc->reset_in_progress = 1;
4891  	mrioc->stop_bsgs = 1;
4892  	mrioc->prev_reset_result = -1;
4893  
4894  	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
4895  	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
4896  	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
4897  		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4898  			mrioc->event_masks[i] = -1;
4899  
4900  		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
4901  		mpi3mr_issue_event_notification(mrioc);
4902  	}
4903  
4904  	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
4905  
4906  	mpi3mr_ioc_disable_intr(mrioc);
4907  
4908  	if (snapdump) {
4909  		mpi3mr_set_diagsave(mrioc);
4910  		retval = mpi3mr_issue_reset(mrioc,
4911  		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4912  		if (!retval) {
4913  			do {
4914  				host_diagnostic =
4915  				    readl(&mrioc->sysif_regs->host_diagnostic);
4916  				if (!(host_diagnostic &
4917  				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
4918  					break;
4919  				msleep(100);
4920  			} while (--timeout);
4921  		}
4922  	}
4923  
4924  	retval = mpi3mr_issue_reset(mrioc,
4925  	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
4926  	if (retval) {
4927  		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
4928  		goto out;
4929  	}
4930  	if (mrioc->num_io_throttle_group !=
4931  	    mrioc->facts.max_io_throttle_group) {
4932  		ioc_err(mrioc,
4933  		    "max io throttle group doesn't match old(%d), new(%d)\n",
4934  		    mrioc->num_io_throttle_group,
4935  		    mrioc->facts.max_io_throttle_group);
4936  		retval = -EPERM;
4937  		goto out;
4938  	}
4939  
4940  	mpi3mr_flush_delayed_cmd_lists(mrioc);
4941  	mpi3mr_flush_drv_cmds(mrioc);
4942  	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
4943  	bitmap_clear(mrioc->removepend_bitmap, 0,
4944  		     mrioc->dev_handle_bitmap_bits);
4945  	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
4946  	mpi3mr_flush_host_io(mrioc);
4947  	mpi3mr_cleanup_fwevt_list(mrioc);
4948  	mpi3mr_invalidate_devhandles(mrioc);
4949  	mpi3mr_free_enclosure_list(mrioc);
4950  
4951  	if (mrioc->prepare_for_reset) {
4952  		mrioc->prepare_for_reset = 0;
4953  		mrioc->prepare_for_reset_timeout_counter = 0;
4954  	}
4955  	mpi3mr_memset_buffers(mrioc);
4956  	retval = mpi3mr_reinit_ioc(mrioc, 0);
4957  	if (retval) {
4958  		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
4959  		    mrioc->name, reset_reason);
4960  		goto out;
4961  	}
4962  	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
4963  
4964  out:
4965  	if (!retval) {
4966  		mrioc->diagsave_timeout = 0;
4967  		mrioc->reset_in_progress = 0;
4968  		mrioc->pel_abort_requested = 0;
4969  		if (mrioc->pel_enabled) {
4970  			mrioc->pel_cmds.retry_count = 0;
4971  			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
4972  		}
4973  
4974  		mrioc->device_refresh_on = 0;
4975  
4976  		mrioc->ts_update_counter = 0;
4977  		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
4978  		if (mrioc->watchdog_work_q)
4979  			queue_delayed_work(mrioc->watchdog_work_q,
4980  			    &mrioc->watchdog_work,
4981  			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
4982  		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
4983  		mrioc->stop_bsgs = 0;
4984  		if (mrioc->pel_enabled)
4985  			atomic64_inc(&event_counter);
4986  	} else {
4987  		mpi3mr_issue_reset(mrioc,
4988  		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4989  		mrioc->device_refresh_on = 0;
4990  		mrioc->unrecoverable = 1;
4991  		mrioc->reset_in_progress = 0;
4992  		retval = -1;
4993  		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
4994  	}
4995  	mrioc->prev_reset_result = retval;
4996  	mutex_unlock(&mrioc->reset_mutex);
4997  	ioc_info(mrioc, "controller reset is %s\n",
4998  	    ((retval == 0) ? "successful" : "failed"));
4999  	return retval;
5000  }
5001  
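/*
 * Editor's note: an illustrative sketch only, not part of the upstream
 * driver. mpi3mr_soft_reset_handler() is the single blocking entry
 * point for host-initiated recovery: concurrent callers are serialized
 * on reset_mutex and simply receive the result of the reset already in
 * flight. A hypothetical caller (the reason code below is used purely
 * as an example) could look like this.
 */
#if 0	/* illustrative only, not built */
static int mpi3mr_example_recover_controller(struct mpi3mr_ioc *mrioc)
{
	int rc;

	/*
	 * snapdump=1 asks the firmware to capture a snapdump via a diag
	 * fault reset before the soft reset is issued.
	 */
	rc = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FAULT_WATCH, 1);
	if (rc)
		ioc_err(mrioc, "recovery failed, controller is %srecoverable\n",
		    mrioc->unrecoverable ? "un" : "");
	return rc;
}
#endif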
5002  
5003  /**
5004   * mpi3mr_free_config_dma_memory - free memory for config page
5005   * @mrioc: Adapter instance reference
5006   * @mem_desc: memory descriptor structure
5007   *
5008   * Check whether the size of the buffer specified by the memory
5009   * descriptor is greater than the default config page size; if so,
5010   * free the memory pointed to by the descriptor.
5011   *
5012   * Return: Nothing.
5013   */
5014  static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
5015  	struct dma_memory_desc *mem_desc)
5016  {
5017  	if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
5018  		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
5019  		    mem_desc->addr, mem_desc->dma_addr);
5020  		mem_desc->addr = NULL;
5021  	}
5022  }
5023  
5024  /**
5025   * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
5026   * @mrioc: Adapter instance reference
5027   * @mem_desc: Memory descriptor to hold dma memory info
5028   *
5029   * This function allocates new dmaable memory or provides the
5030   * default config page dmaable memory based on the memory size
5031   * described by the descriptor.
5032   *
5033   * Return: 0 on success, non-zero on failure.
5034   */
5035  static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
5036  	struct dma_memory_desc *mem_desc)
5037  {
5038  	if (mem_desc->size > mrioc->cfg_page_sz) {
5039  		mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
5040  		    mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
5041  		if (!mem_desc->addr)
5042  			return -ENOMEM;
5043  	} else {
5044  		mem_desc->addr = mrioc->cfg_page;
5045  		mem_desc->dma_addr = mrioc->cfg_page_dma;
5046  		memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
5047  	}
5048  	return 0;
5049  }
5050  
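/*
 * Editor's note: an illustrative sketch only, not part of the upstream
 * driver. The allocator above either reuses the preallocated cfg_page
 * (for buffers that fit in cfg_page_sz) or grabs a fresh DMA-coherent
 * buffer; the matching free only releases the latter. The two calls are
 * therefore always paired on the same descriptor, for example:
 */
#if 0	/* illustrative only, not built */
static int mpi3mr_example_cfg_buffer_usage(struct mpi3mr_ioc *mrioc)
{
	struct dma_memory_desc mem_desc;

	memset(&mem_desc, 0, sizeof(mem_desc));
	mem_desc.size = 2 * mrioc->cfg_page_sz;	/* forces a real allocation */

	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
		return -ENOMEM;

	/* ... build an SGE with mem_desc.dma_addr and post a request ... */

	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
	return 0;
}
#endif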
5051  /**
5052   * mpi3mr_post_cfg_req - Issue config requests and wait
5053   * @mrioc: Adapter instance reference
5054   * @cfg_req: Configuration request
5055   * @timeout: Timeout in seconds
5056   * @ioc_status: Pointer to return ioc status
5057   *
5058   * A generic function for posting an MPI3 configuration request to
5059   * the firmware. This blocks for the completion of the request for
5060   * timeout seconds and, if the request times out, this function
5061   * faults the controller with the proper reason code.
5062   *
5063   * On successful completion of the request this function returns
5064   * appropriate ioc status from the firmware back to the caller.
5065   *
5066   * Return: 0 on success, non-zero on failure.
5067   */
5068  static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
5069  	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
5070  {
5071  	int retval = 0;
5072  
5073  	mutex_lock(&mrioc->cfg_cmds.mutex);
5074  	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
5075  		retval = -1;
5076  		ioc_err(mrioc, "sending config request failed due to command in use\n");
5077  		mutex_unlock(&mrioc->cfg_cmds.mutex);
5078  		goto out;
5079  	}
5080  	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
5081  	mrioc->cfg_cmds.is_waiting = 1;
5082  	mrioc->cfg_cmds.callback = NULL;
5083  	mrioc->cfg_cmds.ioc_status = 0;
5084  	mrioc->cfg_cmds.ioc_loginfo = 0;
5085  
5086  	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
5087  	cfg_req->function = MPI3_FUNCTION_CONFIG;
5088  
5089  	init_completion(&mrioc->cfg_cmds.done);
5090  	dprint_cfg_info(mrioc, "posting config request\n");
5091  	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5092  		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
5093  		    "mpi3_cfg_req");
5094  	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
5095  	if (retval) {
5096  		ioc_err(mrioc, "posting config request failed\n");
5097  		goto out_unlock;
5098  	}
5099  	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
5100  	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
5101  		mpi3mr_check_rh_fault_ioc(mrioc,
5102  		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
5103  		ioc_err(mrioc, "config request timed out\n");
5104  		retval = -1;
5105  		goto out_unlock;
5106  	}
5107  	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5108  	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
5109  		dprint_cfg_err(mrioc,
5110  		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
5111  		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
5112  
5113  out_unlock:
5114  	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
5115  	mutex_unlock(&mrioc->cfg_cmds.mutex);
5116  
5117  out:
5118  	return retval;
5119  }
5120  
5121  /**
5122   * mpi3mr_process_cfg_req - config page request processor
5123   * @mrioc: Adapter instance reference
5124   * @cfg_req: Configuration request
5125   * @cfg_hdr: Configuration page header
5126   * @timeout: Timeout in seconds
5127   * @ioc_status: Pointer to return ioc status
5128   * @cfg_buf: Memory pointer to copy config page or header
5129   * @cfg_buf_sz: Size of the memory to get config page or header
5130   *
5131   * This is the handler for config page read, write, and config
5132   * page header read operations.
5133   *
5134   * This function expects the cfg_req to be populated with page
5135   * type, page number, action for the header read and with page
5136   * address for all other operations.
5137   *
5138   * The cfg_hdr can be passed as NULL for reading the required
5139   * header details; for read/write pages the cfg_hdr should point
5140   * to a valid configuration page header.
5141   *
5142   * This allocates dmaable memory based on the size of the config
5143   * buffer and sets the SGE of the cfg_req.
5144   *
5145   * For write actions, the config page data has to be passed in
5146   * the cfg_buf and the size of the data has to be specified in
5147   * cfg_buf_sz.
5148   *
5149   * For read/header actions, on successful completion of the
5150   * request with a successful ioc_status, the data is copied into
5151   * the cfg_buf, limited to the minimum of the actual page size and
5152   * cfg_buf_sz.
5153   *
5154   *
5155   * Return: 0 on success, non-zero on failure.
5156   */
5157  static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
5158  	struct mpi3_config_request *cfg_req,
5159  	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
5160  	void *cfg_buf, u32 cfg_buf_sz)
5161  {
5162  	struct dma_memory_desc mem_desc;
5163  	int retval = -1;
5164  	u8 invalid_action = 0;
5165  	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5166  
5167  	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
5168  
5169  	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
5170  		mem_desc.size = sizeof(struct mpi3_config_page_header);
5171  	else {
5172  		if (!cfg_hdr) {
5173  			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
5174  			    cfg_req->action, cfg_req->page_type,
5175  			    cfg_req->page_number);
5176  			goto out;
5177  		}
5178  		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
5179  		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
5180  			if (cfg_req->action
5181  			    != MPI3_CONFIG_ACTION_READ_CURRENT)
5182  				invalid_action = 1;
5183  			break;
5184  		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
5185  			if ((cfg_req->action ==
5186  			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
5187  			    (cfg_req->action ==
5188  			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
5189  				invalid_action = 1;
5190  			break;
5191  		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
5192  		default:
5193  			break;
5194  		}
5195  		if (invalid_action) {
5196  			ioc_err(mrioc,
5197  			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
5198  			    cfg_req->action, cfg_req->page_type,
5199  			    cfg_req->page_number, cfg_hdr->page_attribute);
5200  			goto out;
5201  		}
5202  		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
5203  		cfg_req->page_length = cfg_hdr->page_length;
5204  		cfg_req->page_version = cfg_hdr->page_version;
5205  	}
5206  	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
5207  		goto out;
5208  
5209  	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
5210  	    mem_desc.dma_addr);
5211  
5212  	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
5213  	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5214  		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
5215  		    cfg_buf_sz));
5216  		dprint_cfg_info(mrioc, "config buffer to be written\n");
5217  		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5218  			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5219  	}
5220  
5221  	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
5222  		goto out;
5223  
5224  	retval = 0;
5225  	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
5226  	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
5227  	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5228  		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
5229  		    cfg_buf_sz));
5230  		dprint_cfg_info(mrioc, "config buffer read\n");
5231  		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5232  			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5233  	}
5234  
5235  out:
5236  	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
5237  	return retval;
5238  }
5239  
5240  /**
5241   * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5242   * @mrioc: Adapter instance reference
5243   * @ioc_status: Pointer to return ioc status
5244   * @dev_pg0: Pointer to return device page 0
5245   * @pg_sz: Size of the memory allocated to the page pointer
5246   * @form: The form to be used for addressing the page
5247   * @form_spec: Form specific information like device handle
5248   *
5249   * This is the handler for the config page read of a specific
5250   * device page0. The ioc_status argument returns the controller's
5251   * ioc_status. This routine doesn't check ioc_status to decide
5252   * whether the page read succeeded; that is the caller's
5253   * responsibility.
5254   *
5255   * Return: 0 on success, non-zero on failure.
5256   */
5257  int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5258  	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
5259  {
5260  	struct mpi3_config_page_header cfg_hdr;
5261  	struct mpi3_config_request cfg_req;
5262  	u32 page_address;
5263  
5264  	memset(dev_pg0, 0, pg_sz);
5265  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5266  	memset(&cfg_req, 0, sizeof(cfg_req));
5267  
5268  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5269  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5270  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
5271  	cfg_req.page_number = 0;
5272  	cfg_req.page_address = 0;
5273  
5274  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5275  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5276  		ioc_err(mrioc, "device page0 header read failed\n");
5277  		goto out_failed;
5278  	}
5279  	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5280  		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
5281  		    *ioc_status);
5282  		goto out_failed;
5283  	}
5284  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5285  	page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
5286  	    (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
5287  	cfg_req.page_address = cpu_to_le32(page_address);
5288  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5289  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
5290  		ioc_err(mrioc, "device page0 read failed\n");
5291  		goto out_failed;
5292  	}
5293  	return 0;
5294  out_failed:
5295  	return -1;
5296  }
5297  
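/*
 * Editor's note: an illustrative sketch only, not part of the upstream
 * driver. Because mpi3mr_cfg_get_dev_pg0() intentionally leaves the
 * ioc_status check to its caller, a typical (hypothetical) caller reads
 * the page by device handle and then validates ioc_status itself:
 */
#if 0	/* illustrative only, not built */
static int mpi3mr_example_read_dev_pg0(struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3_device_page0 dev_pg0;
	u16 ioc_status = 0;

	if (mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
	    sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle)) {
		ioc_err(mrioc, "device page0 read failed for handle(0x%04x)\n",
		    handle);
		return -1;
	}
	/* The page contents are valid only if the controller reported success. */
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "device page0 ioc_status(0x%04x) for handle(0x%04x)\n",
		    ioc_status, handle);
		return -1;
	}
	/* ... consume dev_pg0 fields here ... */
	return 0;
}
#endif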
5298  
5299  /**
5300   * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5301   * @mrioc: Adapter instance reference
5302   * @ioc_status: Pointer to return ioc status
5303   * @phy_pg0: Pointer to return SAS Phy page 0
5304   * @pg_sz: Size of the memory allocated to the page pointer
5305   * @form: The form to be used for addressing the page
5306   * @form_spec: Form specific information like phy number
5307   *
5308   * This is the handler for the config page read of a specific SAS
5309   * Phy page0. The ioc_status argument returns the controller's
5310   * ioc_status. This routine doesn't check ioc_status to decide
5311   * whether the page read succeeded; that is the caller's
5312   * responsibility.
5313   *
5314   * Return: 0 on success, non-zero on failure.
5315   */
5316  int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5317  	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
5318  	u32 form_spec)
5319  {
5320  	struct mpi3_config_page_header cfg_hdr;
5321  	struct mpi3_config_request cfg_req;
5322  	u32 page_address;
5323  
5324  	memset(phy_pg0, 0, pg_sz);
5325  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5326  	memset(&cfg_req, 0, sizeof(cfg_req));
5327  
5328  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5329  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5330  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5331  	cfg_req.page_number = 0;
5332  	cfg_req.page_address = 0;
5333  
5334  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5335  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5336  		ioc_err(mrioc, "sas phy page0 header read failed\n");
5337  		goto out_failed;
5338  	}
5339  	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5340  		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
5341  		    *ioc_status);
5342  		goto out_failed;
5343  	}
5344  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5345  	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5346  	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5347  	cfg_req.page_address = cpu_to_le32(page_address);
5348  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5349  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
5350  		ioc_err(mrioc, "sas phy page0 read failed\n");
5351  		goto out_failed;
5352  	}
5353  	return 0;
5354  out_failed:
5355  	return -1;
5356  }
5357  
5358  /**
5359   * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
5360   * @mrioc: Adapter instance reference
5361   * @ioc_status: Pointer to return ioc status
5362   * @phy_pg1: Pointer to return SAS Phy page 1
5363   * @pg_sz: Size of the memory allocated to the page pointer
5364   * @form: The form to be used for addressing the page
5365   * @form_spec: Form specific information like phy number
5366   *
5367   * This is the handler for the config page read of a specific SAS
5368   * Phy page1. The ioc_status argument returns the controller's
5369   * ioc_status. This routine doesn't check ioc_status to decide
5370   * whether the page read succeeded; that is the caller's
5371   * responsibility.
5372   *
5373   * Return: 0 on success, non-zero on failure.
5374   */
5375  int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5376  	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
5377  	u32 form_spec)
5378  {
5379  	struct mpi3_config_page_header cfg_hdr;
5380  	struct mpi3_config_request cfg_req;
5381  	u32 page_address;
5382  
5383  	memset(phy_pg1, 0, pg_sz);
5384  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5385  	memset(&cfg_req, 0, sizeof(cfg_req));
5386  
5387  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5388  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5389  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5390  	cfg_req.page_number = 1;
5391  	cfg_req.page_address = 0;
5392  
5393  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5394  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5395  		ioc_err(mrioc, "sas phy page1 header read failed\n");
5396  		goto out_failed;
5397  	}
5398  	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5399  		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
5400  		    *ioc_status);
5401  		goto out_failed;
5402  	}
5403  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5404  	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5405  	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5406  	cfg_req.page_address = cpu_to_le32(page_address);
5407  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5408  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
5409  		ioc_err(mrioc, "sas phy page1 read failed\n");
5410  		goto out_failed;
5411  	}
5412  	return 0;
5413  out_failed:
5414  	return -1;
5415  }
5416  
5417  
5418  /**
5419   * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
5420   * @mrioc: Adapter instance reference
5421   * @ioc_status: Pointer to return ioc status
5422   * @exp_pg0: Pointer to return SAS Expander page 0
5423   * @pg_sz: Size of the memory allocated to the page pointer
5424   * @form: The form to be used for addressing the page
5425   * @form_spec: Form specific information like device handle
5426   *
5427   * This is the handler for the config page read of a specific SAS
5428   * Expander page0. The ioc_status argument returns the controller's
5429   * ioc_status. This routine doesn't check ioc_status to decide
5430   * whether the page read succeeded; that is the caller's
5431   * responsibility.
5432   *
5433   * Return: 0 on success, non-zero on failure.
5434   */
5435  int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5436  	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
5437  	u32 form_spec)
5438  {
5439  	struct mpi3_config_page_header cfg_hdr;
5440  	struct mpi3_config_request cfg_req;
5441  	u32 page_address;
5442  
5443  	memset(exp_pg0, 0, pg_sz);
5444  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5445  	memset(&cfg_req, 0, sizeof(cfg_req));
5446  
5447  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5448  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5449  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
5450  	cfg_req.page_number = 0;
5451  	cfg_req.page_address = 0;
5452  
5453  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5454  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5455  		ioc_err(mrioc, "expander page0 header read failed\n");
5456  		goto out_failed;
5457  	}
5458  	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5459  		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
5460  		    *ioc_status);
5461  		goto out_failed;
5462  	}
5463  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5464  	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
5465  	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
5466  	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
5467  	cfg_req.page_address = cpu_to_le32(page_address);
5468  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5469  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
5470  		ioc_err(mrioc, "expander page0 read failed\n");
5471  		goto out_failed;
5472  	}
5473  	return 0;
5474  out_failed:
5475  	return -1;
5476  }
5477  
5478  /**
5479   * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
5480   * @mrioc: Adapter instance reference
5481   * @ioc_status: Pointer to return ioc status
5482   * @exp_pg1: Pointer to return SAS Expander page 1
5483   * @pg_sz: Size of the memory allocated to the page pointer
5484   * @form: The form to be used for addressing the page
5485   * @form_spec: Form specific information like phy number
5486   *
5487   * This is the handler for the config page read of a specific SAS
5488   * Expander page1. The ioc_status argument returns the controller's
5489   * ioc_status. This routine doesn't check ioc_status to decide
5490   * whether the page read succeeded; that is the caller's
5491   * responsibility.
5492   *
5493   * Return: 0 on success, non-zero on failure.
5494   */
5495  int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5496  	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
5497  	u32 form_spec)
5498  {
5499  	struct mpi3_config_page_header cfg_hdr;
5500  	struct mpi3_config_request cfg_req;
5501  	u32 page_address;
5502  
5503  	memset(exp_pg1, 0, pg_sz);
5504  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5505  	memset(&cfg_req, 0, sizeof(cfg_req));
5506  
5507  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5508  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5509  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
5510  	cfg_req.page_number = 1;
5511  	cfg_req.page_address = 0;
5512  
5513  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5514  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5515  		ioc_err(mrioc, "expander page1 header read failed\n");
5516  		goto out_failed;
5517  	}
5518  	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5519  		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
5520  		    *ioc_status);
5521  		goto out_failed;
5522  	}
5523  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5524  	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
5525  	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
5526  	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
5527  	cfg_req.page_address = cpu_to_le32(page_address);
5528  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5529  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
5530  		ioc_err(mrioc, "expander page1 read failed\n");
5531  		goto out_failed;
5532  	}
5533  	return 0;
5534  out_failed:
5535  	return -1;
5536  }
5537  
5538  /**
5539   * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
5540   * @mrioc: Adapter instance reference
5541   * @ioc_status: Pointer to return ioc status
5542   * @encl_pg0: Pointer to return Enclosure page 0
5543   * @pg_sz: Size of the memory allocated to the page pointer
5544   * @form: The form to be used for addressing the page
5545   * @form_spec: Form specific information like device handle
5546   *
5547   * This is the handler for the config page read of a specific
5548   * Enclosure page0. The ioc_status argument returns the controller's
5549   * ioc_status. This routine doesn't check ioc_status to decide
5550   * whether the page read succeeded; that is the caller's
5551   * responsibility.
5552   *
5553   * Return: 0 on success, non-zero on failure.
5554   */
5555  int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5556  	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
5557  	u32 form_spec)
5558  {
5559  	struct mpi3_config_page_header cfg_hdr;
5560  	struct mpi3_config_request cfg_req;
5561  	u32 page_address;
5562  
5563  	memset(encl_pg0, 0, pg_sz);
5564  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5565  	memset(&cfg_req, 0, sizeof(cfg_req));
5566  
5567  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5568  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5569  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
5570  	cfg_req.page_number = 0;
5571  	cfg_req.page_address = 0;
5572  
5573  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5574  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5575  		ioc_err(mrioc, "enclosure page0 header read failed\n");
5576  		goto out_failed;
5577  	}
5578  	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5579  		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
5580  		    *ioc_status);
5581  		goto out_failed;
5582  	}
5583  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5584  	page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
5585  	    (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
5586  	cfg_req.page_address = cpu_to_le32(page_address);
5587  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5588  	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
5589  		ioc_err(mrioc, "enclosure page0 read failed\n");
5590  		goto out_failed;
5591  	}
5592  	return 0;
5593  out_failed:
5594  	return -1;
5595  }
5596  
5597  
5598  /**
5599   * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
5600   * @mrioc: Adapter instance reference
5601   * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
5602   * @pg_sz: Size of the memory allocated to the page pointer
5603   *
5604   * This is the handler for the config page read of the SAS IO Unit
5605   * page0. This routine checks ioc_status to decide whether the
5606   * page read succeeded.
5607   *
5608   * Return: 0 on success, non-zero on failure.
5609   */
5610  int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
5611  	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
5612  {
5613  	struct mpi3_config_page_header cfg_hdr;
5614  	struct mpi3_config_request cfg_req;
5615  	u16 ioc_status = 0;
5616  
5617  	memset(sas_io_unit_pg0, 0, pg_sz);
5618  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5619  	memset(&cfg_req, 0, sizeof(cfg_req));
5620  
5621  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5622  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5623  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
5624  	cfg_req.page_number = 0;
5625  	cfg_req.page_address = 0;
5626  
5627  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5628  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5629  		ioc_err(mrioc, "sas io unit page0 header read failed\n");
5630  		goto out_failed;
5631  	}
5632  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5633  		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
5634  		    ioc_status);
5635  		goto out_failed;
5636  	}
5637  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5638  
5639  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5640  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
5641  		ioc_err(mrioc, "sas io unit page0 read failed\n");
5642  		goto out_failed;
5643  	}
5644  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5645  		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
5646  		    ioc_status);
5647  		goto out_failed;
5648  	}
5649  	return 0;
5650  out_failed:
5651  	return -1;
5652  }
5653  
5654  /**
5655   * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
5656   * @mrioc: Adapter instance reference
5657   * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
5658   * @pg_sz: Size of the memory allocated to the page pointer
5659   *
5660   * This is the handler for the config page read of the SAS IO Unit
5661   * page1. This routine checks ioc_status to decide whether the
5662   * page read succeeded.
5663   *
5664   * Return: 0 on success, non-zero on failure.
5665   */
5666  int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
5667  	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
5668  {
5669  	struct mpi3_config_page_header cfg_hdr;
5670  	struct mpi3_config_request cfg_req;
5671  	u16 ioc_status = 0;
5672  
5673  	memset(sas_io_unit_pg1, 0, pg_sz);
5674  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5675  	memset(&cfg_req, 0, sizeof(cfg_req));
5676  
5677  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5678  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5679  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
5680  	cfg_req.page_number = 1;
5681  	cfg_req.page_address = 0;
5682  
5683  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5684  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5685  		ioc_err(mrioc, "sas io unit page1 header read failed\n");
5686  		goto out_failed;
5687  	}
5688  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5689  		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
5690  		    ioc_status);
5691  		goto out_failed;
5692  	}
5693  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5694  
5695  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5696  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
5697  		ioc_err(mrioc, "sas io unit page1 read failed\n");
5698  		goto out_failed;
5699  	}
5700  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5701  		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
5702  		    ioc_status);
5703  		goto out_failed;
5704  	}
5705  	return 0;
5706  out_failed:
5707  	return -1;
5708  }
5709  
5710  /**
5711   * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
5712   * @mrioc: Adapter instance reference
5713   * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
5714   * @pg_sz: Size of the memory allocated to the page pointer
5715   *
5716   * This is the handler for the config page write of the SAS IO
5717   * Unit page1. This routine checks ioc_status to decide whether
5718   * the page write succeeded. This will modify both the current
5719   * and the persistent page.
5720   *
5721   * Return: 0 on success, non-zero on failure.
5722   */
5723  int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
5724  	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
5725  {
5726  	struct mpi3_config_page_header cfg_hdr;
5727  	struct mpi3_config_request cfg_req;
5728  	u16 ioc_status = 0;
5729  
5730  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5731  	memset(&cfg_req, 0, sizeof(cfg_req));
5732  
5733  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5734  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5735  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
5736  	cfg_req.page_number = 1;
5737  	cfg_req.page_address = 0;
5738  
5739  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5740  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5741  		ioc_err(mrioc, "sas io unit page1 header read failed\n");
5742  		goto out_failed;
5743  	}
5744  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5745  		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
5746  		    ioc_status);
5747  		goto out_failed;
5748  	}
5749  	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
5750  
5751  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5752  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
5753  		ioc_err(mrioc, "sas io unit page1 write current failed\n");
5754  		goto out_failed;
5755  	}
5756  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5757  		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
5758  		    ioc_status);
5759  		goto out_failed;
5760  	}
5761  
5762  	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
5763  
5764  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5765  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
5766  		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
5767  		goto out_failed;
5768  	}
5769  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5770  		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
5771  		    ioc_status);
5772  		goto out_failed;
5773  	}
5774  	return 0;
5775  out_failed:
5776  	return -1;
5777  }
5778  
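/*
 * Editor's note: an illustrative sketch only, not part of the upstream
 * driver. Since mpi3mr_cfg_set_sas_io_unit_pg1() updates both the
 * current and the persistent copy of the page, the usual pattern is a
 * read-modify-write: read page1 into a caller-provided buffer, adjust
 * the fields of interest, then write it back. A hypothetical helper
 * (buffer and size supplied by the caller) might look like this.
 */
#if 0	/* illustrative only, not built */
static int mpi3mr_example_update_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page1 *pg1, u16 pg_sz)
{
	/* Read the current settings first ... */
	if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, pg1, pg_sz))
		return -1;

	/* ... modify the fields of interest in *pg1 here ... */

	/*
	 * ... then write them back; this updates both the current and
	 * the persistent copy of the page.
	 */
	if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, pg1, pg_sz))
		return -1;

	return 0;
}
#endif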
5779  /**
5780   * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
5781   * @mrioc: Adapter instance reference
5782   * @driver_pg1: Pointer to return Driver page 1
5783   * @pg_sz: Size of the memory allocated to the page pointer
5784   *
5785   * This is the handler for the config page read of the Driver
5786   * page1. This routine checks ioc_status to decide whether the
5787   * page read succeeded.
5788   *
5789   * Return: 0 on success, non-zero on failure.
5790   */
5791  int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
5792  	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
5793  {
5794  	struct mpi3_config_page_header cfg_hdr;
5795  	struct mpi3_config_request cfg_req;
5796  	u16 ioc_status = 0;
5797  
5798  	memset(driver_pg1, 0, pg_sz);
5799  	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5800  	memset(&cfg_req, 0, sizeof(cfg_req));
5801  
5802  	cfg_req.function = MPI3_FUNCTION_CONFIG;
5803  	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5804  	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
5805  	cfg_req.page_number = 1;
5806  	cfg_req.page_address = 0;
5807  
5808  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5809  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5810  		ioc_err(mrioc, "driver page1 header read failed\n");
5811  		goto out_failed;
5812  	}
5813  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5814  		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
5815  		    ioc_status);
5816  		goto out_failed;
5817  	}
5818  	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5819  
5820  	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5821  	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
5822  		ioc_err(mrioc, "driver page1 read failed\n");
5823  		goto out_failed;
5824  	}
5825  	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5826  		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
5827  		    ioc_status);
5828  		goto out_failed;
5829  	}
5830  	return 0;
5831  out_failed:
5832  	return -1;
5833  }
5834