xref: /openbmc/linux/drivers/scsi/megaraid/megaraid_sas_base.c (revision ed4543328f7108e1047b83b96ca7f7208747d930)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *  Linux MegaRAID driver for SAS based RAID controllers
4   *
5   *  Copyright (c) 2003-2013  LSI Corporation
6   *  Copyright (c) 2013-2016  Avago Technologies
7   *  Copyright (c) 2016-2018  Broadcom Inc.
8   *
9   *  Authors: Broadcom Inc.
10   *           Sreenivas Bagalkote
11   *           Sumant Patro
12   *           Bo Yang
13   *           Adam Radford
14   *           Kashyap Desai <kashyap.desai@broadcom.com>
15   *           Sumit Saxena <sumit.saxena@broadcom.com>
16   *
17   *  Send feedback to: megaraidlinux.pdl@broadcom.com
18   */
19  
20  #include <linux/kernel.h>
21  #include <linux/types.h>
22  #include <linux/pci.h>
23  #include <linux/list.h>
24  #include <linux/moduleparam.h>
25  #include <linux/module.h>
26  #include <linux/spinlock.h>
27  #include <linux/interrupt.h>
28  #include <linux/delay.h>
29  #include <linux/uio.h>
30  #include <linux/slab.h>
31  #include <linux/uaccess.h>
32  #include <asm/unaligned.h>
33  #include <linux/fs.h>
34  #include <linux/compat.h>
35  #include <linux/blkdev.h>
36  #include <linux/mutex.h>
37  #include <linux/poll.h>
38  #include <linux/vmalloc.h>
39  #include <linux/irq_poll.h>
40  #include <linux/blk-mq-pci.h>
41  
42  #include <scsi/scsi.h>
43  #include <scsi/scsi_cmnd.h>
44  #include <scsi/scsi_device.h>
45  #include <scsi/scsi_host.h>
46  #include <scsi/scsi_tcq.h>
47  #include <scsi/scsi_dbg.h>
48  #include "megaraid_sas_fusion.h"
49  #include "megaraid_sas.h"
50  
51  /*
52   * Number of sectors per IO command
53   * Will be set in megasas_init_mfi if user does not provide
54   */
55  static unsigned int max_sectors;
56  module_param_named(max_sectors, max_sectors, int, 0444);
57  MODULE_PARM_DESC(max_sectors,
58  	"Maximum number of sectors per IO command");
59  
60  static int msix_disable;
61  module_param(msix_disable, int, 0444);
62  MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
63  
64  static unsigned int msix_vectors;
65  module_param(msix_vectors, int, 0444);
66  MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
67  
68  static int allow_vf_ioctls;
69  module_param(allow_vf_ioctls, int, 0444);
70  MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
71  
72  static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
73  module_param(throttlequeuedepth, int, 0444);
74  MODULE_PARM_DESC(throttlequeuedepth,
75  	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
76  
77  unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
78  module_param(resetwaittime, int, 0444);
79  MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
80  
81  static int smp_affinity_enable = 1;
82  module_param(smp_affinity_enable, int, 0444);
83  MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
84  
85  static int rdpq_enable = 1;
86  module_param(rdpq_enable, int, 0444);
87  MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
88  
89  unsigned int dual_qdepth_disable;
90  module_param(dual_qdepth_disable, int, 0444);
91  MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
92  
93  static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
94  module_param(scmd_timeout, int, 0444);
95  MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
96  
97  int perf_mode = -1;
98  module_param(perf_mode, int, 0444);
99  MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
100  		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
101  		"interrupt coalescing is enabled only on high iops queues\n\t\t"
102  		"1 - iops: High iops queues are not allocated &\n\t\t"
103  		"interrupt coalescing is enabled on all queues\n\t\t"
104  		"2 - latency: High iops queues are not allocated &\n\t\t"
105  		"interrupt coalescing is disabled on all queues\n\t\t"
106  		"default mode is 'balanced'"
107  		);
108  
109  int event_log_level = MFI_EVT_CLASS_CRITICAL;
110  module_param(event_log_level, int, 0644);
111  MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
112  
113  unsigned int enable_sdev_max_qd;
114  module_param(enable_sdev_max_qd, int, 0444);
115  MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
116  
117  int poll_queues;
118  module_param(poll_queues, int, 0444);
119  MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t"
120  		"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
121  		"It is not applicable for MFI_SERIES. &\n\t\t"
122  		"Driver will work in latency mode. &\n\t\t"
123  		"High iops queues are not allocated &\n\t\t"
124  		);
125  
126  int host_tagset_enable = 1;
127  module_param(host_tagset_enable, int, 0444);
128  MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
129  
130  MODULE_LICENSE("GPL");
131  MODULE_VERSION(MEGASAS_VERSION);
132  MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
133  MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
134  
135  int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
136  static int megasas_get_pd_list(struct megasas_instance *instance);
137  static int megasas_ld_list_query(struct megasas_instance *instance,
138  				 u8 query_type);
139  static int megasas_issue_init_mfi(struct megasas_instance *instance);
140  static int megasas_register_aen(struct megasas_instance *instance,
141  				u32 seq_num, u32 class_locale_word);
142  static void megasas_get_pd_info(struct megasas_instance *instance,
143  				struct scsi_device *sdev);
144  static void
145  megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
146  
147  /*
148   * PCI ID table for all supported controllers
149   */
150  static struct pci_device_id megasas_pci_table[] = {
151  
152  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
153  	/* xscale IOP */
154  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
155  	/* ppc IOP */
156  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
157  	/* ppc IOP */
158  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
159  	/* gen2*/
160  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
161  	/* gen2*/
162  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
163  	/* skinny*/
164  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
165  	/* skinny*/
166  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
167  	/* xscale IOP, vega */
168  	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
169  	/* xscale IOP */
170  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
171  	/* Fusion */
172  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
173  	/* Plasma */
174  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
175  	/* Invader */
176  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
177  	/* Fury */
178  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
179  	/* Intruder */
180  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
181  	/* Intruder 24 port*/
182  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
183  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
184  	/* VENTURA */
185  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
186  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
187  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
188  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
189  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
190  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
191  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
192  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
193  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
194  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
195  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
196  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
197  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
198  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
199  	{}
200  };
201  
202  MODULE_DEVICE_TABLE(pci, megasas_pci_table);
203  
204  static int megasas_mgmt_majorno;
205  struct megasas_mgmt_info megasas_mgmt_info;
206  static struct fasync_struct *megasas_async_queue;
207  static DEFINE_MUTEX(megasas_async_queue_mutex);
208  
209  static int megasas_poll_wait_aen;
210  static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
211  static u32 support_poll_for_event;
212  u32 megasas_dbg_lvl;
213  static u32 support_device_change;
214  static bool support_nvme_encapsulation;
215  static bool support_pci_lane_margining;
216  
217  /* define lock for aen poll */
218  static DEFINE_SPINLOCK(poll_aen_lock);
219  
220  extern struct dentry *megasas_debugfs_root;
221  extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
222  
223  void
224  megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
225  		     u8 alt_status);
226  static u32
227  megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
228  static int
229  megasas_adp_reset_gen2(struct megasas_instance *instance,
230  		       struct megasas_register_set __iomem *reg_set);
231  static irqreturn_t megasas_isr(int irq, void *devp);
232  static u32
233  megasas_init_adapter_mfi(struct megasas_instance *instance);
234  u32
235  megasas_build_and_issue_cmd(struct megasas_instance *instance,
236  			    struct scsi_cmnd *scmd);
237  static void megasas_complete_cmd_dpc(unsigned long instance_addr);
238  int
239  wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
240  	int seconds);
241  void megasas_fusion_ocr_wq(struct work_struct *work);
242  static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
243  					 int initial);
244  static int
245  megasas_set_dma_mask(struct megasas_instance *instance);
246  static int
247  megasas_alloc_ctrl_mem(struct megasas_instance *instance);
248  static inline void
249  megasas_free_ctrl_mem(struct megasas_instance *instance);
250  static inline int
251  megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
252  static inline void
253  megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
254  static inline void
255  megasas_init_ctrl_params(struct megasas_instance *instance);
256  
megasas_readl(struct megasas_instance * instance,const volatile void __iomem * addr)257  u32 megasas_readl(struct megasas_instance *instance,
258  		  const volatile void __iomem *addr)
259  {
260  	u32 i = 0, ret_val;
261  	/*
262  	 * Due to a HW errata in Aero controllers, reads to certain
263  	 * Fusion registers could intermittently return all zeroes.
264  	 * This behavior is transient in nature and subsequent reads will
265  	 * return valid value. As a workaround in driver, retry readl for
266  	 * up to thirty times until a non-zero value is read.
267  	 */
268  	if (instance->adapter_type == AERO_SERIES) {
269  		do {
270  			ret_val = readl(addr);
271  			i++;
272  		} while (ret_val == 0 && i < 30);
273  		return ret_val;
274  	} else {
275  		return readl(addr);
276  	}
277  }
278  
279  /**
280   * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
281   * @instance:			Adapter soft state
282   * @dcmd:			DCMD frame inside MFI command
283   * @dma_addr:			DMA address of buffer to be passed to FW
284   * @dma_len:			Length of DMA buffer to be passed to FW
285   * @return:			void
286   */
megasas_set_dma_settings(struct megasas_instance * instance,struct megasas_dcmd_frame * dcmd,dma_addr_t dma_addr,u32 dma_len)287  void megasas_set_dma_settings(struct megasas_instance *instance,
288  			      struct megasas_dcmd_frame *dcmd,
289  			      dma_addr_t dma_addr, u32 dma_len)
290  {
291  	if (instance->consistent_mask_64bit) {
292  		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
293  		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
294  		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
295  
296  	} else {
297  		dcmd->sgl.sge32[0].phys_addr =
298  				cpu_to_le32(lower_32_bits(dma_addr));
299  		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
300  		dcmd->flags = cpu_to_le16(dcmd->flags);
301  	}
302  }
303  
304  static void
megasas_issue_dcmd(struct megasas_instance * instance,struct megasas_cmd * cmd)305  megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
306  {
307  	instance->instancet->fire_cmd(instance,
308  		cmd->frame_phys_addr, 0, instance->reg_set);
309  	return;
310  }
311  
312  /**
313   * megasas_get_cmd -	Get a command from the free pool
314   * @instance:		Adapter soft state
315   *
316   * Returns a free command from the pool
317   */
megasas_get_cmd(struct megasas_instance * instance)318  struct megasas_cmd *megasas_get_cmd(struct megasas_instance
319  						  *instance)
320  {
321  	unsigned long flags;
322  	struct megasas_cmd *cmd = NULL;
323  
324  	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
325  
326  	if (!list_empty(&instance->cmd_pool)) {
327  		cmd = list_entry((&instance->cmd_pool)->next,
328  				 struct megasas_cmd, list);
329  		list_del_init(&cmd->list);
330  	} else {
331  		dev_err(&instance->pdev->dev, "Command pool empty!\n");
332  	}
333  
334  	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
335  	return cmd;
336  }
337  
338  /**
339   * megasas_return_cmd -	Return a cmd to free command pool
340   * @instance:		Adapter soft state
341   * @cmd:		Command packet to be returned to free command pool
342   */
343  void
megasas_return_cmd(struct megasas_instance * instance,struct megasas_cmd * cmd)344  megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
345  {
346  	unsigned long flags;
347  	u32 blk_tags;
348  	struct megasas_cmd_fusion *cmd_fusion;
349  	struct fusion_context *fusion = instance->ctrl_context;
350  
351  	/* This flag is used only for fusion adapter.
352  	 * Wait for Interrupt for Polled mode DCMD
353  	 */
354  	if (cmd->flags & DRV_DCMD_POLLED_MODE)
355  		return;
356  
357  	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
358  
359  	if (fusion) {
360  		blk_tags = instance->max_scsi_cmds + cmd->index;
361  		cmd_fusion = fusion->cmd_list[blk_tags];
362  		megasas_return_cmd_fusion(instance, cmd_fusion);
363  	}
364  	cmd->scmd = NULL;
365  	cmd->frame_count = 0;
366  	cmd->flags = 0;
367  	memset(cmd->frame, 0, instance->mfi_frame_size);
368  	cmd->frame->io.context = cpu_to_le32(cmd->index);
369  	if (!fusion && reset_devices)
370  		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
371  	list_add(&cmd->list, (&instance->cmd_pool)->next);
372  
373  	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
374  
375  }
376  
377  static const char *
format_timestamp(uint32_t timestamp)378  format_timestamp(uint32_t timestamp)
379  {
380  	static char buffer[32];
381  
382  	if ((timestamp & 0xff000000) == 0xff000000)
383  		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
384  		0x00ffffff);
385  	else
386  		snprintf(buffer, sizeof(buffer), "%us", timestamp);
387  	return buffer;
388  }
389  
390  static const char *
format_class(int8_t class)391  format_class(int8_t class)
392  {
393  	static char buffer[6];
394  
395  	switch (class) {
396  	case MFI_EVT_CLASS_DEBUG:
397  		return "debug";
398  	case MFI_EVT_CLASS_PROGRESS:
399  		return "progress";
400  	case MFI_EVT_CLASS_INFO:
401  		return "info";
402  	case MFI_EVT_CLASS_WARNING:
403  		return "WARN";
404  	case MFI_EVT_CLASS_CRITICAL:
405  		return "CRIT";
406  	case MFI_EVT_CLASS_FATAL:
407  		return "FATAL";
408  	case MFI_EVT_CLASS_DEAD:
409  		return "DEAD";
410  	default:
411  		snprintf(buffer, sizeof(buffer), "%d", class);
412  		return buffer;
413  	}
414  }
415  
416  /**
417    * megasas_decode_evt: Decode FW AEN event and print critical event
418    * for information.
419    * @instance:			Adapter soft state
420    */
421  static void
megasas_decode_evt(struct megasas_instance * instance)422  megasas_decode_evt(struct megasas_instance *instance)
423  {
424  	struct megasas_evt_detail *evt_detail = instance->evt_detail;
425  	union megasas_evt_class_locale class_locale;
426  	class_locale.word = le32_to_cpu(evt_detail->cl.word);
427  
428  	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
429  	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
430  		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
431  		event_log_level = MFI_EVT_CLASS_CRITICAL;
432  	}
433  
434  	if (class_locale.members.class >= event_log_level)
435  		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
436  			le32_to_cpu(evt_detail->seq_num),
437  			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
438  			(class_locale.members.locale),
439  			format_class(class_locale.members.class),
440  			evt_detail->description);
441  
442  	if (megasas_dbg_lvl & LD_PD_DEBUG)
443  		dev_info(&instance->pdev->dev,
444  			 "evt_detail.args.ld.target_id/index %d/%d\n",
445  			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);
446  
447  }
448  
449  /*
450   * The following functions are defined for xscale
451   * (deviceid : 1064R, PERC5) controllers
452   */
453  
454  /**
455   * megasas_enable_intr_xscale -	Enables interrupts
456   * @instance:	Adapter soft state
457   */
458  static inline void
megasas_enable_intr_xscale(struct megasas_instance * instance)459  megasas_enable_intr_xscale(struct megasas_instance *instance)
460  {
461  	struct megasas_register_set __iomem *regs;
462  
463  	regs = instance->reg_set;
464  	writel(0, &(regs)->outbound_intr_mask);
465  
466  	/* Dummy readl to force pci flush */
467  	readl(&regs->outbound_intr_mask);
468  }
469  
470  /**
471   * megasas_disable_intr_xscale -Disables interrupt
472   * @instance:	Adapter soft state
473   */
474  static inline void
megasas_disable_intr_xscale(struct megasas_instance * instance)475  megasas_disable_intr_xscale(struct megasas_instance *instance)
476  {
477  	struct megasas_register_set __iomem *regs;
478  	u32 mask = 0x1f;
479  
480  	regs = instance->reg_set;
481  	writel(mask, &regs->outbound_intr_mask);
482  	/* Dummy readl to force pci flush */
483  	readl(&regs->outbound_intr_mask);
484  }
485  
486  /**
487   * megasas_read_fw_status_reg_xscale - returns the current FW status value
488   * @instance:	Adapter soft state
489   */
490  static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance * instance)491  megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
492  {
493  	return readl(&instance->reg_set->outbound_msg_0);
494  }
495  /**
496   * megasas_clear_intr_xscale -	Check & clear interrupt
497   * @instance:	Adapter soft state
498   */
499  static int
megasas_clear_intr_xscale(struct megasas_instance * instance)500  megasas_clear_intr_xscale(struct megasas_instance *instance)
501  {
502  	u32 status;
503  	u32 mfiStatus = 0;
504  	struct megasas_register_set __iomem *regs;
505  	regs = instance->reg_set;
506  
507  	/*
508  	 * Check if it is our interrupt
509  	 */
510  	status = readl(&regs->outbound_intr_status);
511  
512  	if (status & MFI_OB_INTR_STATUS_MASK)
513  		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
514  	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
515  		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
516  
517  	/*
518  	 * Clear the interrupt by writing back the same value
519  	 */
520  	if (mfiStatus)
521  		writel(status, &regs->outbound_intr_status);
522  
523  	/* Dummy readl to force pci flush */
524  	readl(&regs->outbound_intr_status);
525  
526  	return mfiStatus;
527  }
528  
529  /**
530   * megasas_fire_cmd_xscale -	Sends command to the FW
531   * @instance:		Adapter soft state
532   * @frame_phys_addr :	Physical address of cmd
533   * @frame_count :	Number of frames for the command
534   * @regs :		MFI register set
535   */
536  static inline void
megasas_fire_cmd_xscale(struct megasas_instance * instance,dma_addr_t frame_phys_addr,u32 frame_count,struct megasas_register_set __iomem * regs)537  megasas_fire_cmd_xscale(struct megasas_instance *instance,
538  		dma_addr_t frame_phys_addr,
539  		u32 frame_count,
540  		struct megasas_register_set __iomem *regs)
541  {
542  	unsigned long flags;
543  
544  	spin_lock_irqsave(&instance->hba_lock, flags);
545  	writel((frame_phys_addr >> 3)|(frame_count),
546  	       &(regs)->inbound_queue_port);
547  	spin_unlock_irqrestore(&instance->hba_lock, flags);
548  }
549  
550  /**
551   * megasas_adp_reset_xscale -  For controller reset
552   * @instance:	Adapter soft state
553   * @regs:	MFI register set
554   */
555  static int
megasas_adp_reset_xscale(struct megasas_instance * instance,struct megasas_register_set __iomem * regs)556  megasas_adp_reset_xscale(struct megasas_instance *instance,
557  	struct megasas_register_set __iomem *regs)
558  {
559  	u32 i;
560  	u32 pcidata;
561  
562  	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
563  
564  	for (i = 0; i < 3; i++)
565  		msleep(1000); /* sleep for 3 secs */
566  	pcidata  = 0;
567  	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
568  	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
569  	if (pcidata & 0x2) {
570  		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
571  		pcidata &= ~0x2;
572  		pci_write_config_dword(instance->pdev,
573  				MFI_1068_PCSR_OFFSET, pcidata);
574  
575  		for (i = 0; i < 2; i++)
576  			msleep(1000); /* need to wait 2 secs again */
577  
578  		pcidata  = 0;
579  		pci_read_config_dword(instance->pdev,
580  				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
581  		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
582  		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
583  			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
584  			pcidata = 0;
585  			pci_write_config_dword(instance->pdev,
586  				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
587  		}
588  	}
589  	return 0;
590  }
591  
592  /**
593   * megasas_check_reset_xscale -	For controller reset check
594   * @instance:	Adapter soft state
595   * @regs:	MFI register set
596   */
597  static int
megasas_check_reset_xscale(struct megasas_instance * instance,struct megasas_register_set __iomem * regs)598  megasas_check_reset_xscale(struct megasas_instance *instance,
599  		struct megasas_register_set __iomem *regs)
600  {
601  	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
602  	    (le32_to_cpu(*instance->consumer) ==
603  		MEGASAS_ADPRESET_INPROG_SIGN))
604  		return 1;
605  	return 0;
606  }
607  
608  static struct megasas_instance_template megasas_instance_template_xscale = {
609  
610  	.fire_cmd = megasas_fire_cmd_xscale,
611  	.enable_intr = megasas_enable_intr_xscale,
612  	.disable_intr = megasas_disable_intr_xscale,
613  	.clear_intr = megasas_clear_intr_xscale,
614  	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
615  	.adp_reset = megasas_adp_reset_xscale,
616  	.check_reset = megasas_check_reset_xscale,
617  	.service_isr = megasas_isr,
618  	.tasklet = megasas_complete_cmd_dpc,
619  	.init_adapter = megasas_init_adapter_mfi,
620  	.build_and_issue_cmd = megasas_build_and_issue_cmd,
621  	.issue_dcmd = megasas_issue_dcmd,
622  };
623  
624  /*
625   * This is the end of set of functions & definitions specific
626   * to xscale (deviceid : 1064R, PERC5) controllers
627   */
628  
629  /*
630   * The following functions are defined for ppc (deviceid : 0x60)
631   * controllers
632   */
633  
634  /**
635   * megasas_enable_intr_ppc -	Enables interrupts
636   * @instance:	Adapter soft state
637   */
638  static inline void
megasas_enable_intr_ppc(struct megasas_instance * instance)639  megasas_enable_intr_ppc(struct megasas_instance *instance)
640  {
641  	struct megasas_register_set __iomem *regs;
642  
643  	regs = instance->reg_set;
644  	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
645  
646  	writel(~0x80000000, &(regs)->outbound_intr_mask);
647  
648  	/* Dummy readl to force pci flush */
649  	readl(&regs->outbound_intr_mask);
650  }
651  
652  /**
653   * megasas_disable_intr_ppc -	Disable interrupt
654   * @instance:	Adapter soft state
655   */
656  static inline void
megasas_disable_intr_ppc(struct megasas_instance * instance)657  megasas_disable_intr_ppc(struct megasas_instance *instance)
658  {
659  	struct megasas_register_set __iomem *regs;
660  	u32 mask = 0xFFFFFFFF;
661  
662  	regs = instance->reg_set;
663  	writel(mask, &regs->outbound_intr_mask);
664  	/* Dummy readl to force pci flush */
665  	readl(&regs->outbound_intr_mask);
666  }
667  
668  /**
669   * megasas_read_fw_status_reg_ppc - returns the current FW status value
670   * @instance:	Adapter soft state
671   */
672  static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance * instance)673  megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
674  {
675  	return readl(&instance->reg_set->outbound_scratch_pad_0);
676  }
677  
678  /**
679   * megasas_clear_intr_ppc -	Check & clear interrupt
680   * @instance:	Adapter soft state
681   */
682  static int
megasas_clear_intr_ppc(struct megasas_instance * instance)683  megasas_clear_intr_ppc(struct megasas_instance *instance)
684  {
685  	u32 status, mfiStatus = 0;
686  	struct megasas_register_set __iomem *regs;
687  	regs = instance->reg_set;
688  
689  	/*
690  	 * Check if it is our interrupt
691  	 */
692  	status = readl(&regs->outbound_intr_status);
693  
694  	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
695  		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
696  
697  	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
698  		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
699  
700  	/*
701  	 * Clear the interrupt by writing back the same value
702  	 */
703  	writel(status, &regs->outbound_doorbell_clear);
704  
705  	/* Dummy readl to force pci flush */
706  	readl(&regs->outbound_doorbell_clear);
707  
708  	return mfiStatus;
709  }
710  
711  /**
712   * megasas_fire_cmd_ppc -	Sends command to the FW
713   * @instance:		Adapter soft state
714   * @frame_phys_addr:	Physical address of cmd
715   * @frame_count:	Number of frames for the command
716   * @regs:		MFI register set
717   */
718  static inline void
megasas_fire_cmd_ppc(struct megasas_instance * instance,dma_addr_t frame_phys_addr,u32 frame_count,struct megasas_register_set __iomem * regs)719  megasas_fire_cmd_ppc(struct megasas_instance *instance,
720  		dma_addr_t frame_phys_addr,
721  		u32 frame_count,
722  		struct megasas_register_set __iomem *regs)
723  {
724  	unsigned long flags;
725  
726  	spin_lock_irqsave(&instance->hba_lock, flags);
727  	writel((frame_phys_addr | (frame_count<<1))|1,
728  			&(regs)->inbound_queue_port);
729  	spin_unlock_irqrestore(&instance->hba_lock, flags);
730  }
731  
732  /**
733   * megasas_check_reset_ppc -	For controller reset check
734   * @instance:	Adapter soft state
735   * @regs:	MFI register set
736   */
737  static int
megasas_check_reset_ppc(struct megasas_instance * instance,struct megasas_register_set __iomem * regs)738  megasas_check_reset_ppc(struct megasas_instance *instance,
739  			struct megasas_register_set __iomem *regs)
740  {
741  	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
742  		return 1;
743  
744  	return 0;
745  }
746  
747  static struct megasas_instance_template megasas_instance_template_ppc = {
748  
749  	.fire_cmd = megasas_fire_cmd_ppc,
750  	.enable_intr = megasas_enable_intr_ppc,
751  	.disable_intr = megasas_disable_intr_ppc,
752  	.clear_intr = megasas_clear_intr_ppc,
753  	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
754  	.adp_reset = megasas_adp_reset_xscale,
755  	.check_reset = megasas_check_reset_ppc,
756  	.service_isr = megasas_isr,
757  	.tasklet = megasas_complete_cmd_dpc,
758  	.init_adapter = megasas_init_adapter_mfi,
759  	.build_and_issue_cmd = megasas_build_and_issue_cmd,
760  	.issue_dcmd = megasas_issue_dcmd,
761  };
762  
763  /**
764   * megasas_enable_intr_skinny -	Enables interrupts
765   * @instance:	Adapter soft state
766   */
767  static inline void
megasas_enable_intr_skinny(struct megasas_instance * instance)768  megasas_enable_intr_skinny(struct megasas_instance *instance)
769  {
770  	struct megasas_register_set __iomem *regs;
771  
772  	regs = instance->reg_set;
773  	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
774  
775  	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
776  
777  	/* Dummy readl to force pci flush */
778  	readl(&regs->outbound_intr_mask);
779  }
780  
781  /**
782   * megasas_disable_intr_skinny -	Disables interrupt
783   * @instance:	Adapter soft state
784   */
785  static inline void
megasas_disable_intr_skinny(struct megasas_instance * instance)786  megasas_disable_intr_skinny(struct megasas_instance *instance)
787  {
788  	struct megasas_register_set __iomem *regs;
789  	u32 mask = 0xFFFFFFFF;
790  
791  	regs = instance->reg_set;
792  	writel(mask, &regs->outbound_intr_mask);
793  	/* Dummy readl to force pci flush */
794  	readl(&regs->outbound_intr_mask);
795  }
796  
797  /**
798   * megasas_read_fw_status_reg_skinny - returns the current FW status value
799   * @instance:	Adapter soft state
800   */
801  static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance * instance)802  megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
803  {
804  	return readl(&instance->reg_set->outbound_scratch_pad_0);
805  }
806  
807  /**
808   * megasas_clear_intr_skinny -	Check & clear interrupt
809   * @instance:	Adapter soft state
810   */
811  static int
megasas_clear_intr_skinny(struct megasas_instance * instance)812  megasas_clear_intr_skinny(struct megasas_instance *instance)
813  {
814  	u32 status;
815  	u32 mfiStatus = 0;
816  	struct megasas_register_set __iomem *regs;
817  	regs = instance->reg_set;
818  
819  	/*
820  	 * Check if it is our interrupt
821  	 */
822  	status = readl(&regs->outbound_intr_status);
823  
824  	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
825  		return 0;
826  	}
827  
828  	/*
829  	 * Check if it is our interrupt
830  	 */
831  	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
832  	    MFI_STATE_FAULT) {
833  		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
834  	} else
835  		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
836  
837  	/*
838  	 * Clear the interrupt by writing back the same value
839  	 */
840  	writel(status, &regs->outbound_intr_status);
841  
842  	/*
843  	 * dummy read to flush PCI
844  	 */
845  	readl(&regs->outbound_intr_status);
846  
847  	return mfiStatus;
848  }
849  
850  /**
851   * megasas_fire_cmd_skinny -	Sends command to the FW
852   * @instance:		Adapter soft state
853   * @frame_phys_addr:	Physical address of cmd
854   * @frame_count:	Number of frames for the command
855   * @regs:		MFI register set
856   */
857  static inline void
megasas_fire_cmd_skinny(struct megasas_instance * instance,dma_addr_t frame_phys_addr,u32 frame_count,struct megasas_register_set __iomem * regs)858  megasas_fire_cmd_skinny(struct megasas_instance *instance,
859  			dma_addr_t frame_phys_addr,
860  			u32 frame_count,
861  			struct megasas_register_set __iomem *regs)
862  {
863  	unsigned long flags;
864  
865  	spin_lock_irqsave(&instance->hba_lock, flags);
866  	writel(upper_32_bits(frame_phys_addr),
867  	       &(regs)->inbound_high_queue_port);
868  	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
869  	       &(regs)->inbound_low_queue_port);
870  	spin_unlock_irqrestore(&instance->hba_lock, flags);
871  }
872  
873  /**
874   * megasas_check_reset_skinny -	For controller reset check
875   * @instance:	Adapter soft state
876   * @regs:	MFI register set
877   */
878  static int
megasas_check_reset_skinny(struct megasas_instance * instance,struct megasas_register_set __iomem * regs)879  megasas_check_reset_skinny(struct megasas_instance *instance,
880  				struct megasas_register_set __iomem *regs)
881  {
882  	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
883  		return 1;
884  
885  	return 0;
886  }
887  
888  static struct megasas_instance_template megasas_instance_template_skinny = {
889  
890  	.fire_cmd = megasas_fire_cmd_skinny,
891  	.enable_intr = megasas_enable_intr_skinny,
892  	.disable_intr = megasas_disable_intr_skinny,
893  	.clear_intr = megasas_clear_intr_skinny,
894  	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
895  	.adp_reset = megasas_adp_reset_gen2,
896  	.check_reset = megasas_check_reset_skinny,
897  	.service_isr = megasas_isr,
898  	.tasklet = megasas_complete_cmd_dpc,
899  	.init_adapter = megasas_init_adapter_mfi,
900  	.build_and_issue_cmd = megasas_build_and_issue_cmd,
901  	.issue_dcmd = megasas_issue_dcmd,
902  };
903  
904  
905  /*
906   * The following functions are defined for gen2 (deviceid : 0x78 0x79)
907   * controllers
908   */
909  
910  /**
911   * megasas_enable_intr_gen2 -  Enables interrupts
912   * @instance:	Adapter soft state
913   */
914  static inline void
megasas_enable_intr_gen2(struct megasas_instance * instance)915  megasas_enable_intr_gen2(struct megasas_instance *instance)
916  {
917  	struct megasas_register_set __iomem *regs;
918  
919  	regs = instance->reg_set;
920  	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
921  
922  	/* write ~0x00000005 (4 & 1) to the intr mask*/
923  	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
924  
925  	/* Dummy readl to force pci flush */
926  	readl(&regs->outbound_intr_mask);
927  }
928  
929  /**
930   * megasas_disable_intr_gen2 - Disables interrupt
931   * @instance:	Adapter soft state
932   */
933  static inline void
megasas_disable_intr_gen2(struct megasas_instance * instance)934  megasas_disable_intr_gen2(struct megasas_instance *instance)
935  {
936  	struct megasas_register_set __iomem *regs;
937  	u32 mask = 0xFFFFFFFF;
938  
939  	regs = instance->reg_set;
940  	writel(mask, &regs->outbound_intr_mask);
941  	/* Dummy readl to force pci flush */
942  	readl(&regs->outbound_intr_mask);
943  }
944  
945  /**
946   * megasas_read_fw_status_reg_gen2 - returns the current FW status value
947   * @instance:	Adapter soft state
948   */
949  static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance * instance)950  megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
951  {
952  	return readl(&instance->reg_set->outbound_scratch_pad_0);
953  }
954  
955  /**
956   * megasas_clear_intr_gen2 -      Check & clear interrupt
957   * @instance:	Adapter soft state
958   */
959  static int
megasas_clear_intr_gen2(struct megasas_instance * instance)960  megasas_clear_intr_gen2(struct megasas_instance *instance)
961  {
962  	u32 status;
963  	u32 mfiStatus = 0;
964  	struct megasas_register_set __iomem *regs;
965  	regs = instance->reg_set;
966  
967  	/*
968  	 * Check if it is our interrupt
969  	 */
970  	status = readl(&regs->outbound_intr_status);
971  
972  	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
973  		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
974  	}
975  	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
976  		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
977  	}
978  
979  	/*
980  	 * Clear the interrupt by writing back the same value
981  	 */
982  	if (mfiStatus)
983  		writel(status, &regs->outbound_doorbell_clear);
984  
985  	/* Dummy readl to force pci flush */
986  	readl(&regs->outbound_intr_status);
987  
988  	return mfiStatus;
989  }
990  
991  /**
992   * megasas_fire_cmd_gen2 -     Sends command to the FW
993   * @instance:		Adapter soft state
994   * @frame_phys_addr:	Physical address of cmd
995   * @frame_count:	Number of frames for the command
996   * @regs:		MFI register set
997   */
998  static inline void
megasas_fire_cmd_gen2(struct megasas_instance * instance,dma_addr_t frame_phys_addr,u32 frame_count,struct megasas_register_set __iomem * regs)999  megasas_fire_cmd_gen2(struct megasas_instance *instance,
1000  			dma_addr_t frame_phys_addr,
1001  			u32 frame_count,
1002  			struct megasas_register_set __iomem *regs)
1003  {
1004  	unsigned long flags;
1005  
1006  	spin_lock_irqsave(&instance->hba_lock, flags);
1007  	writel((frame_phys_addr | (frame_count<<1))|1,
1008  			&(regs)->inbound_queue_port);
1009  	spin_unlock_irqrestore(&instance->hba_lock, flags);
1010  }
1011  
1012  /**
1013   * megasas_adp_reset_gen2 -	For controller reset
1014   * @instance:	Adapter soft state
1015   * @reg_set:	MFI register set
1016   */
1017  static int
megasas_adp_reset_gen2(struct megasas_instance * instance,struct megasas_register_set __iomem * reg_set)1018  megasas_adp_reset_gen2(struct megasas_instance *instance,
1019  			struct megasas_register_set __iomem *reg_set)
1020  {
1021  	u32 retry = 0 ;
1022  	u32 HostDiag;
1023  	u32 __iomem *seq_offset = &reg_set->seq_offset;
1024  	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
1025  
1026  	if (instance->instancet == &megasas_instance_template_skinny) {
1027  		seq_offset = &reg_set->fusion_seq_offset;
1028  		hostdiag_offset = &reg_set->fusion_host_diag;
1029  	}
1030  
1031  	writel(0, seq_offset);
1032  	writel(4, seq_offset);
1033  	writel(0xb, seq_offset);
1034  	writel(2, seq_offset);
1035  	writel(7, seq_offset);
1036  	writel(0xd, seq_offset);
1037  
1038  	msleep(1000);
1039  
1040  	HostDiag = (u32)readl(hostdiag_offset);
1041  
1042  	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1043  		msleep(100);
1044  		HostDiag = (u32)readl(hostdiag_offset);
1045  		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1046  					retry, HostDiag);
1047  
1048  		if (retry++ >= 100)
1049  			return 1;
1050  
1051  	}
1052  
1053  	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1054  
1055  	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1056  
1057  	ssleep(10);
1058  
1059  	HostDiag = (u32)readl(hostdiag_offset);
1060  	while (HostDiag & DIAG_RESET_ADAPTER) {
1061  		msleep(100);
1062  		HostDiag = (u32)readl(hostdiag_offset);
1063  		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1064  				retry, HostDiag);
1065  
1066  		if (retry++ >= 1000)
1067  			return 1;
1068  
1069  	}
1070  	return 0;
1071  }
1072  
1073  /**
1074   * megasas_check_reset_gen2 -	For controller reset check
1075   * @instance:	Adapter soft state
1076   * @regs:	MFI register set
1077   */
1078  static int
megasas_check_reset_gen2(struct megasas_instance * instance,struct megasas_register_set __iomem * regs)1079  megasas_check_reset_gen2(struct megasas_instance *instance,
1080  		struct megasas_register_set __iomem *regs)
1081  {
1082  	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1083  		return 1;
1084  
1085  	return 0;
1086  }
1087  
1088  static struct megasas_instance_template megasas_instance_template_gen2 = {
1089  
1090  	.fire_cmd = megasas_fire_cmd_gen2,
1091  	.enable_intr = megasas_enable_intr_gen2,
1092  	.disable_intr = megasas_disable_intr_gen2,
1093  	.clear_intr = megasas_clear_intr_gen2,
1094  	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1095  	.adp_reset = megasas_adp_reset_gen2,
1096  	.check_reset = megasas_check_reset_gen2,
1097  	.service_isr = megasas_isr,
1098  	.tasklet = megasas_complete_cmd_dpc,
1099  	.init_adapter = megasas_init_adapter_mfi,
1100  	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1101  	.issue_dcmd = megasas_issue_dcmd,
1102  };
1103  
1104  /*
1105   * This is the end of set of functions & definitions
1106   * specific to gen2 (deviceid : 0x78, 0x79) controllers
1107   */
1108  
1109  /*
1110   * Template added for TB (Fusion)
1111   */
1112  extern struct megasas_instance_template megasas_instance_template_fusion;
1113  
1114  /**
1115   * megasas_issue_polled -	Issues a polling command
1116   * @instance:			Adapter soft state
1117   * @cmd:			Command packet to be issued
1118   *
1119   * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1120   */
1121  int
megasas_issue_polled(struct megasas_instance * instance,struct megasas_cmd * cmd)1122  megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1123  {
1124  	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1125  
1126  	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1127  	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1128  
1129  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1130  		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1131  			__func__, __LINE__);
1132  		return DCMD_INIT;
1133  	}
1134  
1135  	instance->instancet->issue_dcmd(instance, cmd);
1136  
1137  	return wait_and_poll(instance, cmd, instance->requestorId ?
1138  			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1139  }
1140  
1141  /**
1142   * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1143   * @instance:			Adapter soft state
1144   * @cmd:			Command to be issued
1145   * @timeout:			Timeout in seconds
1146   *
1147   * This function waits on an event for the command to be returned from ISR.
1148   * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1149   * Used to issue ioctl commands.
1150   */
1151  int
megasas_issue_blocked_cmd(struct megasas_instance * instance,struct megasas_cmd * cmd,int timeout)1152  megasas_issue_blocked_cmd(struct megasas_instance *instance,
1153  			  struct megasas_cmd *cmd, int timeout)
1154  {
1155  	int ret = 0;
1156  	cmd->cmd_status_drv = DCMD_INIT;
1157  
1158  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1159  		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1160  			__func__, __LINE__);
1161  		return DCMD_INIT;
1162  	}
1163  
1164  	instance->instancet->issue_dcmd(instance, cmd);
1165  
1166  	if (timeout) {
1167  		ret = wait_event_timeout(instance->int_cmd_wait_q,
1168  		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1169  		if (!ret) {
1170  			dev_err(&instance->pdev->dev,
1171  				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1172  				cmd->frame->dcmd.opcode, __func__);
1173  			return DCMD_TIMEOUT;
1174  		}
1175  	} else
1176  		wait_event(instance->int_cmd_wait_q,
1177  				cmd->cmd_status_drv != DCMD_INIT);
1178  
1179  	return cmd->cmd_status_drv;
1180  }
1181  
1182  /**
1183   * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1184   * @instance:				Adapter soft state
1185   * @cmd_to_abort:			Previously issued cmd to be aborted
1186   * @timeout:				Timeout in seconds
1187   *
1188   * MFI firmware can abort previously issued AEN comamnd (automatic event
1189   * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1190   * cmd and waits for return status.
1191   * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1192   */
1193  static int
megasas_issue_blocked_abort_cmd(struct megasas_instance * instance,struct megasas_cmd * cmd_to_abort,int timeout)1194  megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1195  				struct megasas_cmd *cmd_to_abort, int timeout)
1196  {
1197  	struct megasas_cmd *cmd;
1198  	struct megasas_abort_frame *abort_fr;
1199  	int ret = 0;
1200  	u32 opcode;
1201  
1202  	cmd = megasas_get_cmd(instance);
1203  
1204  	if (!cmd)
1205  		return -1;
1206  
1207  	abort_fr = &cmd->frame->abort;
1208  
1209  	/*
1210  	 * Prepare and issue the abort frame
1211  	 */
1212  	abort_fr->cmd = MFI_CMD_ABORT;
1213  	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1214  	abort_fr->flags = cpu_to_le16(0);
1215  	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1216  	abort_fr->abort_mfi_phys_addr_lo =
1217  		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1218  	abort_fr->abort_mfi_phys_addr_hi =
1219  		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1220  
1221  	cmd->sync_cmd = 1;
1222  	cmd->cmd_status_drv = DCMD_INIT;
1223  
1224  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1225  		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1226  			__func__, __LINE__);
1227  		return DCMD_INIT;
1228  	}
1229  
1230  	instance->instancet->issue_dcmd(instance, cmd);
1231  
1232  	if (timeout) {
1233  		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1234  		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1235  		if (!ret) {
1236  			opcode = cmd_to_abort->frame->dcmd.opcode;
1237  			dev_err(&instance->pdev->dev,
1238  				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1239  				opcode,  __func__);
1240  			return DCMD_TIMEOUT;
1241  		}
1242  	} else
1243  		wait_event(instance->abort_cmd_wait_q,
1244  		cmd->cmd_status_drv != DCMD_INIT);
1245  
1246  	cmd->sync_cmd = 0;
1247  
1248  	megasas_return_cmd(instance, cmd);
1249  	return cmd->cmd_status_drv;
1250  }
1251  
1252  /**
1253   * megasas_make_sgl32 -	Prepares 32-bit SGL
1254   * @instance:		Adapter soft state
1255   * @scp:		SCSI command from the mid-layer
1256   * @mfi_sgl:		SGL to be filled in
1257   *
1258   * If successful, this function returns the number of SG elements. Otherwise,
1259   * it returnes -1.
1260   */
1261  static int
megasas_make_sgl32(struct megasas_instance * instance,struct scsi_cmnd * scp,union megasas_sgl * mfi_sgl)1262  megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1263  		   union megasas_sgl *mfi_sgl)
1264  {
1265  	int i;
1266  	int sge_count;
1267  	struct scatterlist *os_sgl;
1268  
1269  	sge_count = scsi_dma_map(scp);
1270  	BUG_ON(sge_count < 0);
1271  
1272  	if (sge_count) {
1273  		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1274  			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1275  			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1276  		}
1277  	}
1278  	return sge_count;
1279  }
1280  
1281  /**
1282   * megasas_make_sgl64 -	Prepares 64-bit SGL
1283   * @instance:		Adapter soft state
1284   * @scp:		SCSI command from the mid-layer
1285   * @mfi_sgl:		SGL to be filled in
1286   *
1287   * If successful, this function returns the number of SG elements. Otherwise,
1288   * it returnes -1.
1289   */
1290  static int
megasas_make_sgl64(struct megasas_instance * instance,struct scsi_cmnd * scp,union megasas_sgl * mfi_sgl)1291  megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1292  		   union megasas_sgl *mfi_sgl)
1293  {
1294  	int i;
1295  	int sge_count;
1296  	struct scatterlist *os_sgl;
1297  
1298  	sge_count = scsi_dma_map(scp);
1299  	BUG_ON(sge_count < 0);
1300  
1301  	if (sge_count) {
1302  		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1303  			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1304  			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1305  		}
1306  	}
1307  	return sge_count;
1308  }
1309  
1310  /**
1311   * megasas_make_sgl_skinny - Prepares IEEE SGL
1312   * @instance:           Adapter soft state
1313   * @scp:                SCSI command from the mid-layer
1314   * @mfi_sgl:            SGL to be filled in
1315   *
1316   * If successful, this function returns the number of SG elements. Otherwise,
1317   * it returnes -1.
1318   */
1319  static int
megasas_make_sgl_skinny(struct megasas_instance * instance,struct scsi_cmnd * scp,union megasas_sgl * mfi_sgl)1320  megasas_make_sgl_skinny(struct megasas_instance *instance,
1321  		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1322  {
1323  	int i;
1324  	int sge_count;
1325  	struct scatterlist *os_sgl;
1326  
1327  	sge_count = scsi_dma_map(scp);
1328  
1329  	if (sge_count) {
1330  		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1331  			mfi_sgl->sge_skinny[i].length =
1332  				cpu_to_le32(sg_dma_len(os_sgl));
1333  			mfi_sgl->sge_skinny[i].phys_addr =
1334  				cpu_to_le64(sg_dma_address(os_sgl));
1335  			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1336  		}
1337  	}
1338  	return sge_count;
1339  }
1340  
1341   /**
1342   * megasas_get_frame_count - Computes the number of frames
1343   * @frame_type		: type of frame- io or pthru frame
1344   * @sge_count		: number of sg elements
1345   *
1346   * Returns the number of frames required for numnber of sge's (sge_count)
1347   */
1348  
megasas_get_frame_count(struct megasas_instance * instance,u8 sge_count,u8 frame_type)1349  static u32 megasas_get_frame_count(struct megasas_instance *instance,
1350  			u8 sge_count, u8 frame_type)
1351  {
1352  	int num_cnt;
1353  	int sge_bytes;
1354  	u32 sge_sz;
1355  	u32 frame_count = 0;
1356  
1357  	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1358  	    sizeof(struct megasas_sge32);
1359  
1360  	if (instance->flag_ieee) {
1361  		sge_sz = sizeof(struct megasas_sge_skinny);
1362  	}
1363  
1364  	/*
1365  	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1366  	 * 3 SGEs for 32-bit SGLs for ldio &
1367  	 * 1 SGEs for 64-bit SGLs and
1368  	 * 2 SGEs for 32-bit SGLs for pthru frame
1369  	 */
1370  	if (unlikely(frame_type == PTHRU_FRAME)) {
1371  		if (instance->flag_ieee == 1) {
1372  			num_cnt = sge_count - 1;
1373  		} else if (IS_DMA64)
1374  			num_cnt = sge_count - 1;
1375  		else
1376  			num_cnt = sge_count - 2;
1377  	} else {
1378  		if (instance->flag_ieee == 1) {
1379  			num_cnt = sge_count - 1;
1380  		} else if (IS_DMA64)
1381  			num_cnt = sge_count - 2;
1382  		else
1383  			num_cnt = sge_count - 3;
1384  	}
1385  
1386  	if (num_cnt > 0) {
1387  		sge_bytes = sge_sz * num_cnt;
1388  
1389  		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1390  		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1391  	}
1392  	/* Main frame */
1393  	frame_count += 1;
1394  
1395  	if (frame_count > 7)
1396  		frame_count = 8;
1397  	return frame_count;
1398  }
1399  
1400  /**
1401   * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1402   * @instance:		Adapter soft state
1403   * @scp:		SCSI command
1404   * @cmd:		Command to be prepared in
1405   *
1406   * This function prepares CDB commands. These are typcially pass-through
1407   * commands to the devices.
1408   */
1409  static int
megasas_build_dcdb(struct megasas_instance * instance,struct scsi_cmnd * scp,struct megasas_cmd * cmd)1410  megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1411  		   struct megasas_cmd *cmd)
1412  {
1413  	u32 is_logical;
1414  	u32 device_id;
1415  	u16 flags = 0;
1416  	struct megasas_pthru_frame *pthru;
1417  
1418  	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1419  	device_id = MEGASAS_DEV_INDEX(scp);
1420  	pthru = (struct megasas_pthru_frame *)cmd->frame;
1421  
1422  	if (scp->sc_data_direction == DMA_TO_DEVICE)
1423  		flags = MFI_FRAME_DIR_WRITE;
1424  	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1425  		flags = MFI_FRAME_DIR_READ;
1426  	else if (scp->sc_data_direction == DMA_NONE)
1427  		flags = MFI_FRAME_DIR_NONE;
1428  
1429  	if (instance->flag_ieee == 1) {
1430  		flags |= MFI_FRAME_IEEE;
1431  	}
1432  
1433  	/*
1434  	 * Prepare the DCDB frame
1435  	 */
1436  	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1437  	pthru->cmd_status = 0x0;
1438  	pthru->scsi_status = 0x0;
1439  	pthru->target_id = device_id;
1440  	pthru->lun = scp->device->lun;
1441  	pthru->cdb_len = scp->cmd_len;
1442  	pthru->timeout = 0;
1443  	pthru->pad_0 = 0;
1444  	pthru->flags = cpu_to_le16(flags);
1445  	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1446  
1447  	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1448  
1449  	/*
1450  	 * If the command is for the tape device, set the
1451  	 * pthru timeout to the os layer timeout value.
1452  	 */
1453  	if (scp->device->type == TYPE_TAPE) {
1454  		if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
1455  			pthru->timeout = cpu_to_le16(0xFFFF);
1456  		else
1457  			pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
1458  	}
1459  
1460  	/*
1461  	 * Construct SGL
1462  	 */
1463  	if (instance->flag_ieee == 1) {
1464  		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1465  		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1466  						      &pthru->sgl);
1467  	} else if (IS_DMA64) {
1468  		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1469  		pthru->sge_count = megasas_make_sgl64(instance, scp,
1470  						      &pthru->sgl);
1471  	} else
1472  		pthru->sge_count = megasas_make_sgl32(instance, scp,
1473  						      &pthru->sgl);
1474  
1475  	if (pthru->sge_count > instance->max_num_sge) {
1476  		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1477  			pthru->sge_count);
1478  		return 0;
1479  	}
1480  
1481  	/*
1482  	 * Sense info specific
1483  	 */
1484  	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1485  	pthru->sense_buf_phys_addr_hi =
1486  		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1487  	pthru->sense_buf_phys_addr_lo =
1488  		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1489  
1490  	/*
1491  	 * Compute the total number of frames this command consumes. FW uses
1492  	 * this number to pull sufficient number of frames from host memory.
1493  	 */
1494  	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1495  							PTHRU_FRAME);
1496  
1497  	return cmd->frame_count;
1498  }
1499  
1500  /**
1501   * megasas_build_ldio -	Prepares IOs to logical devices
1502   * @instance:		Adapter soft state
1503   * @scp:		SCSI command
1504   * @cmd:		Command to be prepared
1505   *
1506   * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1507   */
1508  static int
megasas_build_ldio(struct megasas_instance * instance,struct scsi_cmnd * scp,struct megasas_cmd * cmd)1509  megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1510  		   struct megasas_cmd *cmd)
1511  {
1512  	u32 device_id;
1513  	u8 sc = scp->cmnd[0];
1514  	u16 flags = 0;
1515  	struct megasas_io_frame *ldio;
1516  
1517  	device_id = MEGASAS_DEV_INDEX(scp);
1518  	ldio = (struct megasas_io_frame *)cmd->frame;
1519  
1520  	if (scp->sc_data_direction == DMA_TO_DEVICE)
1521  		flags = MFI_FRAME_DIR_WRITE;
1522  	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1523  		flags = MFI_FRAME_DIR_READ;
1524  
1525  	if (instance->flag_ieee == 1) {
1526  		flags |= MFI_FRAME_IEEE;
1527  	}
1528  
1529  	/*
1530  	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1531  	 */
1532  	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1533  	ldio->cmd_status = 0x0;
1534  	ldio->scsi_status = 0x0;
1535  	ldio->target_id = device_id;
1536  	ldio->timeout = 0;
1537  	ldio->reserved_0 = 0;
1538  	ldio->pad_0 = 0;
1539  	ldio->flags = cpu_to_le16(flags);
1540  	ldio->start_lba_hi = 0;
1541  	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1542  
1543  	/*
1544  	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1545  	 */
1546  	if (scp->cmd_len == 6) {
1547  		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1548  		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1549  						 ((u32) scp->cmnd[2] << 8) |
1550  						 (u32) scp->cmnd[3]);
1551  
1552  		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1553  	}
1554  
1555  	/*
1556  	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1557  	 */
1558  	else if (scp->cmd_len == 10) {
1559  		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1560  					      ((u32) scp->cmnd[7] << 8));
1561  		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1562  						 ((u32) scp->cmnd[3] << 16) |
1563  						 ((u32) scp->cmnd[4] << 8) |
1564  						 (u32) scp->cmnd[5]);
1565  	}
1566  
1567  	/*
1568  	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1569  	 */
1570  	else if (scp->cmd_len == 12) {
1571  		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1572  					      ((u32) scp->cmnd[7] << 16) |
1573  					      ((u32) scp->cmnd[8] << 8) |
1574  					      (u32) scp->cmnd[9]);
1575  
1576  		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1577  						 ((u32) scp->cmnd[3] << 16) |
1578  						 ((u32) scp->cmnd[4] << 8) |
1579  						 (u32) scp->cmnd[5]);
1580  	}
1581  
1582  	/*
1583  	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1584  	 */
1585  	else if (scp->cmd_len == 16) {
1586  		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1587  					      ((u32) scp->cmnd[11] << 16) |
1588  					      ((u32) scp->cmnd[12] << 8) |
1589  					      (u32) scp->cmnd[13]);
1590  
1591  		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1592  						 ((u32) scp->cmnd[7] << 16) |
1593  						 ((u32) scp->cmnd[8] << 8) |
1594  						 (u32) scp->cmnd[9]);
1595  
1596  		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1597  						 ((u32) scp->cmnd[3] << 16) |
1598  						 ((u32) scp->cmnd[4] << 8) |
1599  						 (u32) scp->cmnd[5]);
1600  
1601  	}
1602  
1603  	/*
1604  	 * Construct SGL
1605  	 */
1606  	if (instance->flag_ieee) {
1607  		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1608  		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1609  					      &ldio->sgl);
1610  	} else if (IS_DMA64) {
1611  		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1612  		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1613  	} else
1614  		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1615  
1616  	if (ldio->sge_count > instance->max_num_sge) {
1617  		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1618  			ldio->sge_count);
1619  		return 0;
1620  	}
1621  
1622  	/*
1623  	 * Sense info specific
1624  	 */
1625  	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1626  	ldio->sense_buf_phys_addr_hi = 0;
1627  	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1628  
1629  	/*
1630  	 * Compute the total number of frames this command consumes. FW uses
1631  	 * this number to pull sufficient number of frames from host memory.
1632  	 */
1633  	cmd->frame_count = megasas_get_frame_count(instance,
1634  			ldio->sge_count, IO_FRAME);
1635  
1636  	return cmd->frame_count;
1637  }
1638  
1639  /**
1640   * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1641   *				and whether it's RW or non RW
1642   * @cmd:			SCSI command
1643   *
1644   */
megasas_cmd_type(struct scsi_cmnd * cmd)1645  inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1646  {
1647  	int ret;
1648  
1649  	switch (cmd->cmnd[0]) {
1650  	case READ_10:
1651  	case WRITE_10:
1652  	case READ_12:
1653  	case WRITE_12:
1654  	case READ_6:
1655  	case WRITE_6:
1656  	case READ_16:
1657  	case WRITE_16:
1658  		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1659  			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1660  		break;
1661  	default:
1662  		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1663  			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1664  	}
1665  	return ret;
1666  }
1667  
1668   /**
1669   * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1670   *					in FW
1671   * @instance:				Adapter soft state
1672   */
1673  static inline void
megasas_dump_pending_frames(struct megasas_instance * instance)1674  megasas_dump_pending_frames(struct megasas_instance *instance)
1675  {
1676  	struct megasas_cmd *cmd;
1677  	int i,n;
1678  	union megasas_sgl *mfi_sgl;
1679  	struct megasas_io_frame *ldio;
1680  	struct megasas_pthru_frame *pthru;
1681  	u32 sgcount;
1682  	u16 max_cmd = instance->max_fw_cmds;
1683  
1684  	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1685  	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1686  	if (IS_DMA64)
1687  		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1688  	else
1689  		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1690  
1691  	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1692  	for (i = 0; i < max_cmd; i++) {
1693  		cmd = instance->cmd_list[i];
1694  		if (!cmd->scmd)
1695  			continue;
1696  		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1697  		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1698  			ldio = (struct megasas_io_frame *)cmd->frame;
1699  			mfi_sgl = &ldio->sgl;
1700  			sgcount = ldio->sge_count;
1701  			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1702  			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1703  			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1704  			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1705  			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1706  		} else {
1707  			pthru = (struct megasas_pthru_frame *) cmd->frame;
1708  			mfi_sgl = &pthru->sgl;
1709  			sgcount = pthru->sge_count;
1710  			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1711  			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1712  			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1713  			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1714  			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1715  		}
1716  		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1717  			for (n = 0; n < sgcount; n++) {
1718  				if (IS_DMA64)
1719  					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1720  						le32_to_cpu(mfi_sgl->sge64[n].length),
1721  						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1722  				else
1723  					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1724  						le32_to_cpu(mfi_sgl->sge32[n].length),
1725  						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1726  			}
1727  		}
1728  	} /*for max_cmd*/
1729  	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1730  	for (i = 0; i < max_cmd; i++) {
1731  
1732  		cmd = instance->cmd_list[i];
1733  
1734  		if (cmd->sync_cmd == 1)
1735  			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1736  	}
1737  	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1738  }
1739  
1740  u32
megasas_build_and_issue_cmd(struct megasas_instance * instance,struct scsi_cmnd * scmd)1741  megasas_build_and_issue_cmd(struct megasas_instance *instance,
1742  			    struct scsi_cmnd *scmd)
1743  {
1744  	struct megasas_cmd *cmd;
1745  	u32 frame_count;
1746  
1747  	cmd = megasas_get_cmd(instance);
1748  	if (!cmd)
1749  		return SCSI_MLQUEUE_HOST_BUSY;
1750  
1751  	/*
1752  	 * Logical drive command
1753  	 */
1754  	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1755  		frame_count = megasas_build_ldio(instance, scmd, cmd);
1756  	else
1757  		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1758  
1759  	if (!frame_count)
1760  		goto out_return_cmd;
1761  
1762  	cmd->scmd = scmd;
1763  	megasas_priv(scmd)->cmd_priv = cmd;
1764  
1765  	/*
1766  	 * Issue the command to the FW
1767  	 */
1768  	atomic_inc(&instance->fw_outstanding);
1769  
1770  	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1771  				cmd->frame_count-1, instance->reg_set);
1772  
1773  	return 0;
1774  out_return_cmd:
1775  	megasas_return_cmd(instance, cmd);
1776  	return SCSI_MLQUEUE_HOST_BUSY;
1777  }
1778  
1779  
1780  /**
1781   * megasas_queue_command -	Queue entry point
1782   * @shost:			adapter SCSI host
1783   * @scmd:			SCSI command to be queued
1784   */
1785  static int
megasas_queue_command(struct Scsi_Host * shost,struct scsi_cmnd * scmd)1786  megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1787  {
1788  	struct megasas_instance *instance;
1789  	struct MR_PRIV_DEVICE *mr_device_priv_data;
1790  	u32 ld_tgt_id;
1791  
1792  	instance = (struct megasas_instance *)
1793  	    scmd->device->host->hostdata;
1794  
1795  	if (instance->unload == 1) {
1796  		scmd->result = DID_NO_CONNECT << 16;
1797  		scsi_done(scmd);
1798  		return 0;
1799  	}
1800  
1801  	if (instance->issuepend_done == 0)
1802  		return SCSI_MLQUEUE_HOST_BUSY;
1803  
1804  
1805  	/* Check for an mpio path and adjust behavior */
1806  	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1807  		if (megasas_check_mpio_paths(instance, scmd) ==
1808  		    (DID_REQUEUE << 16)) {
1809  			return SCSI_MLQUEUE_HOST_BUSY;
1810  		} else {
1811  			scmd->result = DID_NO_CONNECT << 16;
1812  			scsi_done(scmd);
1813  			return 0;
1814  		}
1815  	}
1816  
1817  	mr_device_priv_data = scmd->device->hostdata;
1818  	if (!mr_device_priv_data ||
1819  	    (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
1820  		scmd->result = DID_NO_CONNECT << 16;
1821  		scsi_done(scmd);
1822  		return 0;
1823  	}
1824  
1825  	if (MEGASAS_IS_LOGICAL(scmd->device)) {
1826  		ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
1827  		if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
1828  			scmd->result = DID_NO_CONNECT << 16;
1829  			scsi_done(scmd);
1830  			return 0;
1831  		}
1832  	}
1833  
1834  	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1835  		return SCSI_MLQUEUE_HOST_BUSY;
1836  
1837  	if (mr_device_priv_data->tm_busy)
1838  		return SCSI_MLQUEUE_DEVICE_BUSY;
1839  
1840  
1841  	scmd->result = 0;
1842  
1843  	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1844  	    (scmd->device->id >= instance->fw_supported_vd_count ||
1845  		scmd->device->lun)) {
1846  		scmd->result = DID_BAD_TARGET << 16;
1847  		goto out_done;
1848  	}
1849  
1850  	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1851  	    MEGASAS_IS_LOGICAL(scmd->device) &&
1852  	    (!instance->fw_sync_cache_support)) {
1853  		scmd->result = DID_OK << 16;
1854  		goto out_done;
1855  	}
1856  
1857  	return instance->instancet->build_and_issue_cmd(instance, scmd);
1858  
1859   out_done:
1860  	scsi_done(scmd);
1861  	return 0;
1862  }
1863  
megasas_lookup_instance(u16 host_no)1864  static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1865  {
1866  	int i;
1867  
1868  	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1869  
1870  		if ((megasas_mgmt_info.instance[i]) &&
1871  		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1872  			return megasas_mgmt_info.instance[i];
1873  	}
1874  
1875  	return NULL;
1876  }
1877  
1878  /*
1879  * megasas_set_dynamic_target_properties -
1880  * Device property set by driver may not be static and it is required to be
1881  * updated after OCR
1882  *
1883  * set tm_capable.
1884  * set dma alignment (only for eedp protection enable vd).
1885  *
1886  * @sdev: OS provided scsi device
1887  *
1888  * Returns void
1889  */
megasas_set_dynamic_target_properties(struct scsi_device * sdev,bool is_target_prop)1890  void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1891  					   bool is_target_prop)
1892  {
1893  	u16 pd_index = 0, ld;
1894  	u32 device_id;
1895  	struct megasas_instance *instance;
1896  	struct fusion_context *fusion;
1897  	struct MR_PRIV_DEVICE *mr_device_priv_data;
1898  	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1899  	struct MR_LD_RAID *raid;
1900  	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1901  
1902  	instance = megasas_lookup_instance(sdev->host->host_no);
1903  	fusion = instance->ctrl_context;
1904  	mr_device_priv_data = sdev->hostdata;
1905  
1906  	if (!fusion || !mr_device_priv_data)
1907  		return;
1908  
1909  	if (MEGASAS_IS_LOGICAL(sdev)) {
1910  		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1911  					+ sdev->id;
1912  		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1913  		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1914  		if (ld >= instance->fw_supported_vd_count)
1915  			return;
1916  		raid = MR_LdRaidGet(ld, local_map_ptr);
1917  
1918  		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1919  			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1920  
1921  		mr_device_priv_data->is_tm_capable =
1922  			raid->capability.tmCapable;
1923  
1924  		if (!raid->flags.isEPD)
1925  			sdev->no_write_same = 1;
1926  
1927  	} else if (instance->use_seqnum_jbod_fp) {
1928  		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1929  			sdev->id;
1930  		pd_sync = (void *)fusion->pd_seq_sync
1931  				[(instance->pd_seq_map_id - 1) & 1];
1932  		mr_device_priv_data->is_tm_capable =
1933  			pd_sync->seq[pd_index].capability.tmCapable;
1934  	}
1935  
1936  	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1937  		/*
1938  		 * If FW provides a target reset timeout value, driver will use
1939  		 * it. If not set, fallback to default values.
1940  		 */
1941  		mr_device_priv_data->target_reset_tmo =
1942  			min_t(u8, instance->max_reset_tmo,
1943  			      instance->tgt_prop->reset_tmo);
1944  		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1945  	} else {
1946  		mr_device_priv_data->target_reset_tmo =
1947  						MEGASAS_DEFAULT_TM_TIMEOUT;
1948  		mr_device_priv_data->task_abort_tmo =
1949  						MEGASAS_DEFAULT_TM_TIMEOUT;
1950  	}
1951  }
1952  
1953  /*
1954   * megasas_set_nvme_device_properties -
1955   * set nomerges=2
1956   * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1957   * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1958   *
1959   * MR firmware provides value in KB. Caller of this function converts
1960   * kb into bytes.
1961   *
1962   * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
1963   * MR firmware provides value 128 as (32 * 4K) = 128K.
1964   *
1965   * @sdev:				scsi device
1966   * @max_io_size:				maximum io transfer size
1967   *
1968   */
1969  static inline void
megasas_set_nvme_device_properties(struct scsi_device * sdev,u32 max_io_size)1970  megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1971  {
1972  	struct megasas_instance *instance;
1973  	u32 mr_nvme_pg_size;
1974  
1975  	instance = (struct megasas_instance *)sdev->host->hostdata;
1976  	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1977  				MR_DEFAULT_NVME_PAGE_SIZE);
1978  
1979  	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1980  
1981  	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1982  	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1983  }
1984  
1985  /*
1986   * megasas_set_fw_assisted_qd -
1987   * set device queue depth to can_queue
1988   * set device queue depth to fw assisted qd
1989   *
1990   * @sdev:				scsi device
1991   * @is_target_prop			true, if fw provided target properties.
1992   */
megasas_set_fw_assisted_qd(struct scsi_device * sdev,bool is_target_prop)1993  static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1994  						 bool is_target_prop)
1995  {
1996  	u8 interface_type;
1997  	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1998  	u32 tgt_device_qd;
1999  	struct megasas_instance *instance;
2000  	struct MR_PRIV_DEVICE *mr_device_priv_data;
2001  
2002  	instance = megasas_lookup_instance(sdev->host->host_no);
2003  	mr_device_priv_data = sdev->hostdata;
2004  	interface_type  = mr_device_priv_data->interface_type;
2005  
2006  	switch (interface_type) {
2007  	case SAS_PD:
2008  		device_qd = MEGASAS_SAS_QD;
2009  		break;
2010  	case SATA_PD:
2011  		device_qd = MEGASAS_SATA_QD;
2012  		break;
2013  	case NVME_PD:
2014  		device_qd = MEGASAS_NVME_QD;
2015  		break;
2016  	}
2017  
2018  	if (is_target_prop) {
2019  		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
2020  		if (tgt_device_qd)
2021  			device_qd = min(instance->host->can_queue,
2022  					(int)tgt_device_qd);
2023  	}
2024  
2025  	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
2026  		device_qd = instance->host->can_queue;
2027  
2028  	scsi_change_queue_depth(sdev, device_qd);
2029  }
2030  
2031  /*
2032   * megasas_set_static_target_properties -
2033   * Device property set by driver are static and it is not required to be
2034   * updated after OCR.
2035   *
2036   * set io timeout
2037   * set device queue depth
2038   * set nvme device properties. see - megasas_set_nvme_device_properties
2039   *
2040   * @sdev:				scsi device
2041   * @is_target_prop			true, if fw provided target properties.
2042   */
megasas_set_static_target_properties(struct scsi_device * sdev,bool is_target_prop)2043  static void megasas_set_static_target_properties(struct scsi_device *sdev,
2044  						 bool is_target_prop)
2045  {
2046  	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2047  	struct megasas_instance *instance;
2048  
2049  	instance = megasas_lookup_instance(sdev->host->host_no);
2050  
2051  	/*
2052  	 * The RAID firmware may require extended timeouts.
2053  	 */
2054  	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2055  
2056  	/* max_io_size_kb will be set to non zero for
2057  	 * nvme based vd and syspd.
2058  	 */
2059  	if (is_target_prop)
2060  		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2061  
2062  	if (instance->nvme_page_size && max_io_size_kb)
2063  		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2064  
2065  	megasas_set_fw_assisted_qd(sdev, is_target_prop);
2066  }
2067  
2068  
megasas_slave_configure(struct scsi_device * sdev)2069  static int megasas_slave_configure(struct scsi_device *sdev)
2070  {
2071  	u16 pd_index = 0;
2072  	struct megasas_instance *instance;
2073  	int ret_target_prop = DCMD_FAILED;
2074  	bool is_target_prop = false;
2075  
2076  	instance = megasas_lookup_instance(sdev->host->host_no);
2077  	if (instance->pd_list_not_supported) {
2078  		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2079  			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2080  				sdev->id;
2081  			if (instance->pd_list[pd_index].driveState !=
2082  				MR_PD_STATE_SYSTEM)
2083  				return -ENXIO;
2084  		}
2085  	}
2086  
2087  	mutex_lock(&instance->reset_mutex);
2088  	/* Send DCMD to Firmware and cache the information */
2089  	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2090  		megasas_get_pd_info(instance, sdev);
2091  
2092  	/* Some ventura firmware may not have instance->nvme_page_size set.
2093  	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP
2094  	 */
2095  	if ((instance->tgt_prop) && (instance->nvme_page_size))
2096  		ret_target_prop = megasas_get_target_prop(instance, sdev);
2097  
2098  	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2099  	megasas_set_static_target_properties(sdev, is_target_prop);
2100  
2101  	/* This sdev property may change post OCR */
2102  	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2103  
2104  	mutex_unlock(&instance->reset_mutex);
2105  
2106  	return 0;
2107  }
2108  
megasas_slave_alloc(struct scsi_device * sdev)2109  static int megasas_slave_alloc(struct scsi_device *sdev)
2110  {
2111  	u16 pd_index = 0, ld_tgt_id;
2112  	struct megasas_instance *instance ;
2113  	struct MR_PRIV_DEVICE *mr_device_priv_data;
2114  
2115  	instance = megasas_lookup_instance(sdev->host->host_no);
2116  	if (!MEGASAS_IS_LOGICAL(sdev)) {
2117  		/*
2118  		 * Open the OS scan to the SYSTEM PD
2119  		 */
2120  		pd_index =
2121  			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2122  			sdev->id;
2123  		if ((instance->pd_list_not_supported ||
2124  			instance->pd_list[pd_index].driveState ==
2125  			MR_PD_STATE_SYSTEM)) {
2126  			goto scan_target;
2127  		}
2128  		return -ENXIO;
2129  	} else if (!MEGASAS_IS_LUN_VALID(sdev)) {
2130  		sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
2131  		return -ENXIO;
2132  	}
2133  
2134  scan_target:
2135  	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2136  					GFP_KERNEL);
2137  	if (!mr_device_priv_data)
2138  		return -ENOMEM;
2139  
2140  	if (MEGASAS_IS_LOGICAL(sdev)) {
2141  		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
2142  		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
2143  		if (megasas_dbg_lvl & LD_PD_DEBUG)
2144  			sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
2145  	}
2146  
2147  	sdev->hostdata = mr_device_priv_data;
2148  
2149  	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2150  		   instance->r1_ldio_hint_default);
2151  	return 0;
2152  }
2153  
megasas_slave_destroy(struct scsi_device * sdev)2154  static void megasas_slave_destroy(struct scsi_device *sdev)
2155  {
2156  	u16 ld_tgt_id;
2157  	struct megasas_instance *instance;
2158  
2159  	instance = megasas_lookup_instance(sdev->host->host_no);
2160  
2161  	if (MEGASAS_IS_LOGICAL(sdev)) {
2162  		if (!MEGASAS_IS_LUN_VALID(sdev)) {
2163  			sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
2164  			return;
2165  		}
2166  		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
2167  		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
2168  		if (megasas_dbg_lvl & LD_PD_DEBUG)
2169  			sdev_printk(KERN_INFO, sdev,
2170  				    "LD target ID %d removed from OS stack\n", ld_tgt_id);
2171  	}
2172  
2173  	kfree(sdev->hostdata);
2174  	sdev->hostdata = NULL;
2175  }
2176  
2177  /*
2178  * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
2179  *                                       kill adapter
2180  * @instance:				Adapter soft state
2181  *
2182  */
megasas_complete_outstanding_ioctls(struct megasas_instance * instance)2183  static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2184  {
2185  	int i;
2186  	struct megasas_cmd *cmd_mfi;
2187  	struct megasas_cmd_fusion *cmd_fusion;
2188  	struct fusion_context *fusion = instance->ctrl_context;
2189  
2190  	/* Find all outstanding ioctls */
2191  	if (fusion) {
2192  		for (i = 0; i < instance->max_fw_cmds; i++) {
2193  			cmd_fusion = fusion->cmd_list[i];
2194  			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2195  				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2196  				if (cmd_mfi->sync_cmd &&
2197  				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2198  					cmd_mfi->frame->hdr.cmd_status =
2199  							MFI_STAT_WRONG_STATE;
2200  					megasas_complete_cmd(instance,
2201  							     cmd_mfi, DID_OK);
2202  				}
2203  			}
2204  		}
2205  	} else {
2206  		for (i = 0; i < instance->max_fw_cmds; i++) {
2207  			cmd_mfi = instance->cmd_list[i];
2208  			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2209  				MFI_CMD_ABORT)
2210  				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2211  		}
2212  	}
2213  }
2214  
2215  
megaraid_sas_kill_hba(struct megasas_instance * instance)2216  void megaraid_sas_kill_hba(struct megasas_instance *instance)
2217  {
2218  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2219  		dev_warn(&instance->pdev->dev,
2220  			 "Adapter already dead, skipping kill HBA\n");
2221  		return;
2222  	}
2223  
2224  	/* Set critical error to block I/O & ioctls in case caller didn't */
2225  	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2226  	/* Wait 1 second to ensure IO or ioctls in build have posted */
2227  	msleep(1000);
2228  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2229  		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2230  		(instance->adapter_type != MFI_SERIES)) {
2231  		if (!instance->requestorId) {
2232  			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2233  			/* Flush */
2234  			readl(&instance->reg_set->doorbell);
2235  		}
2236  		if (instance->requestorId && instance->peerIsPresent)
2237  			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2238  	} else {
2239  		writel(MFI_STOP_ADP,
2240  			&instance->reg_set->inbound_doorbell);
2241  	}
2242  	/* Complete outstanding ioctls when adapter is killed */
2243  	megasas_complete_outstanding_ioctls(instance);
2244  }
2245  
2246   /**
2247    * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2248    *					restored to max value
2249    * @instance:			Adapter soft state
2250    *
2251    */
2252  void
megasas_check_and_restore_queue_depth(struct megasas_instance * instance)2253  megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2254  {
2255  	unsigned long flags;
2256  
2257  	if (instance->flag & MEGASAS_FW_BUSY
2258  	    && time_after(jiffies, instance->last_time + 5 * HZ)
2259  	    && atomic_read(&instance->fw_outstanding) <
2260  	    instance->throttlequeuedepth + 1) {
2261  
2262  		spin_lock_irqsave(instance->host->host_lock, flags);
2263  		instance->flag &= ~MEGASAS_FW_BUSY;
2264  
2265  		instance->host->can_queue = instance->cur_can_queue;
2266  		spin_unlock_irqrestore(instance->host->host_lock, flags);
2267  	}
2268  }
2269  
2270  /**
2271   * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
2272   * @instance_addr:			Address of adapter soft state
2273   *
2274   * Tasklet to complete cmds
2275   */
megasas_complete_cmd_dpc(unsigned long instance_addr)2276  static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2277  {
2278  	u32 producer;
2279  	u32 consumer;
2280  	u32 context;
2281  	struct megasas_cmd *cmd;
2282  	struct megasas_instance *instance =
2283  				(struct megasas_instance *)instance_addr;
2284  	unsigned long flags;
2285  
2286  	/* If we have already declared adapter dead, donot complete cmds */
2287  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2288  		return;
2289  
2290  	spin_lock_irqsave(&instance->completion_lock, flags);
2291  
2292  	producer = le32_to_cpu(*instance->producer);
2293  	consumer = le32_to_cpu(*instance->consumer);
2294  
2295  	while (consumer != producer) {
2296  		context = le32_to_cpu(instance->reply_queue[consumer]);
2297  		if (context >= instance->max_fw_cmds) {
2298  			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2299  				context);
2300  			BUG();
2301  		}
2302  
2303  		cmd = instance->cmd_list[context];
2304  
2305  		megasas_complete_cmd(instance, cmd, DID_OK);
2306  
2307  		consumer++;
2308  		if (consumer == (instance->max_fw_cmds + 1)) {
2309  			consumer = 0;
2310  		}
2311  	}
2312  
2313  	*instance->consumer = cpu_to_le32(producer);
2314  
2315  	spin_unlock_irqrestore(&instance->completion_lock, flags);
2316  
2317  	/*
2318  	 * Check if we can restore can_queue
2319  	 */
2320  	megasas_check_and_restore_queue_depth(instance);
2321  }
2322  
2323  static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2324  
2325  /**
2326   * megasas_start_timer - Initializes sriov heartbeat timer object
2327   * @instance:		Adapter soft state
2328   *
2329   */
megasas_start_timer(struct megasas_instance * instance)2330  void megasas_start_timer(struct megasas_instance *instance)
2331  {
2332  	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2333  
2334  	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2335  	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2336  	add_timer(timer);
2337  }
2338  
2339  static void
2340  megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2341  
2342  static void
2343  process_fw_state_change_wq(struct work_struct *work);
2344  
megasas_do_ocr(struct megasas_instance * instance)2345  static void megasas_do_ocr(struct megasas_instance *instance)
2346  {
2347  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2348  	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2349  	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2350  		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2351  	}
2352  	instance->instancet->disable_intr(instance);
2353  	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2354  	instance->issuepend_done = 0;
2355  
2356  	atomic_set(&instance->fw_outstanding, 0);
2357  	megasas_internal_reset_defer_cmds(instance);
2358  	process_fw_state_change_wq(&instance->work_init);
2359  }
2360  
megasas_get_ld_vf_affiliation_111(struct megasas_instance * instance,int initial)2361  static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2362  					    int initial)
2363  {
2364  	struct megasas_cmd *cmd;
2365  	struct megasas_dcmd_frame *dcmd;
2366  	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2367  	dma_addr_t new_affiliation_111_h;
2368  	int ld, retval = 0;
2369  	u8 thisVf;
2370  
2371  	cmd = megasas_get_cmd(instance);
2372  
2373  	if (!cmd) {
2374  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2375  		       "Failed to get cmd for scsi%d\n",
2376  			instance->host->host_no);
2377  		return -ENOMEM;
2378  	}
2379  
2380  	dcmd = &cmd->frame->dcmd;
2381  
2382  	if (!instance->vf_affiliation_111) {
2383  		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2384  		       "affiliation for scsi%d\n", instance->host->host_no);
2385  		megasas_return_cmd(instance, cmd);
2386  		return -ENOMEM;
2387  	}
2388  
2389  	if (initial)
2390  			memset(instance->vf_affiliation_111, 0,
2391  			       sizeof(struct MR_LD_VF_AFFILIATION_111));
2392  	else {
2393  		new_affiliation_111 =
2394  			dma_alloc_coherent(&instance->pdev->dev,
2395  					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2396  					   &new_affiliation_111_h, GFP_KERNEL);
2397  		if (!new_affiliation_111) {
2398  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2399  			       "memory for new affiliation for scsi%d\n",
2400  			       instance->host->host_no);
2401  			megasas_return_cmd(instance, cmd);
2402  			return -ENOMEM;
2403  		}
2404  	}
2405  
2406  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2407  
2408  	dcmd->cmd = MFI_CMD_DCMD;
2409  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2410  	dcmd->sge_count = 1;
2411  	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2412  	dcmd->timeout = 0;
2413  	dcmd->pad_0 = 0;
2414  	dcmd->data_xfer_len =
2415  		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2416  	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2417  
2418  	if (initial)
2419  		dcmd->sgl.sge32[0].phys_addr =
2420  			cpu_to_le32(instance->vf_affiliation_111_h);
2421  	else
2422  		dcmd->sgl.sge32[0].phys_addr =
2423  			cpu_to_le32(new_affiliation_111_h);
2424  
2425  	dcmd->sgl.sge32[0].length = cpu_to_le32(
2426  		sizeof(struct MR_LD_VF_AFFILIATION_111));
2427  
2428  	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2429  	       "scsi%d\n", instance->host->host_no);
2430  
2431  	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2432  		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2433  		       " failed with status 0x%x for scsi%d\n",
2434  		       dcmd->cmd_status, instance->host->host_no);
2435  		retval = 1; /* Do a scan if we couldn't get affiliation */
2436  		goto out;
2437  	}
2438  
2439  	if (!initial) {
2440  		thisVf = new_affiliation_111->thisVf;
2441  		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2442  			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2443  			    new_affiliation_111->map[ld].policy[thisVf]) {
2444  				dev_warn(&instance->pdev->dev, "SR-IOV: "
2445  				       "Got new LD/VF affiliation for scsi%d\n",
2446  				       instance->host->host_no);
2447  				memcpy(instance->vf_affiliation_111,
2448  				       new_affiliation_111,
2449  				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2450  				retval = 1;
2451  				goto out;
2452  			}
2453  	}
2454  out:
2455  	if (new_affiliation_111) {
2456  		dma_free_coherent(&instance->pdev->dev,
2457  				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2458  				    new_affiliation_111,
2459  				    new_affiliation_111_h);
2460  	}
2461  
2462  	megasas_return_cmd(instance, cmd);
2463  
2464  	return retval;
2465  }
2466  
megasas_get_ld_vf_affiliation_12(struct megasas_instance * instance,int initial)2467  static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2468  					    int initial)
2469  {
2470  	struct megasas_cmd *cmd;
2471  	struct megasas_dcmd_frame *dcmd;
2472  	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2473  	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2474  	dma_addr_t new_affiliation_h;
2475  	int i, j, retval = 0, found = 0, doscan = 0;
2476  	u8 thisVf;
2477  
2478  	cmd = megasas_get_cmd(instance);
2479  
2480  	if (!cmd) {
2481  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2482  		       "Failed to get cmd for scsi%d\n",
2483  		       instance->host->host_no);
2484  		return -ENOMEM;
2485  	}
2486  
2487  	dcmd = &cmd->frame->dcmd;
2488  
2489  	if (!instance->vf_affiliation) {
2490  		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2491  		       "affiliation for scsi%d\n", instance->host->host_no);
2492  		megasas_return_cmd(instance, cmd);
2493  		return -ENOMEM;
2494  	}
2495  
2496  	if (initial)
2497  		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2498  		       sizeof(struct MR_LD_VF_AFFILIATION));
2499  	else {
2500  		new_affiliation =
2501  			dma_alloc_coherent(&instance->pdev->dev,
2502  					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2503  					   &new_affiliation_h, GFP_KERNEL);
2504  		if (!new_affiliation) {
2505  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2506  			       "memory for new affiliation for scsi%d\n",
2507  			       instance->host->host_no);
2508  			megasas_return_cmd(instance, cmd);
2509  			return -ENOMEM;
2510  		}
2511  	}
2512  
2513  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2514  
2515  	dcmd->cmd = MFI_CMD_DCMD;
2516  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2517  	dcmd->sge_count = 1;
2518  	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2519  	dcmd->timeout = 0;
2520  	dcmd->pad_0 = 0;
2521  	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2522  		sizeof(struct MR_LD_VF_AFFILIATION));
2523  	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2524  
2525  	if (initial)
2526  		dcmd->sgl.sge32[0].phys_addr =
2527  			cpu_to_le32(instance->vf_affiliation_h);
2528  	else
2529  		dcmd->sgl.sge32[0].phys_addr =
2530  			cpu_to_le32(new_affiliation_h);
2531  
2532  	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2533  		sizeof(struct MR_LD_VF_AFFILIATION));
2534  
2535  	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2536  	       "scsi%d\n", instance->host->host_no);
2537  
2538  
2539  	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2540  		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2541  		       " failed with status 0x%x for scsi%d\n",
2542  		       dcmd->cmd_status, instance->host->host_no);
2543  		retval = 1; /* Do a scan if we couldn't get affiliation */
2544  		goto out;
2545  	}
2546  
2547  	if (!initial) {
2548  		if (!new_affiliation->ldCount) {
2549  			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2550  			       "affiliation for passive path for scsi%d\n",
2551  			       instance->host->host_no);
2552  			retval = 1;
2553  			goto out;
2554  		}
2555  		newmap = new_affiliation->map;
2556  		savedmap = instance->vf_affiliation->map;
2557  		thisVf = new_affiliation->thisVf;
2558  		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2559  			found = 0;
2560  			for (j = 0; j < instance->vf_affiliation->ldCount;
2561  			     j++) {
2562  				if (newmap->ref.targetId ==
2563  				    savedmap->ref.targetId) {
2564  					found = 1;
2565  					if (newmap->policy[thisVf] !=
2566  					    savedmap->policy[thisVf]) {
2567  						doscan = 1;
2568  						goto out;
2569  					}
2570  				}
2571  				savedmap = (struct MR_LD_VF_MAP *)
2572  					((unsigned char *)savedmap +
2573  					 savedmap->size);
2574  			}
2575  			if (!found && newmap->policy[thisVf] !=
2576  			    MR_LD_ACCESS_HIDDEN) {
2577  				doscan = 1;
2578  				goto out;
2579  			}
2580  			newmap = (struct MR_LD_VF_MAP *)
2581  				((unsigned char *)newmap + newmap->size);
2582  		}
2583  
2584  		newmap = new_affiliation->map;
2585  		savedmap = instance->vf_affiliation->map;
2586  
2587  		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2588  			found = 0;
2589  			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2590  				if (savedmap->ref.targetId ==
2591  				    newmap->ref.targetId) {
2592  					found = 1;
2593  					if (savedmap->policy[thisVf] !=
2594  					    newmap->policy[thisVf]) {
2595  						doscan = 1;
2596  						goto out;
2597  					}
2598  				}
2599  				newmap = (struct MR_LD_VF_MAP *)
2600  					((unsigned char *)newmap +
2601  					 newmap->size);
2602  			}
2603  			if (!found && savedmap->policy[thisVf] !=
2604  			    MR_LD_ACCESS_HIDDEN) {
2605  				doscan = 1;
2606  				goto out;
2607  			}
2608  			savedmap = (struct MR_LD_VF_MAP *)
2609  				((unsigned char *)savedmap +
2610  				 savedmap->size);
2611  		}
2612  	}
2613  out:
2614  	if (doscan) {
2615  		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2616  		       "affiliation for scsi%d\n", instance->host->host_no);
2617  		memcpy(instance->vf_affiliation, new_affiliation,
2618  		       new_affiliation->size);
2619  		retval = 1;
2620  	}
2621  
2622  	if (new_affiliation)
2623  		dma_free_coherent(&instance->pdev->dev,
2624  				    (MAX_LOGICAL_DRIVES + 1) *
2625  				    sizeof(struct MR_LD_VF_AFFILIATION),
2626  				    new_affiliation, new_affiliation_h);
2627  	megasas_return_cmd(instance, cmd);
2628  
2629  	return retval;
2630  }
2631  
2632  /* This function will get the current SR-IOV LD/VF affiliation */
megasas_get_ld_vf_affiliation(struct megasas_instance * instance,int initial)2633  static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2634  	int initial)
2635  {
2636  	int retval;
2637  
2638  	if (instance->PlasmaFW111)
2639  		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2640  	else
2641  		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2642  	return retval;
2643  }
2644  
2645  /* This function will tell FW to start the SR-IOV heartbeat */
megasas_sriov_start_heartbeat(struct megasas_instance * instance,int initial)2646  int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2647  					 int initial)
2648  {
2649  	struct megasas_cmd *cmd;
2650  	struct megasas_dcmd_frame *dcmd;
2651  	int retval = 0;
2652  
2653  	cmd = megasas_get_cmd(instance);
2654  
2655  	if (!cmd) {
2656  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2657  		       "Failed to get cmd for scsi%d\n",
2658  		       instance->host->host_no);
2659  		return -ENOMEM;
2660  	}
2661  
2662  	dcmd = &cmd->frame->dcmd;
2663  
2664  	if (initial) {
2665  		instance->hb_host_mem =
2666  			dma_alloc_coherent(&instance->pdev->dev,
2667  					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2668  					   &instance->hb_host_mem_h,
2669  					   GFP_KERNEL);
2670  		if (!instance->hb_host_mem) {
2671  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2672  			       " memory for heartbeat host memory for scsi%d\n",
2673  			       instance->host->host_no);
2674  			retval = -ENOMEM;
2675  			goto out;
2676  		}
2677  	}
2678  
2679  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2680  
2681  	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2682  	dcmd->cmd = MFI_CMD_DCMD;
2683  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2684  	dcmd->sge_count = 1;
2685  	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2686  	dcmd->timeout = 0;
2687  	dcmd->pad_0 = 0;
2688  	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2689  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2690  
2691  	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2692  				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2693  
2694  	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2695  	       instance->host->host_no);
2696  
2697  	if ((instance->adapter_type != MFI_SERIES) &&
2698  	    !instance->mask_interrupts)
2699  		retval = megasas_issue_blocked_cmd(instance, cmd,
2700  			MEGASAS_ROUTINE_WAIT_TIME_VF);
2701  	else
2702  		retval = megasas_issue_polled(instance, cmd);
2703  
2704  	if (retval) {
2705  		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2706  			"_MEM_ALLOC DCMD %s for scsi%d\n",
2707  			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2708  			"timed out" : "failed", instance->host->host_no);
2709  		retval = 1;
2710  	}
2711  
2712  out:
2713  	megasas_return_cmd(instance, cmd);
2714  
2715  	return retval;
2716  }
2717  
2718  /* Handler for SR-IOV heartbeat */
megasas_sriov_heartbeat_handler(struct timer_list * t)2719  static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2720  {
2721  	struct megasas_instance *instance =
2722  		from_timer(instance, t, sriov_heartbeat_timer);
2723  
2724  	if (instance->hb_host_mem->HB.fwCounter !=
2725  	    instance->hb_host_mem->HB.driverCounter) {
2726  		instance->hb_host_mem->HB.driverCounter =
2727  			instance->hb_host_mem->HB.fwCounter;
2728  		mod_timer(&instance->sriov_heartbeat_timer,
2729  			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2730  	} else {
2731  		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2732  		       "completed for scsi%d\n", instance->host->host_no);
2733  		schedule_work(&instance->work_init);
2734  	}
2735  }
2736  
2737  /**
2738   * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2739   * @instance:				Adapter soft state
2740   *
2741   * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2742   * complete all its outstanding commands. Returns error if one or more IOs
2743   * are pending after this time period. It also marks the controller dead.
2744   */
megasas_wait_for_outstanding(struct megasas_instance * instance)2745  static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2746  {
2747  	int i, sl, outstanding;
2748  	u32 reset_index;
2749  	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2750  	unsigned long flags;
2751  	struct list_head clist_local;
2752  	struct megasas_cmd *reset_cmd;
2753  	u32 fw_state;
2754  
2755  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2756  		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2757  		__func__, __LINE__);
2758  		return FAILED;
2759  	}
2760  
2761  	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2762  
2763  		INIT_LIST_HEAD(&clist_local);
2764  		spin_lock_irqsave(&instance->hba_lock, flags);
2765  		list_splice_init(&instance->internal_reset_pending_q,
2766  				&clist_local);
2767  		spin_unlock_irqrestore(&instance->hba_lock, flags);
2768  
2769  		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2770  		for (i = 0; i < wait_time; i++) {
2771  			msleep(1000);
2772  			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2773  				break;
2774  		}
2775  
2776  		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2777  			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2778  			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2779  			return FAILED;
2780  		}
2781  
2782  		reset_index = 0;
2783  		while (!list_empty(&clist_local)) {
2784  			reset_cmd = list_entry((&clist_local)->next,
2785  						struct megasas_cmd, list);
2786  			list_del_init(&reset_cmd->list);
2787  			if (reset_cmd->scmd) {
2788  				reset_cmd->scmd->result = DID_REQUEUE << 16;
2789  				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2790  					reset_index, reset_cmd,
2791  					reset_cmd->scmd->cmnd[0]);
2792  
2793  				scsi_done(reset_cmd->scmd);
2794  				megasas_return_cmd(instance, reset_cmd);
2795  			} else if (reset_cmd->sync_cmd) {
2796  				dev_notice(&instance->pdev->dev, "%p synch cmds"
2797  						"reset queue\n",
2798  						reset_cmd);
2799  
2800  				reset_cmd->cmd_status_drv = DCMD_INIT;
2801  				instance->instancet->fire_cmd(instance,
2802  						reset_cmd->frame_phys_addr,
2803  						0, instance->reg_set);
2804  			} else {
2805  				dev_notice(&instance->pdev->dev, "%p unexpected"
2806  					"cmds lst\n",
2807  					reset_cmd);
2808  			}
2809  			reset_index++;
2810  		}
2811  
2812  		return SUCCESS;
2813  	}
2814  
2815  	for (i = 0; i < resetwaittime; i++) {
2816  		outstanding = atomic_read(&instance->fw_outstanding);
2817  
2818  		if (!outstanding)
2819  			break;
2820  
2821  		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2822  			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2823  			       "commands to complete\n",i,outstanding);
2824  			/*
2825  			 * Call cmd completion routine. Cmd to be
2826  			 * be completed directly without depending on isr.
2827  			 */
2828  			megasas_complete_cmd_dpc((unsigned long)instance);
2829  		}
2830  
2831  		msleep(1000);
2832  	}
2833  
2834  	i = 0;
2835  	outstanding = atomic_read(&instance->fw_outstanding);
2836  	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2837  
2838  	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2839  		goto no_outstanding;
2840  
2841  	if (instance->disableOnlineCtrlReset)
2842  		goto kill_hba_and_failed;
2843  	do {
2844  		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2845  			dev_info(&instance->pdev->dev,
2846  				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2847  				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2848  			if (i == 3)
2849  				goto kill_hba_and_failed;
2850  			megasas_do_ocr(instance);
2851  
2852  			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2853  				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2854  				__func__, __LINE__);
2855  				return FAILED;
2856  			}
2857  			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2858  				__func__, __LINE__);
2859  
2860  			for (sl = 0; sl < 10; sl++)
2861  				msleep(500);
2862  
2863  			outstanding = atomic_read(&instance->fw_outstanding);
2864  
2865  			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2866  			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2867  				goto no_outstanding;
2868  		}
2869  		i++;
2870  	} while (i <= 3);
2871  
2872  no_outstanding:
2873  
2874  	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2875  		__func__, __LINE__);
2876  	return SUCCESS;
2877  
2878  kill_hba_and_failed:
2879  
2880  	/* Reset not supported, kill adapter */
2881  	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2882  		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2883  		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2884  		atomic_read(&instance->fw_outstanding));
2885  	megasas_dump_pending_frames(instance);
2886  	megaraid_sas_kill_hba(instance);
2887  
2888  	return FAILED;
2889  }
2890  
2891  /**
2892   * megasas_generic_reset -	Generic reset routine
2893   * @scmd:			Mid-layer SCSI command
2894   *
2895   * This routine implements a generic reset handler for device, bus and host
2896   * reset requests. Device, bus and host specific reset handlers can use this
2897   * function after they do their specific tasks.
2898   */
megasas_generic_reset(struct scsi_cmnd * scmd)2899  static int megasas_generic_reset(struct scsi_cmnd *scmd)
2900  {
2901  	int ret_val;
2902  	struct megasas_instance *instance;
2903  
2904  	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2905  
2906  	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2907  		 scmd->cmnd[0], scmd->retries);
2908  
2909  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2910  		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2911  		return FAILED;
2912  	}
2913  
2914  	ret_val = megasas_wait_for_outstanding(instance);
2915  	if (ret_val == SUCCESS)
2916  		dev_notice(&instance->pdev->dev, "reset successful\n");
2917  	else
2918  		dev_err(&instance->pdev->dev, "failed to do reset\n");
2919  
2920  	return ret_val;
2921  }
2922  
2923  /**
2924   * megasas_reset_timer - quiesce the adapter if required
2925   * @scmd:		scsi cmnd
2926   *
2927   * Sets the FW busy flag and reduces the host->can_queue if the
2928   * cmd has not been completed within the timeout period.
2929   */
megasas_reset_timer(struct scsi_cmnd * scmd)2930  static enum scsi_timeout_action megasas_reset_timer(struct scsi_cmnd *scmd)
2931  {
2932  	struct megasas_instance *instance;
2933  	unsigned long flags;
2934  
2935  	if (time_after(jiffies, scmd->jiffies_at_alloc +
2936  				(scmd_timeout * 2) * HZ)) {
2937  		return SCSI_EH_NOT_HANDLED;
2938  	}
2939  
2940  	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2941  	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2942  		/* FW is busy, throttle IO */
2943  		spin_lock_irqsave(instance->host->host_lock, flags);
2944  
2945  		instance->host->can_queue = instance->throttlequeuedepth;
2946  		instance->last_time = jiffies;
2947  		instance->flag |= MEGASAS_FW_BUSY;
2948  
2949  		spin_unlock_irqrestore(instance->host->host_lock, flags);
2950  	}
2951  	return SCSI_EH_RESET_TIMER;
2952  }
2953  
2954  /**
2955   * megasas_dump -	This function will print hexdump of provided buffer.
2956   * @buf:		Buffer to be dumped
2957   * @sz:		Size in bytes
2958   * @format:		Different formats of dumping e.g. format=n will
2959   *			cause only 'n' 32 bit words to be dumped in a single
2960   *			line.
2961   */
2962  inline void
megasas_dump(void * buf,int sz,int format)2963  megasas_dump(void *buf, int sz, int format)
2964  {
2965  	int i;
2966  	__le32 *buf_loc = (__le32 *)buf;
2967  
2968  	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2969  		if ((i % format) == 0) {
2970  			if (i != 0)
2971  				printk(KERN_CONT "\n");
2972  			printk(KERN_CONT "%08x: ", (i * 4));
2973  		}
2974  		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2975  	}
2976  	printk(KERN_CONT "\n");
2977  }
2978  
2979  /**
2980   * megasas_dump_reg_set -	This function will print hexdump of register set
2981   * @reg_set:	Register set to be dumped
2982   */
2983  inline void
megasas_dump_reg_set(void __iomem * reg_set)2984  megasas_dump_reg_set(void __iomem *reg_set)
2985  {
2986  	unsigned int i, sz = 256;
2987  	u32 __iomem *reg = (u32 __iomem *)reg_set;
2988  
2989  	for (i = 0; i < (sz / sizeof(u32)); i++)
2990  		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2991  }
2992  
2993  /**
2994   * megasas_dump_fusion_io -	This function will print key details
2995   *				of SCSI IO
2996   * @scmd:			SCSI command pointer of SCSI IO
2997   */
2998  void
megasas_dump_fusion_io(struct scsi_cmnd * scmd)2999  megasas_dump_fusion_io(struct scsi_cmnd *scmd)
3000  {
3001  	struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv;
3002  	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3003  	struct megasas_instance *instance;
3004  
3005  	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3006  
3007  	scmd_printk(KERN_INFO, scmd,
3008  		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
3009  		    scmd, scmd->retries, scmd->allowed);
3010  	scsi_print_command(scmd);
3011  
3012  	if (cmd) {
3013  		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
3014  		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
3015  		scmd_printk(KERN_INFO, scmd,
3016  			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
3017  			    req_desc->SCSIIO.RequestFlags,
3018  			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
3019  			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
3020  
3021  		printk(KERN_INFO "IO request frame:\n");
3022  		megasas_dump(cmd->io_request,
3023  			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
3024  		printk(KERN_INFO "Chain frame:\n");
3025  		megasas_dump(cmd->sg_frame,
3026  			     instance->max_chain_frame_sz, 8);
3027  	}
3028  
3029  }
3030  
3031  /*
3032   * megasas_dump_sys_regs - This function will dump system registers through
3033   *			    sysfs.
3034   * @reg_set:		    Pointer to System register set.
3035   * @buf:		    Buffer to which output is to be written.
3036   * @return:		    Number of bytes written to buffer.
3037   */
3038  static inline ssize_t
megasas_dump_sys_regs(void __iomem * reg_set,char * buf)3039  megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
3040  {
3041  	unsigned int i, sz = 256;
3042  	int bytes_wrote = 0;
3043  	char *loc = (char *)buf;
3044  	u32 __iomem *reg = (u32 __iomem *)reg_set;
3045  
3046  	for (i = 0; i < sz / sizeof(u32); i++) {
3047  		bytes_wrote += scnprintf(loc + bytes_wrote,
3048  					 PAGE_SIZE - bytes_wrote,
3049  					 "%08x: %08x\n", (i * 4),
3050  					 readl(&reg[i]));
3051  	}
3052  	return bytes_wrote;
3053  }
3054  
3055  /**
3056   * megasas_reset_bus_host -	Bus & host reset handler entry point
3057   * @scmd:			Mid-layer SCSI command
3058   */
megasas_reset_bus_host(struct scsi_cmnd * scmd)3059  static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3060  {
3061  	int ret;
3062  	struct megasas_instance *instance;
3063  
3064  	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3065  
3066  	scmd_printk(KERN_INFO, scmd,
3067  		"OCR is requested due to IO timeout!!\n");
3068  
3069  	scmd_printk(KERN_INFO, scmd,
3070  		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
3071  		scmd->device->host->shost_state,
3072  		scsi_host_busy(scmd->device->host),
3073  		atomic_read(&instance->fw_outstanding));
3074  	/*
3075  	 * First wait for all commands to complete
3076  	 */
3077  	if (instance->adapter_type == MFI_SERIES) {
3078  		ret = megasas_generic_reset(scmd);
3079  	} else {
3080  		megasas_dump_fusion_io(scmd);
3081  		ret = megasas_reset_fusion(scmd->device->host,
3082  				SCSIIO_TIMEOUT_OCR);
3083  	}
3084  
3085  	return ret;
3086  }
3087  
3088  /**
3089   * megasas_task_abort - Issues task abort request to firmware
3090   *			(supported only for fusion adapters)
3091   * @scmd:		SCSI command pointer
3092   */
megasas_task_abort(struct scsi_cmnd * scmd)3093  static int megasas_task_abort(struct scsi_cmnd *scmd)
3094  {
3095  	int ret;
3096  	struct megasas_instance *instance;
3097  
3098  	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3099  
3100  	if (instance->adapter_type != MFI_SERIES)
3101  		ret = megasas_task_abort_fusion(scmd);
3102  	else {
3103  		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3104  		ret = FAILED;
3105  	}
3106  
3107  	return ret;
3108  }
3109  
3110  /**
3111   * megasas_reset_target:  Issues target reset request to firmware
3112   *                        (supported only for fusion adapters)
3113   * @scmd:                 SCSI command pointer
3114   */
megasas_reset_target(struct scsi_cmnd * scmd)3115  static int megasas_reset_target(struct scsi_cmnd *scmd)
3116  {
3117  	int ret;
3118  	struct megasas_instance *instance;
3119  
3120  	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3121  
3122  	if (instance->adapter_type != MFI_SERIES)
3123  		ret = megasas_reset_target_fusion(scmd);
3124  	else {
3125  		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3126  		ret = FAILED;
3127  	}
3128  
3129  	return ret;
3130  }
3131  
3132  /**
3133   * megasas_bios_param - Returns disk geometry for a disk
3134   * @sdev:		device handle
3135   * @bdev:		block device
3136   * @capacity:		drive capacity
3137   * @geom:		geometry parameters
3138   */
3139  static int
megasas_bios_param(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int geom[])3140  megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3141  		 sector_t capacity, int geom[])
3142  {
3143  	int heads;
3144  	int sectors;
3145  	sector_t cylinders;
3146  	unsigned long tmp;
3147  
3148  	/* Default heads (64) & sectors (32) */
3149  	heads = 64;
3150  	sectors = 32;
3151  
3152  	tmp = heads * sectors;
3153  	cylinders = capacity;
3154  
3155  	sector_div(cylinders, tmp);
3156  
3157  	/*
3158  	 * Handle extended translation size for logical drives > 1Gb
3159  	 */
3160  
3161  	if (capacity >= 0x200000) {
3162  		heads = 255;
3163  		sectors = 63;
3164  		tmp = heads*sectors;
3165  		cylinders = capacity;
3166  		sector_div(cylinders, tmp);
3167  	}
3168  
3169  	geom[0] = heads;
3170  	geom[1] = sectors;
3171  	geom[2] = cylinders;
3172  
3173  	return 0;
3174  }
3175  
megasas_map_queues(struct Scsi_Host * shost)3176  static void megasas_map_queues(struct Scsi_Host *shost)
3177  {
3178  	struct megasas_instance *instance;
3179  	int qoff = 0, offset;
3180  	struct blk_mq_queue_map *map;
3181  
3182  	instance = (struct megasas_instance *)shost->hostdata;
3183  
3184  	if (shost->nr_hw_queues == 1)
3185  		return;
3186  
3187  	offset = instance->low_latency_index_start;
3188  
3189  	/* Setup Default hctx */
3190  	map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
3191  	map->nr_queues = instance->msix_vectors - offset;
3192  	map->queue_offset = 0;
3193  	blk_mq_pci_map_queues(map, instance->pdev, offset);
3194  	qoff += map->nr_queues;
3195  	offset += map->nr_queues;
3196  
3197  	/* we never use READ queue, so can't cheat blk-mq */
3198  	shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
3199  
3200  	/* Setup Poll hctx */
3201  	map = &shost->tag_set.map[HCTX_TYPE_POLL];
3202  	map->nr_queues = instance->iopoll_q_count;
3203  	if (map->nr_queues) {
3204  		/*
3205  		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
3206  		 * affinity), so use the regular blk-mq cpu mapping
3207  		 */
3208  		map->queue_offset = qoff;
3209  		blk_mq_map_queues(map);
3210  	}
3211  }
3212  
3213  static void megasas_aen_polling(struct work_struct *work);
3214  
3215  /**
3216   * megasas_service_aen -	Processes an event notification
3217   * @instance:			Adapter soft state
3218   * @cmd:			AEN command completed by the ISR
3219   *
3220   * For AEN, driver sends a command down to FW that is held by the FW till an
3221   * event occurs. When an event of interest occurs, FW completes the command
3222   * that it was previously holding.
3223   *
3224   * This routines sends SIGIO signal to processes that have registered with the
3225   * driver for AEN.
3226   */
3227  static void
megasas_service_aen(struct megasas_instance * instance,struct megasas_cmd * cmd)3228  megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3229  {
3230  	unsigned long flags;
3231  
3232  	/*
3233  	 * Don't signal app if it is just an aborted previously registered aen
3234  	 */
3235  	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3236  		spin_lock_irqsave(&poll_aen_lock, flags);
3237  		megasas_poll_wait_aen = 1;
3238  		spin_unlock_irqrestore(&poll_aen_lock, flags);
3239  		wake_up(&megasas_poll_wait);
3240  		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3241  	}
3242  	else
3243  		cmd->abort_aen = 0;
3244  
3245  	instance->aen_cmd = NULL;
3246  
3247  	megasas_return_cmd(instance, cmd);
3248  
3249  	if ((instance->unload == 0) &&
3250  		((instance->issuepend_done == 1))) {
3251  		struct megasas_aen_event *ev;
3252  
3253  		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3254  		if (!ev) {
3255  			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3256  		} else {
3257  			ev->instance = instance;
3258  			instance->ev = ev;
3259  			INIT_DELAYED_WORK(&ev->hotplug_work,
3260  					  megasas_aen_polling);
3261  			schedule_delayed_work(&ev->hotplug_work, 0);
3262  		}
3263  	}
3264  }
3265  
3266  static ssize_t
fw_crash_buffer_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)3267  fw_crash_buffer_store(struct device *cdev,
3268  	struct device_attribute *attr, const char *buf, size_t count)
3269  {
3270  	struct Scsi_Host *shost = class_to_shost(cdev);
3271  	struct megasas_instance *instance =
3272  		(struct megasas_instance *) shost->hostdata;
3273  	int val = 0;
3274  
3275  	if (kstrtoint(buf, 0, &val) != 0)
3276  		return -EINVAL;
3277  
3278  	mutex_lock(&instance->crashdump_lock);
3279  	instance->fw_crash_buffer_offset = val;
3280  	mutex_unlock(&instance->crashdump_lock);
3281  	return strlen(buf);
3282  }
3283  
3284  static ssize_t
fw_crash_buffer_show(struct device * cdev,struct device_attribute * attr,char * buf)3285  fw_crash_buffer_show(struct device *cdev,
3286  	struct device_attribute *attr, char *buf)
3287  {
3288  	struct Scsi_Host *shost = class_to_shost(cdev);
3289  	struct megasas_instance *instance =
3290  		(struct megasas_instance *) shost->hostdata;
3291  	u32 size;
3292  	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3293  	unsigned long chunk_left_bytes;
3294  	unsigned long src_addr;
3295  	u32 buff_offset;
3296  
3297  	mutex_lock(&instance->crashdump_lock);
3298  	buff_offset = instance->fw_crash_buffer_offset;
3299  	if (!instance->crash_dump_buf ||
3300  		!((instance->fw_crash_state == AVAILABLE) ||
3301  		(instance->fw_crash_state == COPYING))) {
3302  		dev_err(&instance->pdev->dev,
3303  			"Firmware crash dump is not available\n");
3304  		mutex_unlock(&instance->crashdump_lock);
3305  		return -EINVAL;
3306  	}
3307  
3308  	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3309  		dev_err(&instance->pdev->dev,
3310  			"Firmware crash dump offset is out of range\n");
3311  		mutex_unlock(&instance->crashdump_lock);
3312  		return 0;
3313  	}
3314  
3315  	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3316  	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3317  	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3318  	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3319  
3320  	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3321  		(buff_offset % dmachunk);
3322  	memcpy(buf, (void *)src_addr, size);
3323  	mutex_unlock(&instance->crashdump_lock);
3324  
3325  	return size;
3326  }
3327  
3328  static ssize_t
fw_crash_buffer_size_show(struct device * cdev,struct device_attribute * attr,char * buf)3329  fw_crash_buffer_size_show(struct device *cdev,
3330  	struct device_attribute *attr, char *buf)
3331  {
3332  	struct Scsi_Host *shost = class_to_shost(cdev);
3333  	struct megasas_instance *instance =
3334  		(struct megasas_instance *) shost->hostdata;
3335  
3336  	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3337  		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3338  }
3339  
3340  static ssize_t
fw_crash_state_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)3341  fw_crash_state_store(struct device *cdev,
3342  	struct device_attribute *attr, const char *buf, size_t count)
3343  {
3344  	struct Scsi_Host *shost = class_to_shost(cdev);
3345  	struct megasas_instance *instance =
3346  		(struct megasas_instance *) shost->hostdata;
3347  	int val = 0;
3348  
3349  	if (kstrtoint(buf, 0, &val) != 0)
3350  		return -EINVAL;
3351  
3352  	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3353  		dev_err(&instance->pdev->dev, "application updates invalid "
3354  			"firmware crash state\n");
3355  		return -EINVAL;
3356  	}
3357  
3358  	instance->fw_crash_state = val;
3359  
3360  	if ((val == COPIED) || (val == COPY_ERROR)) {
3361  		mutex_lock(&instance->crashdump_lock);
3362  		megasas_free_host_crash_buffer(instance);
3363  		mutex_unlock(&instance->crashdump_lock);
3364  		if (val == COPY_ERROR)
3365  			dev_info(&instance->pdev->dev, "application failed to "
3366  				"copy Firmware crash dump\n");
3367  		else
3368  			dev_info(&instance->pdev->dev, "Firmware crash dump "
3369  				"copied successfully\n");
3370  	}
3371  	return strlen(buf);
3372  }
3373  
3374  static ssize_t
fw_crash_state_show(struct device * cdev,struct device_attribute * attr,char * buf)3375  fw_crash_state_show(struct device *cdev,
3376  	struct device_attribute *attr, char *buf)
3377  {
3378  	struct Scsi_Host *shost = class_to_shost(cdev);
3379  	struct megasas_instance *instance =
3380  		(struct megasas_instance *) shost->hostdata;
3381  
3382  	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3383  }
3384  
3385  static ssize_t
page_size_show(struct device * cdev,struct device_attribute * attr,char * buf)3386  page_size_show(struct device *cdev,
3387  	struct device_attribute *attr, char *buf)
3388  {
3389  	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3390  }
3391  
3392  static ssize_t
ldio_outstanding_show(struct device * cdev,struct device_attribute * attr,char * buf)3393  ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3394  	char *buf)
3395  {
3396  	struct Scsi_Host *shost = class_to_shost(cdev);
3397  	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3398  
3399  	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3400  }
3401  
3402  static ssize_t
fw_cmds_outstanding_show(struct device * cdev,struct device_attribute * attr,char * buf)3403  fw_cmds_outstanding_show(struct device *cdev,
3404  				 struct device_attribute *attr, char *buf)
3405  {
3406  	struct Scsi_Host *shost = class_to_shost(cdev);
3407  	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3408  
3409  	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3410  }
3411  
3412  static ssize_t
enable_sdev_max_qd_show(struct device * cdev,struct device_attribute * attr,char * buf)3413  enable_sdev_max_qd_show(struct device *cdev,
3414  	struct device_attribute *attr, char *buf)
3415  {
3416  	struct Scsi_Host *shost = class_to_shost(cdev);
3417  	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3418  
3419  	return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3420  }
3421  
3422  static ssize_t
enable_sdev_max_qd_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)3423  enable_sdev_max_qd_store(struct device *cdev,
3424  	struct device_attribute *attr, const char *buf, size_t count)
3425  {
3426  	struct Scsi_Host *shost = class_to_shost(cdev);
3427  	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3428  	u32 val = 0;
3429  	bool is_target_prop;
3430  	int ret_target_prop = DCMD_FAILED;
3431  	struct scsi_device *sdev;
3432  
3433  	if (kstrtou32(buf, 0, &val) != 0) {
3434  		pr_err("megasas: could not set enable_sdev_max_qd\n");
3435  		return -EINVAL;
3436  	}
3437  
3438  	mutex_lock(&instance->reset_mutex);
3439  	if (val)
3440  		instance->enable_sdev_max_qd = true;
3441  	else
3442  		instance->enable_sdev_max_qd = false;
3443  
3444  	shost_for_each_device(sdev, shost) {
3445  		ret_target_prop = megasas_get_target_prop(instance, sdev);
3446  		is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3447  		megasas_set_fw_assisted_qd(sdev, is_target_prop);
3448  	}
3449  	mutex_unlock(&instance->reset_mutex);
3450  
3451  	return strlen(buf);
3452  }
3453  
3454  static ssize_t
dump_system_regs_show(struct device * cdev,struct device_attribute * attr,char * buf)3455  dump_system_regs_show(struct device *cdev,
3456  			       struct device_attribute *attr, char *buf)
3457  {
3458  	struct Scsi_Host *shost = class_to_shost(cdev);
3459  	struct megasas_instance *instance =
3460  			(struct megasas_instance *)shost->hostdata;
3461  
3462  	return megasas_dump_sys_regs(instance->reg_set, buf);
3463  }
3464  
3465  static ssize_t
raid_map_id_show(struct device * cdev,struct device_attribute * attr,char * buf)3466  raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3467  			  char *buf)
3468  {
3469  	struct Scsi_Host *shost = class_to_shost(cdev);
3470  	struct megasas_instance *instance =
3471  			(struct megasas_instance *)shost->hostdata;
3472  
3473  	return snprintf(buf, PAGE_SIZE, "%ld\n",
3474  			(unsigned long)instance->map_id);
3475  }
3476  
3477  static DEVICE_ATTR_RW(fw_crash_buffer);
3478  static DEVICE_ATTR_RO(fw_crash_buffer_size);
3479  static DEVICE_ATTR_RW(fw_crash_state);
3480  static DEVICE_ATTR_RO(page_size);
3481  static DEVICE_ATTR_RO(ldio_outstanding);
3482  static DEVICE_ATTR_RO(fw_cmds_outstanding);
3483  static DEVICE_ATTR_RW(enable_sdev_max_qd);
3484  static DEVICE_ATTR_RO(dump_system_regs);
3485  static DEVICE_ATTR_RO(raid_map_id);
3486  
3487  static struct attribute *megaraid_host_attrs[] = {
3488  	&dev_attr_fw_crash_buffer_size.attr,
3489  	&dev_attr_fw_crash_buffer.attr,
3490  	&dev_attr_fw_crash_state.attr,
3491  	&dev_attr_page_size.attr,
3492  	&dev_attr_ldio_outstanding.attr,
3493  	&dev_attr_fw_cmds_outstanding.attr,
3494  	&dev_attr_enable_sdev_max_qd.attr,
3495  	&dev_attr_dump_system_regs.attr,
3496  	&dev_attr_raid_map_id.attr,
3497  	NULL,
3498  };
3499  
3500  ATTRIBUTE_GROUPS(megaraid_host);
3501  
3502  /*
3503   * Scsi host template for megaraid_sas driver
3504   */
3505  static const struct scsi_host_template megasas_template = {
3506  
3507  	.module = THIS_MODULE,
3508  	.name = "Avago SAS based MegaRAID driver",
3509  	.proc_name = "megaraid_sas",
3510  	.slave_configure = megasas_slave_configure,
3511  	.slave_alloc = megasas_slave_alloc,
3512  	.slave_destroy = megasas_slave_destroy,
3513  	.queuecommand = megasas_queue_command,
3514  	.eh_target_reset_handler = megasas_reset_target,
3515  	.eh_abort_handler = megasas_task_abort,
3516  	.eh_host_reset_handler = megasas_reset_bus_host,
3517  	.eh_timed_out = megasas_reset_timer,
3518  	.shost_groups = megaraid_host_groups,
3519  	.bios_param = megasas_bios_param,
3520  	.map_queues = megasas_map_queues,
3521  	.mq_poll = megasas_blk_mq_poll,
3522  	.change_queue_depth = scsi_change_queue_depth,
3523  	.max_segment_size = 0xffffffff,
3524  	.cmd_size = sizeof(struct megasas_cmd_priv),
3525  };
3526  
3527  /**
3528   * megasas_complete_int_cmd -	Completes an internal command
3529   * @instance:			Adapter soft state
3530   * @cmd:			Command to be completed
3531   *
3532   * The megasas_issue_blocked_cmd() function waits for a command to complete
3533   * after it issues a command. This function wakes up that waiting routine by
3534   * calling wake_up() on the wait queue.
3535   */
3536  static void
megasas_complete_int_cmd(struct megasas_instance * instance,struct megasas_cmd * cmd)3537  megasas_complete_int_cmd(struct megasas_instance *instance,
3538  			 struct megasas_cmd *cmd)
3539  {
3540  	if (cmd->cmd_status_drv == DCMD_INIT)
3541  		cmd->cmd_status_drv =
3542  		(cmd->frame->io.cmd_status == MFI_STAT_OK) ?
3543  		DCMD_SUCCESS : DCMD_FAILED;
3544  
3545  	wake_up(&instance->int_cmd_wait_q);
3546  }
3547  
3548  /**
3549   * megasas_complete_abort -	Completes aborting a command
3550   * @instance:			Adapter soft state
3551   * @cmd:			Cmd that was issued to abort another cmd
3552   *
3553   * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3554   * after it issues an abort on a previously issued command. This function
3555   * wakes up all functions waiting on the same wait queue.
3556   */
3557  static void
megasas_complete_abort(struct megasas_instance * instance,struct megasas_cmd * cmd)3558  megasas_complete_abort(struct megasas_instance *instance,
3559  		       struct megasas_cmd *cmd)
3560  {
3561  	if (cmd->sync_cmd) {
3562  		cmd->sync_cmd = 0;
3563  		cmd->cmd_status_drv = DCMD_SUCCESS;
3564  		wake_up(&instance->abort_cmd_wait_q);
3565  	}
3566  }
3567  
3568  static void
megasas_set_ld_removed_by_fw(struct megasas_instance * instance)3569  megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
3570  {
3571  	uint i;
3572  
3573  	for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
3574  		if (instance->ld_ids_prev[i] != 0xff &&
3575  		    instance->ld_ids_from_raidmap[i] == 0xff) {
3576  			if (megasas_dbg_lvl & LD_PD_DEBUG)
3577  				dev_info(&instance->pdev->dev,
3578  					 "LD target ID %d removed from RAID map\n", i);
3579  			instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
3580  		}
3581  	}
3582  }
3583  
3584  /**
3585   * megasas_complete_cmd -	Completes a command
3586   * @instance:			Adapter soft state
3587   * @cmd:			Command to be completed
3588   * @alt_status:			If non-zero, use this value as status to
3589   *				SCSI mid-layer instead of the value returned
3590   *				by the FW. This should be used if caller wants
3591   *				an alternate status (as in the case of aborted
3592   *				commands)
3593   */
3594  void
megasas_complete_cmd(struct megasas_instance * instance,struct megasas_cmd * cmd,u8 alt_status)3595  megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3596  		     u8 alt_status)
3597  {
3598  	int exception = 0;
3599  	struct megasas_header *hdr = &cmd->frame->hdr;
3600  	unsigned long flags;
3601  	struct fusion_context *fusion = instance->ctrl_context;
3602  	u32 opcode, status;
3603  
3604  	/* flag for the retry reset */
3605  	cmd->retry_for_fw_reset = 0;
3606  
3607  	if (cmd->scmd)
3608  		megasas_priv(cmd->scmd)->cmd_priv = NULL;
3609  
3610  	switch (hdr->cmd) {
3611  	case MFI_CMD_INVALID:
3612  		/* Some older 1068 controller FW may keep a pended
3613  		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3614  		   when booting the kdump kernel.  Ignore this command to
3615  		   prevent a kernel panic on shutdown of the kdump kernel. */
3616  		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3617  		       "completed\n");
3618  		dev_warn(&instance->pdev->dev, "If you have a controller "
3619  		       "other than PERC5, please upgrade your firmware\n");
3620  		break;
3621  	case MFI_CMD_PD_SCSI_IO:
3622  	case MFI_CMD_LD_SCSI_IO:
3623  
3624  		/*
3625  		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3626  		 * issued either through an IO path or an IOCTL path. If it
3627  		 * was via IOCTL, we will send it to internal completion.
3628  		 */
3629  		if (cmd->sync_cmd) {
3630  			cmd->sync_cmd = 0;
3631  			megasas_complete_int_cmd(instance, cmd);
3632  			break;
3633  		}
3634  		fallthrough;
3635  
3636  	case MFI_CMD_LD_READ:
3637  	case MFI_CMD_LD_WRITE:
3638  
3639  		if (alt_status) {
3640  			cmd->scmd->result = alt_status << 16;
3641  			exception = 1;
3642  		}
3643  
3644  		if (exception) {
3645  
3646  			atomic_dec(&instance->fw_outstanding);
3647  
3648  			scsi_dma_unmap(cmd->scmd);
3649  			scsi_done(cmd->scmd);
3650  			megasas_return_cmd(instance, cmd);
3651  
3652  			break;
3653  		}
3654  
3655  		switch (hdr->cmd_status) {
3656  
3657  		case MFI_STAT_OK:
3658  			cmd->scmd->result = DID_OK << 16;
3659  			break;
3660  
3661  		case MFI_STAT_SCSI_IO_FAILED:
3662  		case MFI_STAT_LD_INIT_IN_PROGRESS:
3663  			cmd->scmd->result =
3664  			    (DID_ERROR << 16) | hdr->scsi_status;
3665  			break;
3666  
3667  		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3668  
3669  			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3670  
3671  			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3672  				memset(cmd->scmd->sense_buffer, 0,
3673  				       SCSI_SENSE_BUFFERSIZE);
3674  				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3675  				       hdr->sense_len);
3676  			}
3677  
3678  			break;
3679  
3680  		case MFI_STAT_LD_OFFLINE:
3681  		case MFI_STAT_DEVICE_NOT_FOUND:
3682  			cmd->scmd->result = DID_BAD_TARGET << 16;
3683  			break;
3684  
3685  		default:
3686  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3687  			       hdr->cmd_status);
3688  			cmd->scmd->result = DID_ERROR << 16;
3689  			break;
3690  		}
3691  
3692  		atomic_dec(&instance->fw_outstanding);
3693  
3694  		scsi_dma_unmap(cmd->scmd);
3695  		scsi_done(cmd->scmd);
3696  		megasas_return_cmd(instance, cmd);
3697  
3698  		break;
3699  
3700  	case MFI_CMD_SMP:
3701  	case MFI_CMD_STP:
3702  	case MFI_CMD_NVME:
3703  	case MFI_CMD_TOOLBOX:
3704  		megasas_complete_int_cmd(instance, cmd);
3705  		break;
3706  
3707  	case MFI_CMD_DCMD:
3708  		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3709  		/* Check for LD map update */
3710  		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3711  			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3712  			fusion->fast_path_io = 0;
3713  			spin_lock_irqsave(instance->host->host_lock, flags);
3714  			status = cmd->frame->hdr.cmd_status;
3715  			instance->map_update_cmd = NULL;
3716  			if (status != MFI_STAT_OK) {
3717  				if (status != MFI_STAT_NOT_FOUND)
3718  					dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
3719  					       cmd->frame->hdr.cmd_status);
3720  				else {
3721  					megasas_return_cmd(instance, cmd);
3722  					spin_unlock_irqrestore(
3723  						instance->host->host_lock,
3724  						flags);
3725  					break;
3726  				}
3727  			}
3728  
3729  			megasas_return_cmd(instance, cmd);
3730  
3731  			/*
3732  			 * Set fast path IO to ZERO.
3733  			 * Validate Map will set proper value.
3734  			 * Meanwhile all IOs will go as LD IO.
3735  			 */
3736  			if (status == MFI_STAT_OK &&
3737  			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3738  				instance->map_id++;
3739  				fusion->fast_path_io = 1;
3740  			} else {
3741  				fusion->fast_path_io = 0;
3742  			}
3743  
3744  			if (instance->adapter_type >= INVADER_SERIES)
3745  				megasas_set_ld_removed_by_fw(instance);
3746  
3747  			megasas_sync_map_info(instance);
3748  			spin_unlock_irqrestore(instance->host->host_lock,
3749  					       flags);
3750  
3751  			break;
3752  		}
3753  		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3754  		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3755  			spin_lock_irqsave(&poll_aen_lock, flags);
3756  			megasas_poll_wait_aen = 0;
3757  			spin_unlock_irqrestore(&poll_aen_lock, flags);
3758  		}
3759  
3760  		/* FW has an updated PD sequence */
3761  		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3762  			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3763  
3764  			spin_lock_irqsave(instance->host->host_lock, flags);
3765  			status = cmd->frame->hdr.cmd_status;
3766  			instance->jbod_seq_cmd = NULL;
3767  			megasas_return_cmd(instance, cmd);
3768  
3769  			if (status == MFI_STAT_OK) {
3770  				instance->pd_seq_map_id++;
3771  				/* Re-register a pd sync seq num cmd */
3772  				if (megasas_sync_pd_seq_num(instance, true))
3773  					instance->use_seqnum_jbod_fp = false;
3774  			} else
3775  				instance->use_seqnum_jbod_fp = false;
3776  
3777  			spin_unlock_irqrestore(instance->host->host_lock, flags);
3778  			break;
3779  		}
3780  
3781  		/*
3782  		 * See if got an event notification
3783  		 */
3784  		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3785  			megasas_service_aen(instance, cmd);
3786  		else
3787  			megasas_complete_int_cmd(instance, cmd);
3788  
3789  		break;
3790  
3791  	case MFI_CMD_ABORT:
3792  		/*
3793  		 * Cmd issued to abort another cmd returned
3794  		 */
3795  		megasas_complete_abort(instance, cmd);
3796  		break;
3797  
3798  	default:
3799  		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3800  		       hdr->cmd);
3801  		megasas_complete_int_cmd(instance, cmd);
3802  		break;
3803  	}
3804  }
3805  
3806  /**
3807   * megasas_issue_pending_cmds_again -	issue all pending cmds
3808   *					in FW again because of the fw reset
3809   * @instance:				Adapter soft state
3810   */
3811  static inline void
megasas_issue_pending_cmds_again(struct megasas_instance * instance)3812  megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3813  {
3814  	struct megasas_cmd *cmd;
3815  	struct list_head clist_local;
3816  	union megasas_evt_class_locale class_locale;
3817  	unsigned long flags;
3818  	u32 seq_num;
3819  
3820  	INIT_LIST_HEAD(&clist_local);
3821  	spin_lock_irqsave(&instance->hba_lock, flags);
3822  	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3823  	spin_unlock_irqrestore(&instance->hba_lock, flags);
3824  
3825  	while (!list_empty(&clist_local)) {
3826  		cmd = list_entry((&clist_local)->next,
3827  					struct megasas_cmd, list);
3828  		list_del_init(&cmd->list);
3829  
3830  		if (cmd->sync_cmd || cmd->scmd) {
3831  			dev_notice(&instance->pdev->dev, "command %p, %p:%d"
3832  				"detected to be pending while HBA reset\n",
3833  					cmd, cmd->scmd, cmd->sync_cmd);
3834  
3835  			cmd->retry_for_fw_reset++;
3836  
3837  			if (cmd->retry_for_fw_reset == 3) {
3838  				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
3839  					"was tried multiple times during reset."
3840  					"Shutting down the HBA\n",
3841  					cmd, cmd->scmd, cmd->sync_cmd);
3842  				instance->instancet->disable_intr(instance);
3843  				atomic_set(&instance->fw_reset_no_pci_access, 1);
3844  				megaraid_sas_kill_hba(instance);
3845  				return;
3846  			}
3847  		}
3848  
3849  		if (cmd->sync_cmd == 1) {
3850  			if (cmd->scmd) {
3851  				dev_notice(&instance->pdev->dev, "unexpected"
3852  					"cmd attached to internal command!\n");
3853  			}
3854  			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3855  						"on the internal reset queue,"
3856  						"issue it again.\n", cmd);
3857  			cmd->cmd_status_drv = DCMD_INIT;
3858  			instance->instancet->fire_cmd(instance,
3859  							cmd->frame_phys_addr,
3860  							0, instance->reg_set);
3861  		} else if (cmd->scmd) {
3862  			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3863  			"detected on the internal queue, issue again.\n",
3864  			cmd, cmd->scmd->cmnd[0]);
3865  
3866  			atomic_inc(&instance->fw_outstanding);
3867  			instance->instancet->fire_cmd(instance,
3868  					cmd->frame_phys_addr,
3869  					cmd->frame_count-1, instance->reg_set);
3870  		} else {
3871  			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3872  				"internal reset defer list while re-issue!!\n",
3873  				cmd);
3874  		}
3875  	}
3876  
3877  	if (instance->aen_cmd) {
3878  		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3879  		megasas_return_cmd(instance, instance->aen_cmd);
3880  
3881  		instance->aen_cmd = NULL;
3882  	}
3883  
3884  	/*
3885  	 * Initiate AEN (Asynchronous Event Notification)
3886  	 */
3887  	seq_num = instance->last_seq_num;
3888  	class_locale.members.reserved = 0;
3889  	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3890  	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3891  
3892  	megasas_register_aen(instance, seq_num, class_locale.word);
3893  }
3894  
3895  /*
3896   * Move the internal reset pending commands to a deferred queue.
3897   *
3898   * We move the commands pending at internal reset time to a
3899   * pending queue. This queue would be flushed after successful
3900   * completion of the internal reset sequence. if the internal reset
3901   * did not complete in time, the kernel reset handler would flush
3902   * these commands.
3903   */
3904  static void
megasas_internal_reset_defer_cmds(struct megasas_instance * instance)3905  megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3906  {
3907  	struct megasas_cmd *cmd;
3908  	int i;
3909  	u16 max_cmd = instance->max_fw_cmds;
3910  	u32 defer_index;
3911  	unsigned long flags;
3912  
3913  	defer_index = 0;
3914  	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3915  	for (i = 0; i < max_cmd; i++) {
3916  		cmd = instance->cmd_list[i];
3917  		if (cmd->sync_cmd == 1 || cmd->scmd) {
3918  			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3919  					"on the defer queue as internal\n",
3920  				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3921  
3922  			if (!list_empty(&cmd->list)) {
3923  				dev_notice(&instance->pdev->dev, "ERROR while"
3924  					" moving this cmd:%p, %d %p, it was"
3925  					"discovered on some list?\n",
3926  					cmd, cmd->sync_cmd, cmd->scmd);
3927  
3928  				list_del_init(&cmd->list);
3929  			}
3930  			defer_index++;
3931  			list_add_tail(&cmd->list,
3932  				&instance->internal_reset_pending_q);
3933  		}
3934  	}
3935  	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3936  }
3937  
3938  
3939  static void
process_fw_state_change_wq(struct work_struct * work)3940  process_fw_state_change_wq(struct work_struct *work)
3941  {
3942  	struct megasas_instance *instance =
3943  		container_of(work, struct megasas_instance, work_init);
3944  	u32 wait;
3945  	unsigned long flags;
3946  
3947  	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3948  		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3949  			   atomic_read(&instance->adprecovery));
3950  		return ;
3951  	}
3952  
3953  	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3954  		dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3955  					"state, restarting it...\n");
3956  
3957  		instance->instancet->disable_intr(instance);
3958  		atomic_set(&instance->fw_outstanding, 0);
3959  
3960  		atomic_set(&instance->fw_reset_no_pci_access, 1);
3961  		instance->instancet->adp_reset(instance, instance->reg_set);
3962  		atomic_set(&instance->fw_reset_no_pci_access, 0);
3963  
3964  		dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3965  					"initiating next stage...\n");
3966  
3967  		dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3968  					"state 2 starting...\n");
3969  
3970  		/* waiting for about 20 second before start the second init */
3971  		for (wait = 0; wait < 30; wait++) {
3972  			msleep(1000);
3973  		}
3974  
3975  		if (megasas_transition_to_ready(instance, 1)) {
3976  			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3977  
3978  			atomic_set(&instance->fw_reset_no_pci_access, 1);
3979  			megaraid_sas_kill_hba(instance);
3980  			return ;
3981  		}
3982  
3983  		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3984  			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3985  			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3986  			) {
3987  			*instance->consumer = *instance->producer;
3988  		} else {
3989  			*instance->consumer = 0;
3990  			*instance->producer = 0;
3991  		}
3992  
3993  		megasas_issue_init_mfi(instance);
3994  
3995  		spin_lock_irqsave(&instance->hba_lock, flags);
3996  		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3997  		spin_unlock_irqrestore(&instance->hba_lock, flags);
3998  		instance->instancet->enable_intr(instance);
3999  
4000  		megasas_issue_pending_cmds_again(instance);
4001  		instance->issuepend_done = 1;
4002  	}
4003  }
4004  
4005  /**
4006   * megasas_deplete_reply_queue -	Processes all completed commands
4007   * @instance:				Adapter soft state
4008   * @alt_status:				Alternate status to be returned to
4009   *					SCSI mid-layer instead of the status
4010   *					returned by the FW
4011   * Note: this must be called with hba lock held
4012   */
4013  static int
megasas_deplete_reply_queue(struct megasas_instance * instance,u8 alt_status)4014  megasas_deplete_reply_queue(struct megasas_instance *instance,
4015  					u8 alt_status)
4016  {
4017  	u32 mfiStatus;
4018  	u32 fw_state;
4019  
4020  	if (instance->instancet->check_reset(instance, instance->reg_set) == 1)
4021  		return IRQ_HANDLED;
4022  
4023  	mfiStatus = instance->instancet->clear_intr(instance);
4024  	if (mfiStatus == 0) {
4025  		/* Hardware may not set outbound_intr_status in MSI-X mode */
4026  		if (!instance->msix_vectors)
4027  			return IRQ_NONE;
4028  	}
4029  
4030  	instance->mfiStatus = mfiStatus;
4031  
4032  	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
4033  		fw_state = instance->instancet->read_fw_status_reg(
4034  				instance) & MFI_STATE_MASK;
4035  
4036  		if (fw_state != MFI_STATE_FAULT) {
4037  			dev_notice(&instance->pdev->dev, "fw state:%x\n",
4038  						fw_state);
4039  		}
4040  
4041  		if ((fw_state == MFI_STATE_FAULT) &&
4042  				(instance->disableOnlineCtrlReset == 0)) {
4043  			dev_notice(&instance->pdev->dev, "wait adp restart\n");
4044  
4045  			if ((instance->pdev->device ==
4046  					PCI_DEVICE_ID_LSI_SAS1064R) ||
4047  				(instance->pdev->device ==
4048  					PCI_DEVICE_ID_DELL_PERC5) ||
4049  				(instance->pdev->device ==
4050  					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
4051  
4052  				*instance->consumer =
4053  					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
4054  			}
4055  
4056  
4057  			instance->instancet->disable_intr(instance);
4058  			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4059  			instance->issuepend_done = 0;
4060  
4061  			atomic_set(&instance->fw_outstanding, 0);
4062  			megasas_internal_reset_defer_cmds(instance);
4063  
4064  			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
4065  					fw_state, atomic_read(&instance->adprecovery));
4066  
4067  			schedule_work(&instance->work_init);
4068  			return IRQ_HANDLED;
4069  
4070  		} else {
4071  			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
4072  				fw_state, instance->disableOnlineCtrlReset);
4073  		}
4074  	}
4075  
4076  	tasklet_schedule(&instance->isr_tasklet);
4077  	return IRQ_HANDLED;
4078  }
4079  
4080  /**
4081   * megasas_isr - isr entry point
4082   * @irq:	IRQ number
4083   * @devp:	IRQ context address
4084   */
megasas_isr(int irq,void * devp)4085  static irqreturn_t megasas_isr(int irq, void *devp)
4086  {
4087  	struct megasas_irq_context *irq_context = devp;
4088  	struct megasas_instance *instance = irq_context->instance;
4089  	unsigned long flags;
4090  	irqreturn_t rc;
4091  
4092  	if (atomic_read(&instance->fw_reset_no_pci_access))
4093  		return IRQ_HANDLED;
4094  
4095  	spin_lock_irqsave(&instance->hba_lock, flags);
4096  	rc = megasas_deplete_reply_queue(instance, DID_OK);
4097  	spin_unlock_irqrestore(&instance->hba_lock, flags);
4098  
4099  	return rc;
4100  }
4101  
4102  /**
4103   * megasas_transition_to_ready -	Move the FW to READY state
4104   * @instance:				Adapter soft state
4105   * @ocr:				Adapter reset state
4106   *
4107   * During the initialization, FW passes can potentially be in any one of
4108   * several possible states. If the FW in operational, waiting-for-handshake
4109   * states, driver must take steps to bring it to ready state. Otherwise, it
4110   * has to wait for the ready state.
4111   */
4112  int
megasas_transition_to_ready(struct megasas_instance * instance,int ocr)4113  megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
4114  {
4115  	int i;
4116  	u8 max_wait;
4117  	u32 fw_state;
4118  	u32 abs_state, curr_abs_state;
4119  
4120  	abs_state = instance->instancet->read_fw_status_reg(instance);
4121  	fw_state = abs_state & MFI_STATE_MASK;
4122  
4123  	if (fw_state != MFI_STATE_READY)
4124  		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4125  		       " state\n");
4126  
4127  	while (fw_state != MFI_STATE_READY) {
4128  
4129  		switch (fw_state) {
4130  
4131  		case MFI_STATE_FAULT:
4132  			dev_printk(KERN_ERR, &instance->pdev->dev,
4133  				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4134  				   abs_state & MFI_STATE_FAULT_CODE,
4135  				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4136  			if (ocr) {
4137  				max_wait = MEGASAS_RESET_WAIT_TIME;
4138  				break;
4139  			} else {
4140  				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4141  				megasas_dump_reg_set(instance->reg_set);
4142  				return -ENODEV;
4143  			}
4144  
4145  		case MFI_STATE_WAIT_HANDSHAKE:
4146  			/*
4147  			 * Set the CLR bit in inbound doorbell
4148  			 */
4149  			if ((instance->pdev->device ==
4150  				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4151  				(instance->pdev->device ==
4152  				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4153  				(instance->adapter_type != MFI_SERIES))
4154  				writel(
4155  				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4156  				  &instance->reg_set->doorbell);
4157  			else
4158  				writel(
4159  				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4160  					&instance->reg_set->inbound_doorbell);
4161  
4162  			max_wait = MEGASAS_RESET_WAIT_TIME;
4163  			break;
4164  
4165  		case MFI_STATE_BOOT_MESSAGE_PENDING:
4166  			if ((instance->pdev->device ==
4167  			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4168  				(instance->pdev->device ==
4169  				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4170  				(instance->adapter_type != MFI_SERIES))
4171  				writel(MFI_INIT_HOTPLUG,
4172  				       &instance->reg_set->doorbell);
4173  			else
4174  				writel(MFI_INIT_HOTPLUG,
4175  					&instance->reg_set->inbound_doorbell);
4176  
4177  			max_wait = MEGASAS_RESET_WAIT_TIME;
4178  			break;
4179  
4180  		case MFI_STATE_OPERATIONAL:
4181  			/*
4182  			 * Bring it to READY state; assuming max wait 10 secs
4183  			 */
4184  			instance->instancet->disable_intr(instance);
4185  			if ((instance->pdev->device ==
4186  				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4187  				(instance->pdev->device ==
4188  				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
4189  				(instance->adapter_type != MFI_SERIES)) {
4190  				writel(MFI_RESET_FLAGS,
4191  					&instance->reg_set->doorbell);
4192  
4193  				if (instance->adapter_type != MFI_SERIES) {
4194  					for (i = 0; i < (10 * 1000); i += 20) {
4195  						if (megasas_readl(
4196  							    instance,
4197  							    &instance->
4198  							    reg_set->
4199  							    doorbell) & 1)
4200  							msleep(20);
4201  						else
4202  							break;
4203  					}
4204  				}
4205  			} else
4206  				writel(MFI_RESET_FLAGS,
4207  					&instance->reg_set->inbound_doorbell);
4208  
4209  			max_wait = MEGASAS_RESET_WAIT_TIME;
4210  			break;
4211  
4212  		case MFI_STATE_UNDEFINED:
4213  			/*
4214  			 * This state should not last for more than 2 seconds
4215  			 */
4216  			max_wait = MEGASAS_RESET_WAIT_TIME;
4217  			break;
4218  
4219  		case MFI_STATE_BB_INIT:
4220  			max_wait = MEGASAS_RESET_WAIT_TIME;
4221  			break;
4222  
4223  		case MFI_STATE_FW_INIT:
4224  			max_wait = MEGASAS_RESET_WAIT_TIME;
4225  			break;
4226  
4227  		case MFI_STATE_FW_INIT_2:
4228  			max_wait = MEGASAS_RESET_WAIT_TIME;
4229  			break;
4230  
4231  		case MFI_STATE_DEVICE_SCAN:
4232  			max_wait = MEGASAS_RESET_WAIT_TIME;
4233  			break;
4234  
4235  		case MFI_STATE_FLUSH_CACHE:
4236  			max_wait = MEGASAS_RESET_WAIT_TIME;
4237  			break;
4238  
4239  		default:
4240  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4241  			       fw_state);
4242  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4243  			megasas_dump_reg_set(instance->reg_set);
4244  			return -ENODEV;
4245  		}
4246  
4247  		/*
4248  		 * The cur_state should not last for more than max_wait secs
4249  		 */
4250  		for (i = 0; i < max_wait * 50; i++) {
4251  			curr_abs_state = instance->instancet->
4252  				read_fw_status_reg(instance);
4253  
4254  			if (abs_state == curr_abs_state) {
4255  				msleep(20);
4256  			} else
4257  				break;
4258  		}
4259  
4260  		/*
4261  		 * Return error if fw_state hasn't changed after max_wait
4262  		 */
4263  		if (curr_abs_state == abs_state) {
4264  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4265  			       "in %d secs\n", fw_state, max_wait);
4266  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4267  			megasas_dump_reg_set(instance->reg_set);
4268  			return -ENODEV;
4269  		}
4270  
4271  		abs_state = curr_abs_state;
4272  		fw_state = curr_abs_state & MFI_STATE_MASK;
4273  	}
4274  	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4275  
4276  	return 0;
4277  }
4278  
4279  /**
4280   * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4281   * @instance:				Adapter soft state
4282   */
megasas_teardown_frame_pool(struct megasas_instance * instance)4283  static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4284  {
4285  	int i;
4286  	u16 max_cmd = instance->max_mfi_cmds;
4287  	struct megasas_cmd *cmd;
4288  
4289  	if (!instance->frame_dma_pool)
4290  		return;
4291  
4292  	/*
4293  	 * Return all frames to pool
4294  	 */
4295  	for (i = 0; i < max_cmd; i++) {
4296  
4297  		cmd = instance->cmd_list[i];
4298  
4299  		if (cmd->frame)
4300  			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4301  				      cmd->frame_phys_addr);
4302  
4303  		if (cmd->sense)
4304  			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4305  				      cmd->sense_phys_addr);
4306  	}
4307  
4308  	/*
4309  	 * Now destroy the pool itself
4310  	 */
4311  	dma_pool_destroy(instance->frame_dma_pool);
4312  	dma_pool_destroy(instance->sense_dma_pool);
4313  
4314  	instance->frame_dma_pool = NULL;
4315  	instance->sense_dma_pool = NULL;
4316  }
4317  
4318  /**
4319   * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4320   * @instance:			Adapter soft state
4321   *
4322   * Each command packet has an embedded DMA memory buffer that is used for
4323   * filling MFI frame and the SG list that immediately follows the frame. This
4324   * function creates those DMA memory buffers for each command packet by using
4325   * PCI pool facility.
4326   */
megasas_create_frame_pool(struct megasas_instance * instance)4327  static int megasas_create_frame_pool(struct megasas_instance *instance)
4328  {
4329  	int i;
4330  	u16 max_cmd;
4331  	u32 frame_count;
4332  	struct megasas_cmd *cmd;
4333  
4334  	max_cmd = instance->max_mfi_cmds;
4335  
4336  	/*
4337  	 * For MFI controllers.
4338  	 * max_num_sge = 60
4339  	 * max_sge_sz  = 16 byte (sizeof megasas_sge_skinny)
4340  	 * Total 960 byte (15 MFI frame of 64 byte)
4341  	 *
4342  	 * Fusion adapter require only 3 extra frame.
4343  	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4344  	 * max_sge_sz  = 12 byte (sizeof  megasas_sge64)
4345  	 * Total 192 byte (3 MFI frame of 64 byte)
4346  	 */
4347  	frame_count = (instance->adapter_type == MFI_SERIES) ?
4348  			(15 + 1) : (3 + 1);
4349  	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
4350  	/*
4351  	 * Use DMA pool facility provided by PCI layer
4352  	 */
4353  	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4354  					&instance->pdev->dev,
4355  					instance->mfi_frame_size, 256, 0);
4356  
4357  	if (!instance->frame_dma_pool) {
4358  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4359  		return -ENOMEM;
4360  	}
4361  
4362  	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4363  						   &instance->pdev->dev, 128,
4364  						   4, 0);
4365  
4366  	if (!instance->sense_dma_pool) {
4367  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4368  
4369  		dma_pool_destroy(instance->frame_dma_pool);
4370  		instance->frame_dma_pool = NULL;
4371  
4372  		return -ENOMEM;
4373  	}
4374  
4375  	/*
4376  	 * Allocate and attach a frame to each of the commands in cmd_list.
4377  	 * By making cmd->index as the context instead of the &cmd, we can
4378  	 * always use 32bit context regardless of the architecture
4379  	 */
4380  	for (i = 0; i < max_cmd; i++) {
4381  
4382  		cmd = instance->cmd_list[i];
4383  
4384  		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4385  					    GFP_KERNEL, &cmd->frame_phys_addr);
4386  
4387  		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4388  					    GFP_KERNEL, &cmd->sense_phys_addr);
4389  
4390  		/*
4391  		 * megasas_teardown_frame_pool() takes care of freeing
4392  		 * whatever has been allocated
4393  		 */
4394  		if (!cmd->frame || !cmd->sense) {
4395  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4396  			megasas_teardown_frame_pool(instance);
4397  			return -ENOMEM;
4398  		}
4399  
4400  		cmd->frame->io.context = cpu_to_le32(cmd->index);
4401  		cmd->frame->io.pad_0 = 0;
4402  		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4403  			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4404  	}
4405  
4406  	return 0;
4407  }
4408  
4409  /**
4410   * megasas_free_cmds -	Free all the cmds in the free cmd pool
4411   * @instance:		Adapter soft state
4412   */
megasas_free_cmds(struct megasas_instance * instance)4413  void megasas_free_cmds(struct megasas_instance *instance)
4414  {
4415  	int i;
4416  
4417  	/* First free the MFI frame pool */
4418  	megasas_teardown_frame_pool(instance);
4419  
4420  	/* Free all the commands in the cmd_list */
4421  	for (i = 0; i < instance->max_mfi_cmds; i++)
4422  
4423  		kfree(instance->cmd_list[i]);
4424  
4425  	/* Free the cmd_list buffer itself */
4426  	kfree(instance->cmd_list);
4427  	instance->cmd_list = NULL;
4428  
4429  	INIT_LIST_HEAD(&instance->cmd_pool);
4430  }
4431  
4432  /**
4433   * megasas_alloc_cmds -	Allocates the command packets
4434   * @instance:		Adapter soft state
4435   *
4436   * Each command that is issued to the FW, whether IO commands from the OS or
4437   * internal commands like IOCTLs, are wrapped in local data structure called
4438   * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4439   * the FW.
4440   *
4441   * Each frame has a 32-bit field called context (tag). This context is used
4442   * to get back the megasas_cmd from the frame when a frame gets completed in
4443   * the ISR. Typically the address of the megasas_cmd itself would be used as
4444   * the context. But we wanted to keep the differences between 32 and 64 bit
4445   * systems to the mininum. We always use 32 bit integers for the context. In
4446   * this driver, the 32 bit values are the indices into an array cmd_list.
4447   * This array is used only to look up the megasas_cmd given the context. The
4448   * free commands themselves are maintained in a linked list called cmd_pool.
4449   */
megasas_alloc_cmds(struct megasas_instance * instance)4450  int megasas_alloc_cmds(struct megasas_instance *instance)
4451  {
4452  	int i;
4453  	int j;
4454  	u16 max_cmd;
4455  	struct megasas_cmd *cmd;
4456  
4457  	max_cmd = instance->max_mfi_cmds;
4458  
4459  	/*
4460  	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4461  	 * Allocate the dynamic array first and then allocate individual
4462  	 * commands.
4463  	 */
4464  	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4465  
4466  	if (!instance->cmd_list) {
4467  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4468  		return -ENOMEM;
4469  	}
4470  
4471  	for (i = 0; i < max_cmd; i++) {
4472  		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4473  						GFP_KERNEL);
4474  
4475  		if (!instance->cmd_list[i]) {
4476  
4477  			for (j = 0; j < i; j++)
4478  				kfree(instance->cmd_list[j]);
4479  
4480  			kfree(instance->cmd_list);
4481  			instance->cmd_list = NULL;
4482  
4483  			return -ENOMEM;
4484  		}
4485  	}
4486  
4487  	for (i = 0; i < max_cmd; i++) {
4488  		cmd = instance->cmd_list[i];
4489  		memset(cmd, 0, sizeof(struct megasas_cmd));
4490  		cmd->index = i;
4491  		cmd->scmd = NULL;
4492  		cmd->instance = instance;
4493  
4494  		list_add_tail(&cmd->list, &instance->cmd_pool);
4495  	}
4496  
4497  	/*
4498  	 * Create a frame pool and assign one frame to each cmd
4499  	 */
4500  	if (megasas_create_frame_pool(instance)) {
4501  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4502  		megasas_free_cmds(instance);
4503  		return -ENOMEM;
4504  	}
4505  
4506  	return 0;
4507  }
4508  
4509  /*
4510   * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4511   * @instance:				Adapter soft state
4512   *
4513   * Return 0 for only Fusion adapter, if driver load/unload is not in progress
4514   * or FW is not under OCR.
4515   */
4516  inline int
dcmd_timeout_ocr_possible(struct megasas_instance * instance)4517  dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4518  
4519  	if (instance->adapter_type == MFI_SERIES)
4520  		return KILL_ADAPTER;
4521  	else if (instance->unload ||
4522  			test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4523  				 &instance->reset_flags))
4524  		return IGNORE_TIMEOUT;
4525  	else
4526  		return INITIATE_OCR;
4527  }
4528  
4529  static void
megasas_get_pd_info(struct megasas_instance * instance,struct scsi_device * sdev)4530  megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4531  {
4532  	int ret;
4533  	struct megasas_cmd *cmd;
4534  	struct megasas_dcmd_frame *dcmd;
4535  
4536  	struct MR_PRIV_DEVICE *mr_device_priv_data;
4537  	u16 device_id = 0;
4538  
4539  	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4540  	cmd = megasas_get_cmd(instance);
4541  
4542  	if (!cmd) {
4543  		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4544  		return;
4545  	}
4546  
4547  	dcmd = &cmd->frame->dcmd;
4548  
4549  	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4550  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4551  
4552  	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4553  	dcmd->cmd = MFI_CMD_DCMD;
4554  	dcmd->cmd_status = 0xFF;
4555  	dcmd->sge_count = 1;
4556  	dcmd->flags = MFI_FRAME_DIR_READ;
4557  	dcmd->timeout = 0;
4558  	dcmd->pad_0 = 0;
4559  	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4560  	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4561  
4562  	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4563  				 sizeof(struct MR_PD_INFO));
4564  
4565  	if ((instance->adapter_type != MFI_SERIES) &&
4566  	    !instance->mask_interrupts)
4567  		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4568  	else
4569  		ret = megasas_issue_polled(instance, cmd);
4570  
4571  	switch (ret) {
4572  	case DCMD_SUCCESS:
4573  		mr_device_priv_data = sdev->hostdata;
4574  		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4575  		mr_device_priv_data->interface_type =
4576  				instance->pd_info->state.ddf.pdType.intf;
4577  		break;
4578  
4579  	case DCMD_TIMEOUT:
4580  
4581  		switch (dcmd_timeout_ocr_possible(instance)) {
4582  		case INITIATE_OCR:
4583  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4584  			mutex_unlock(&instance->reset_mutex);
4585  			megasas_reset_fusion(instance->host,
4586  				MFI_IO_TIMEOUT_OCR);
4587  			mutex_lock(&instance->reset_mutex);
4588  			break;
4589  		case KILL_ADAPTER:
4590  			megaraid_sas_kill_hba(instance);
4591  			break;
4592  		case IGNORE_TIMEOUT:
4593  			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4594  				__func__, __LINE__);
4595  			break;
4596  		}
4597  
4598  		break;
4599  	}
4600  
4601  	if (ret != DCMD_TIMEOUT)
4602  		megasas_return_cmd(instance, cmd);
4603  
4604  	return;
4605  }
4606  /*
4607   * megasas_get_pd_list_info -	Returns FW's pd_list structure
4608   * @instance:				Adapter soft state
4609   * @pd_list:				pd_list structure
4610   *
4611   * Issues an internal command (DCMD) to get the FW's controller PD
4612   * list structure.  This information is mainly used to find out SYSTEM
4613   * supported by the FW.
4614   */
4615  static int
megasas_get_pd_list(struct megasas_instance * instance)4616  megasas_get_pd_list(struct megasas_instance *instance)
4617  {
4618  	int ret = 0, pd_index = 0;
4619  	struct megasas_cmd *cmd;
4620  	struct megasas_dcmd_frame *dcmd;
4621  	struct MR_PD_LIST *ci;
4622  	struct MR_PD_ADDRESS *pd_addr;
4623  
4624  	if (instance->pd_list_not_supported) {
4625  		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4626  		"not supported by firmware\n");
4627  		return ret;
4628  	}
4629  
4630  	ci = instance->pd_list_buf;
4631  
4632  	cmd = megasas_get_cmd(instance);
4633  
4634  	if (!cmd) {
4635  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4636  		return -ENOMEM;
4637  	}
4638  
4639  	dcmd = &cmd->frame->dcmd;
4640  
4641  	memset(ci, 0, sizeof(*ci));
4642  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4643  
4644  	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4645  	dcmd->mbox.b[1] = 0;
4646  	dcmd->cmd = MFI_CMD_DCMD;
4647  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4648  	dcmd->sge_count = 1;
4649  	dcmd->flags = MFI_FRAME_DIR_READ;
4650  	dcmd->timeout = 0;
4651  	dcmd->pad_0 = 0;
4652  	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4653  	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4654  
4655  	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4656  				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4657  
4658  	if ((instance->adapter_type != MFI_SERIES) &&
4659  	    !instance->mask_interrupts)
4660  		ret = megasas_issue_blocked_cmd(instance, cmd,
4661  			MFI_IO_TIMEOUT_SECS);
4662  	else
4663  		ret = megasas_issue_polled(instance, cmd);
4664  
4665  	switch (ret) {
4666  	case DCMD_FAILED:
4667  		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4668  			"failed/not supported by firmware\n");
4669  
4670  		if (instance->adapter_type != MFI_SERIES)
4671  			megaraid_sas_kill_hba(instance);
4672  		else
4673  			instance->pd_list_not_supported = 1;
4674  		break;
4675  	case DCMD_TIMEOUT:
4676  
4677  		switch (dcmd_timeout_ocr_possible(instance)) {
4678  		case INITIATE_OCR:
4679  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4680  			/*
4681  			 * DCMD failed from AEN path.
4682  			 * AEN path already hold reset_mutex to avoid PCI access
4683  			 * while OCR is in progress.
4684  			 */
4685  			mutex_unlock(&instance->reset_mutex);
4686  			megasas_reset_fusion(instance->host,
4687  						MFI_IO_TIMEOUT_OCR);
4688  			mutex_lock(&instance->reset_mutex);
4689  			break;
4690  		case KILL_ADAPTER:
4691  			megaraid_sas_kill_hba(instance);
4692  			break;
4693  		case IGNORE_TIMEOUT:
4694  			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4695  				__func__, __LINE__);
4696  			break;
4697  		}
4698  
4699  		break;
4700  
4701  	case DCMD_SUCCESS:
4702  		pd_addr = ci->addr;
4703  		if (megasas_dbg_lvl & LD_PD_DEBUG)
4704  			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4705  				 __func__, le32_to_cpu(ci->count));
4706  
4707  		if ((le32_to_cpu(ci->count) >
4708  			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4709  			break;
4710  
4711  		memset(instance->local_pd_list, 0,
4712  				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4713  
4714  		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4715  			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4716  					le16_to_cpu(pd_addr->deviceId);
4717  			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4718  					pd_addr->scsiDevType;
4719  			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4720  					MR_PD_STATE_SYSTEM;
4721  			if (megasas_dbg_lvl & LD_PD_DEBUG)
4722  				dev_info(&instance->pdev->dev,
4723  					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4724  					 pd_index, le16_to_cpu(pd_addr->deviceId),
4725  					 pd_addr->scsiDevType);
4726  			pd_addr++;
4727  		}
4728  
4729  		memcpy(instance->pd_list, instance->local_pd_list,
4730  			sizeof(instance->pd_list));
4731  		break;
4732  
4733  	}
4734  
4735  	if (ret != DCMD_TIMEOUT)
4736  		megasas_return_cmd(instance, cmd);
4737  
4738  	return ret;
4739  }
4740  
4741  /*
4742   * megasas_get_ld_list_info -	Returns FW's ld_list structure
4743   * @instance:				Adapter soft state
4744   * @ld_list:				ld_list structure
4745   *
4746   * Issues an internal command (DCMD) to get the FW's controller PD
4747   * list structure.  This information is mainly used to find out SYSTEM
4748   * supported by the FW.
4749   */
4750  static int
megasas_get_ld_list(struct megasas_instance * instance)4751  megasas_get_ld_list(struct megasas_instance *instance)
4752  {
4753  	int ret = 0, ld_index = 0, ids = 0;
4754  	struct megasas_cmd *cmd;
4755  	struct megasas_dcmd_frame *dcmd;
4756  	struct MR_LD_LIST *ci;
4757  	dma_addr_t ci_h = 0;
4758  	u32 ld_count;
4759  
4760  	ci = instance->ld_list_buf;
4761  	ci_h = instance->ld_list_buf_h;
4762  
4763  	cmd = megasas_get_cmd(instance);
4764  
4765  	if (!cmd) {
4766  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4767  		return -ENOMEM;
4768  	}
4769  
4770  	dcmd = &cmd->frame->dcmd;
4771  
4772  	memset(ci, 0, sizeof(*ci));
4773  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4774  
4775  	if (instance->supportmax256vd)
4776  		dcmd->mbox.b[0] = 1;
4777  	dcmd->cmd = MFI_CMD_DCMD;
4778  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4779  	dcmd->sge_count = 1;
4780  	dcmd->flags = MFI_FRAME_DIR_READ;
4781  	dcmd->timeout = 0;
4782  	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4783  	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4784  	dcmd->pad_0  = 0;
4785  
4786  	megasas_set_dma_settings(instance, dcmd, ci_h,
4787  				 sizeof(struct MR_LD_LIST));
4788  
4789  	if ((instance->adapter_type != MFI_SERIES) &&
4790  	    !instance->mask_interrupts)
4791  		ret = megasas_issue_blocked_cmd(instance, cmd,
4792  			MFI_IO_TIMEOUT_SECS);
4793  	else
4794  		ret = megasas_issue_polled(instance, cmd);
4795  
4796  	ld_count = le32_to_cpu(ci->ldCount);
4797  
4798  	switch (ret) {
4799  	case DCMD_FAILED:
4800  		megaraid_sas_kill_hba(instance);
4801  		break;
4802  	case DCMD_TIMEOUT:
4803  
4804  		switch (dcmd_timeout_ocr_possible(instance)) {
4805  		case INITIATE_OCR:
4806  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4807  			/*
4808  			 * DCMD failed from AEN path.
4809  			 * AEN path already hold reset_mutex to avoid PCI access
4810  			 * while OCR is in progress.
4811  			 */
4812  			mutex_unlock(&instance->reset_mutex);
4813  			megasas_reset_fusion(instance->host,
4814  						MFI_IO_TIMEOUT_OCR);
4815  			mutex_lock(&instance->reset_mutex);
4816  			break;
4817  		case KILL_ADAPTER:
4818  			megaraid_sas_kill_hba(instance);
4819  			break;
4820  		case IGNORE_TIMEOUT:
4821  			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4822  				__func__, __LINE__);
4823  			break;
4824  		}
4825  
4826  		break;
4827  
4828  	case DCMD_SUCCESS:
4829  		if (megasas_dbg_lvl & LD_PD_DEBUG)
4830  			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4831  				 __func__, ld_count);
4832  
4833  		if (ld_count > instance->fw_supported_vd_count)
4834  			break;
4835  
4836  		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4837  
4838  		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4839  			if (ci->ldList[ld_index].state != 0) {
4840  				ids = ci->ldList[ld_index].ref.targetId;
4841  				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4842  				if (megasas_dbg_lvl & LD_PD_DEBUG)
4843  					dev_info(&instance->pdev->dev,
4844  						 "LD%d: targetID: 0x%03x\n",
4845  						 ld_index, ids);
4846  			}
4847  		}
4848  
4849  		break;
4850  	}
4851  
4852  	if (ret != DCMD_TIMEOUT)
4853  		megasas_return_cmd(instance, cmd);
4854  
4855  	return ret;
4856  }
4857  
4858  /**
4859   * megasas_ld_list_query -	Returns FW's ld_list structure
4860   * @instance:				Adapter soft state
4861   * @query_type:				ld_list structure type
4862   *
4863   * Issues an internal command (DCMD) to get the FW's controller PD
4864   * list structure.  This information is mainly used to find out SYSTEM
4865   * supported by the FW.
4866   */
4867  static int
megasas_ld_list_query(struct megasas_instance * instance,u8 query_type)4868  megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4869  {
4870  	int ret = 0, ld_index = 0, ids = 0;
4871  	struct megasas_cmd *cmd;
4872  	struct megasas_dcmd_frame *dcmd;
4873  	struct MR_LD_TARGETID_LIST *ci;
4874  	dma_addr_t ci_h = 0;
4875  	u32 tgtid_count;
4876  
4877  	ci = instance->ld_targetid_list_buf;
4878  	ci_h = instance->ld_targetid_list_buf_h;
4879  
4880  	cmd = megasas_get_cmd(instance);
4881  
4882  	if (!cmd) {
4883  		dev_warn(&instance->pdev->dev,
4884  		         "megasas_ld_list_query: Failed to get cmd\n");
4885  		return -ENOMEM;
4886  	}
4887  
4888  	dcmd = &cmd->frame->dcmd;
4889  
4890  	memset(ci, 0, sizeof(*ci));
4891  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4892  
4893  	dcmd->mbox.b[0] = query_type;
4894  	if (instance->supportmax256vd)
4895  		dcmd->mbox.b[2] = 1;
4896  
4897  	dcmd->cmd = MFI_CMD_DCMD;
4898  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4899  	dcmd->sge_count = 1;
4900  	dcmd->flags = MFI_FRAME_DIR_READ;
4901  	dcmd->timeout = 0;
4902  	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4903  	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4904  	dcmd->pad_0  = 0;
4905  
4906  	megasas_set_dma_settings(instance, dcmd, ci_h,
4907  				 sizeof(struct MR_LD_TARGETID_LIST));
4908  
4909  	if ((instance->adapter_type != MFI_SERIES) &&
4910  	    !instance->mask_interrupts)
4911  		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4912  	else
4913  		ret = megasas_issue_polled(instance, cmd);
4914  
4915  	switch (ret) {
4916  	case DCMD_FAILED:
4917  		dev_info(&instance->pdev->dev,
4918  			"DCMD not supported by firmware - %s %d\n",
4919  				__func__, __LINE__);
4920  		ret = megasas_get_ld_list(instance);
4921  		break;
4922  	case DCMD_TIMEOUT:
4923  		switch (dcmd_timeout_ocr_possible(instance)) {
4924  		case INITIATE_OCR:
4925  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4926  			/*
4927  			 * DCMD failed from AEN path.
4928  			 * AEN path already hold reset_mutex to avoid PCI access
4929  			 * while OCR is in progress.
4930  			 */
4931  			mutex_unlock(&instance->reset_mutex);
4932  			megasas_reset_fusion(instance->host,
4933  						MFI_IO_TIMEOUT_OCR);
4934  			mutex_lock(&instance->reset_mutex);
4935  			break;
4936  		case KILL_ADAPTER:
4937  			megaraid_sas_kill_hba(instance);
4938  			break;
4939  		case IGNORE_TIMEOUT:
4940  			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4941  				__func__, __LINE__);
4942  			break;
4943  		}
4944  
4945  		break;
4946  	case DCMD_SUCCESS:
4947  		tgtid_count = le32_to_cpu(ci->count);
4948  
4949  		if (megasas_dbg_lvl & LD_PD_DEBUG)
4950  			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4951  				 __func__, tgtid_count);
4952  
4953  		if ((tgtid_count > (instance->fw_supported_vd_count)))
4954  			break;
4955  
4956  		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4957  		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4958  			ids = ci->targetId[ld_index];
4959  			instance->ld_ids[ids] = ci->targetId[ld_index];
4960  			if (megasas_dbg_lvl & LD_PD_DEBUG)
4961  				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4962  					 ld_index, ci->targetId[ld_index]);
4963  		}
4964  
4965  		break;
4966  	}
4967  
4968  	if (ret != DCMD_TIMEOUT)
4969  		megasas_return_cmd(instance, cmd);
4970  
4971  	return ret;
4972  }
4973  
4974  /**
4975   * megasas_host_device_list_query
4976   * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
4977   * dcmd.mbox              - reserved
4978   * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
4979   * Desc:    This DCMD will return the combined device list
4980   * Status:  MFI_STAT_OK - List returned successfully
4981   *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4982   *                                 disabled
4983   * @instance:			Adapter soft state
4984   * @is_probe:			Driver probe check
4985   * Return:			0 if DCMD succeeded
4986   *				 non-zero if failed
4987   */
4988  static int
megasas_host_device_list_query(struct megasas_instance * instance,bool is_probe)4989  megasas_host_device_list_query(struct megasas_instance *instance,
4990  			       bool is_probe)
4991  {
4992  	int ret, i, target_id;
4993  	struct megasas_cmd *cmd;
4994  	struct megasas_dcmd_frame *dcmd;
4995  	struct MR_HOST_DEVICE_LIST *ci;
4996  	u32 count;
4997  	dma_addr_t ci_h;
4998  
4999  	ci = instance->host_device_list_buf;
5000  	ci_h = instance->host_device_list_buf_h;
5001  
5002  	cmd = megasas_get_cmd(instance);
5003  
5004  	if (!cmd) {
5005  		dev_warn(&instance->pdev->dev,
5006  			 "%s: failed to get cmd\n",
5007  			 __func__);
5008  		return -ENOMEM;
5009  	}
5010  
5011  	dcmd = &cmd->frame->dcmd;
5012  
5013  	memset(ci, 0, sizeof(*ci));
5014  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5015  
5016  	dcmd->mbox.b[0] = is_probe ? 0 : 1;
5017  	dcmd->cmd = MFI_CMD_DCMD;
5018  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5019  	dcmd->sge_count = 1;
5020  	dcmd->flags = MFI_FRAME_DIR_READ;
5021  	dcmd->timeout = 0;
5022  	dcmd->pad_0 = 0;
5023  	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
5024  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
5025  
5026  	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
5027  
5028  	if (!instance->mask_interrupts) {
5029  		ret = megasas_issue_blocked_cmd(instance, cmd,
5030  						MFI_IO_TIMEOUT_SECS);
5031  	} else {
5032  		ret = megasas_issue_polled(instance, cmd);
5033  		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5034  	}
5035  
5036  	switch (ret) {
5037  	case DCMD_SUCCESS:
5038  		/* Fill the internal pd_list and ld_ids array based on
5039  		 * targetIds returned by FW
5040  		 */
5041  		count = le32_to_cpu(ci->count);
5042  
5043  		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
5044  			break;
5045  
5046  		if (megasas_dbg_lvl & LD_PD_DEBUG)
5047  			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
5048  				 __func__, count);
5049  
5050  		memset(instance->local_pd_list, 0,
5051  		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
5052  		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
5053  		for (i = 0; i < count; i++) {
5054  			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
5055  			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
5056  				instance->local_pd_list[target_id].tid = target_id;
5057  				instance->local_pd_list[target_id].driveType =
5058  						ci->host_device_list[i].scsi_type;
5059  				instance->local_pd_list[target_id].driveState =
5060  						MR_PD_STATE_SYSTEM;
5061  				if (megasas_dbg_lvl & LD_PD_DEBUG)
5062  					dev_info(&instance->pdev->dev,
5063  						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
5064  						 i, target_id, ci->host_device_list[i].scsi_type);
5065  			} else {
5066  				instance->ld_ids[target_id] = target_id;
5067  				if (megasas_dbg_lvl & LD_PD_DEBUG)
5068  					dev_info(&instance->pdev->dev,
5069  						 "Device %d: LD targetID: 0x%03x\n",
5070  						 i, target_id);
5071  			}
5072  		}
5073  
5074  		memcpy(instance->pd_list, instance->local_pd_list,
5075  		       sizeof(instance->pd_list));
5076  		break;
5077  
5078  	case DCMD_TIMEOUT:
5079  		switch (dcmd_timeout_ocr_possible(instance)) {
5080  		case INITIATE_OCR:
5081  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5082  			mutex_unlock(&instance->reset_mutex);
5083  			megasas_reset_fusion(instance->host,
5084  				MFI_IO_TIMEOUT_OCR);
5085  			mutex_lock(&instance->reset_mutex);
5086  			break;
5087  		case KILL_ADAPTER:
5088  			megaraid_sas_kill_hba(instance);
5089  			break;
5090  		case IGNORE_TIMEOUT:
5091  			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5092  				 __func__, __LINE__);
5093  			break;
5094  		}
5095  		break;
5096  	case DCMD_FAILED:
5097  		dev_err(&instance->pdev->dev,
5098  			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
5099  			__func__);
5100  		break;
5101  	}
5102  
5103  	if (ret != DCMD_TIMEOUT)
5104  		megasas_return_cmd(instance, cmd);
5105  
5106  	return ret;
5107  }
5108  
5109  /*
5110   * megasas_update_ext_vd_details : Update details w.r.t Extended VD
5111   * instance			 : Controller's instance
5112  */
megasas_update_ext_vd_details(struct megasas_instance * instance)5113  static void megasas_update_ext_vd_details(struct megasas_instance *instance)
5114  {
5115  	struct fusion_context *fusion;
5116  	u32 ventura_map_sz = 0;
5117  
5118  	fusion = instance->ctrl_context;
5119  	/* For MFI based controllers return dummy success */
5120  	if (!fusion)
5121  		return;
5122  
5123  	instance->supportmax256vd =
5124  		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
5125  	/* Below is additional check to address future FW enhancement */
5126  	if (instance->ctrl_info_buf->max_lds > 64)
5127  		instance->supportmax256vd = 1;
5128  
5129  	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5130  					* MEGASAS_MAX_DEV_PER_CHANNEL;
5131  	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5132  					* MEGASAS_MAX_DEV_PER_CHANNEL;
5133  	if (instance->supportmax256vd) {
5134  		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5135  		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5136  	} else {
5137  		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5138  		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5139  	}
5140  
5141  	dev_info(&instance->pdev->dev,
5142  		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5143  		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5144  		instance->ctrl_info_buf->max_lds);
5145  
5146  	if (instance->max_raid_mapsize) {
5147  		ventura_map_sz = instance->max_raid_mapsize *
5148  						MR_MIN_MAP_SIZE; /* 64k */
5149  		fusion->current_map_sz = ventura_map_sz;
5150  		fusion->max_map_sz = ventura_map_sz;
5151  	} else {
5152  		fusion->old_map_sz =
5153  			struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap,
5154  				      instance->fw_supported_vd_count);
5155  		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
5156  
5157  		fusion->max_map_sz =
5158  			max(fusion->old_map_sz, fusion->new_map_sz);
5159  
5160  		if (instance->supportmax256vd)
5161  			fusion->current_map_sz = fusion->new_map_sz;
5162  		else
5163  			fusion->current_map_sz = fusion->old_map_sz;
5164  	}
5165  	/* irrespective of FW raid maps, driver raid map is constant */
5166  	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5167  }
5168  
5169  /*
5170   * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5171   * dcmd.hdr.length            - number of bytes to read
5172   * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
5173   * Desc:			 Fill in snapdump properties
5174   * Status:			 MFI_STAT_OK- Command successful
5175   */
megasas_get_snapdump_properties(struct megasas_instance * instance)5176  void megasas_get_snapdump_properties(struct megasas_instance *instance)
5177  {
5178  	int ret = 0;
5179  	struct megasas_cmd *cmd;
5180  	struct megasas_dcmd_frame *dcmd;
5181  	struct MR_SNAPDUMP_PROPERTIES *ci;
5182  	dma_addr_t ci_h = 0;
5183  
5184  	ci = instance->snapdump_prop;
5185  	ci_h = instance->snapdump_prop_h;
5186  
5187  	if (!ci)
5188  		return;
5189  
5190  	cmd = megasas_get_cmd(instance);
5191  
5192  	if (!cmd) {
5193  		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5194  		return;
5195  	}
5196  
5197  	dcmd = &cmd->frame->dcmd;
5198  
5199  	memset(ci, 0, sizeof(*ci));
5200  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5201  
5202  	dcmd->cmd = MFI_CMD_DCMD;
5203  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5204  	dcmd->sge_count = 1;
5205  	dcmd->flags = MFI_FRAME_DIR_READ;
5206  	dcmd->timeout = 0;
5207  	dcmd->pad_0 = 0;
5208  	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5209  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5210  
5211  	megasas_set_dma_settings(instance, dcmd, ci_h,
5212  				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5213  
5214  	if (!instance->mask_interrupts) {
5215  		ret = megasas_issue_blocked_cmd(instance, cmd,
5216  						MFI_IO_TIMEOUT_SECS);
5217  	} else {
5218  		ret = megasas_issue_polled(instance, cmd);
5219  		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5220  	}
5221  
5222  	switch (ret) {
5223  	case DCMD_SUCCESS:
5224  		instance->snapdump_wait_time =
5225  			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5226  				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5227  		break;
5228  
5229  	case DCMD_TIMEOUT:
5230  		switch (dcmd_timeout_ocr_possible(instance)) {
5231  		case INITIATE_OCR:
5232  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5233  			mutex_unlock(&instance->reset_mutex);
5234  			megasas_reset_fusion(instance->host,
5235  				MFI_IO_TIMEOUT_OCR);
5236  			mutex_lock(&instance->reset_mutex);
5237  			break;
5238  		case KILL_ADAPTER:
5239  			megaraid_sas_kill_hba(instance);
5240  			break;
5241  		case IGNORE_TIMEOUT:
5242  			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5243  				__func__, __LINE__);
5244  			break;
5245  		}
5246  	}
5247  
5248  	if (ret != DCMD_TIMEOUT)
5249  		megasas_return_cmd(instance, cmd);
5250  }
5251  
5252  /**
5253   * megasas_get_ctrl_info -	Returns FW's controller structure
5254   * @instance:				Adapter soft state
5255   *
5256   * Issues an internal command (DCMD) to get the FW's controller structure.
5257   * This information is mainly used to find out the maximum IO transfer per
5258   * command supported by the FW.
5259   */
5260  int
megasas_get_ctrl_info(struct megasas_instance * instance)5261  megasas_get_ctrl_info(struct megasas_instance *instance)
5262  {
5263  	int ret = 0;
5264  	struct megasas_cmd *cmd;
5265  	struct megasas_dcmd_frame *dcmd;
5266  	struct megasas_ctrl_info *ci;
5267  	dma_addr_t ci_h = 0;
5268  
5269  	ci = instance->ctrl_info_buf;
5270  	ci_h = instance->ctrl_info_buf_h;
5271  
5272  	cmd = megasas_get_cmd(instance);
5273  
5274  	if (!cmd) {
5275  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5276  		return -ENOMEM;
5277  	}
5278  
5279  	dcmd = &cmd->frame->dcmd;
5280  
5281  	memset(ci, 0, sizeof(*ci));
5282  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5283  
5284  	dcmd->cmd = MFI_CMD_DCMD;
5285  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5286  	dcmd->sge_count = 1;
5287  	dcmd->flags = MFI_FRAME_DIR_READ;
5288  	dcmd->timeout = 0;
5289  	dcmd->pad_0 = 0;
5290  	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5291  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5292  	dcmd->mbox.b[0] = 1;
5293  
5294  	megasas_set_dma_settings(instance, dcmd, ci_h,
5295  				 sizeof(struct megasas_ctrl_info));
5296  
5297  	if ((instance->adapter_type != MFI_SERIES) &&
5298  	    !instance->mask_interrupts) {
5299  		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5300  	} else {
5301  		ret = megasas_issue_polled(instance, cmd);
5302  		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5303  	}
5304  
5305  	switch (ret) {
5306  	case DCMD_SUCCESS:
5307  		/* Save required controller information in
5308  		 * CPU endianness format.
5309  		 */
5310  		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5311  		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5312  		le32_to_cpus((u32 *)&ci->adapterOperations2);
5313  		le32_to_cpus((u32 *)&ci->adapterOperations3);
5314  		le16_to_cpus((u16 *)&ci->adapter_operations4);
5315  		le32_to_cpus((u32 *)&ci->adapter_operations5);
5316  
5317  		/* Update the latest Ext VD info.
5318  		 * From Init path, store current firmware details.
5319  		 * From OCR path, detect any firmware properties changes.
5320  		 * in case of Firmware upgrade without system reboot.
5321  		 */
5322  		megasas_update_ext_vd_details(instance);
5323  		instance->support_seqnum_jbod_fp =
5324  			ci->adapterOperations3.useSeqNumJbodFP;
5325  		instance->support_morethan256jbod =
5326  			ci->adapter_operations4.support_pd_map_target_id;
5327  		instance->support_nvme_passthru =
5328  			ci->adapter_operations4.support_nvme_passthru;
5329  		instance->support_pci_lane_margining =
5330  			ci->adapter_operations5.support_pci_lane_margining;
5331  		instance->task_abort_tmo = ci->TaskAbortTO;
5332  		instance->max_reset_tmo = ci->MaxResetTO;
5333  
5334  		/*Check whether controller is iMR or MR */
5335  		instance->is_imr = (ci->memory_size ? 0 : 1);
5336  
5337  		instance->snapdump_wait_time =
5338  			(ci->properties.on_off_properties2.enable_snap_dump ?
5339  			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5340  
5341  		instance->enable_fw_dev_list =
5342  			ci->properties.on_off_properties2.enable_fw_dev_list;
5343  
5344  		dev_info(&instance->pdev->dev,
5345  			"controller type\t: %s(%dMB)\n",
5346  			instance->is_imr ? "iMR" : "MR",
5347  			le16_to_cpu(ci->memory_size));
5348  
5349  		instance->disableOnlineCtrlReset =
5350  			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5351  		instance->secure_jbod_support =
5352  			ci->adapterOperations3.supportSecurityonJBOD;
5353  		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5354  			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5355  		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5356  			instance->secure_jbod_support ? "Yes" : "No");
5357  		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5358  			 instance->support_nvme_passthru ? "Yes" : "No");
5359  		dev_info(&instance->pdev->dev,
5360  			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5361  			 instance->task_abort_tmo, instance->max_reset_tmo);
5362  		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5363  			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5364  		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5365  			 instance->support_pci_lane_margining ? "Yes" : "No");
5366  
5367  		break;
5368  
5369  	case DCMD_TIMEOUT:
5370  		switch (dcmd_timeout_ocr_possible(instance)) {
5371  		case INITIATE_OCR:
5372  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5373  			mutex_unlock(&instance->reset_mutex);
5374  			megasas_reset_fusion(instance->host,
5375  				MFI_IO_TIMEOUT_OCR);
5376  			mutex_lock(&instance->reset_mutex);
5377  			break;
5378  		case KILL_ADAPTER:
5379  			megaraid_sas_kill_hba(instance);
5380  			break;
5381  		case IGNORE_TIMEOUT:
5382  			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5383  				__func__, __LINE__);
5384  			break;
5385  		}
5386  		break;
5387  	case DCMD_FAILED:
5388  		megaraid_sas_kill_hba(instance);
5389  		break;
5390  
5391  	}
5392  
5393  	if (ret != DCMD_TIMEOUT)
5394  		megasas_return_cmd(instance, cmd);
5395  
5396  	return ret;
5397  }
5398  
5399  /*
5400   * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
5401   *					to firmware
5402   *
5403   * @instance:				Adapter soft state
5404   * @crash_buf_state		-	tell FW to turn ON/OFF crash dump feature
5405  					MR_CRASH_BUF_TURN_OFF = 0
5406  					MR_CRASH_BUF_TURN_ON = 1
5407   * @return 0 on success non-zero on failure.
5408   * Issues an internal command (DCMD) to set parameters for crash dump feature.
5409   * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5410   * that driver supports crash dump feature. This DCMD will be sent only if
5411   * crash dump feature is supported by the FW.
5412   *
5413   */
megasas_set_crash_dump_params(struct megasas_instance * instance,u8 crash_buf_state)5414  int megasas_set_crash_dump_params(struct megasas_instance *instance,
5415  	u8 crash_buf_state)
5416  {
5417  	int ret = 0;
5418  	struct megasas_cmd *cmd;
5419  	struct megasas_dcmd_frame *dcmd;
5420  
5421  	cmd = megasas_get_cmd(instance);
5422  
5423  	if (!cmd) {
5424  		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5425  		return -ENOMEM;
5426  	}
5427  
5428  
5429  	dcmd = &cmd->frame->dcmd;
5430  
5431  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5432  	dcmd->mbox.b[0] = crash_buf_state;
5433  	dcmd->cmd = MFI_CMD_DCMD;
5434  	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5435  	dcmd->sge_count = 1;
5436  	dcmd->flags = MFI_FRAME_DIR_NONE;
5437  	dcmd->timeout = 0;
5438  	dcmd->pad_0 = 0;
5439  	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5440  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5441  
5442  	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5443  				 CRASH_DMA_BUF_SIZE);
5444  
5445  	if ((instance->adapter_type != MFI_SERIES) &&
5446  	    !instance->mask_interrupts)
5447  		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5448  	else
5449  		ret = megasas_issue_polled(instance, cmd);
5450  
5451  	if (ret == DCMD_TIMEOUT) {
5452  		switch (dcmd_timeout_ocr_possible(instance)) {
5453  		case INITIATE_OCR:
5454  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5455  			megasas_reset_fusion(instance->host,
5456  					MFI_IO_TIMEOUT_OCR);
5457  			break;
5458  		case KILL_ADAPTER:
5459  			megaraid_sas_kill_hba(instance);
5460  			break;
5461  		case IGNORE_TIMEOUT:
5462  			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5463  				__func__, __LINE__);
5464  			break;
5465  		}
5466  	} else
5467  		megasas_return_cmd(instance, cmd);
5468  
5469  	return ret;
5470  }
5471  
5472  /**
5473   * megasas_issue_init_mfi -	Initializes the FW
5474   * @instance:		Adapter soft state
5475   *
5476   * Issues the INIT MFI cmd
5477   */
5478  static int
megasas_issue_init_mfi(struct megasas_instance * instance)5479  megasas_issue_init_mfi(struct megasas_instance *instance)
5480  {
5481  	__le32 context;
5482  	struct megasas_cmd *cmd;
5483  	struct megasas_init_frame *init_frame;
5484  	struct megasas_init_queue_info *initq_info;
5485  	dma_addr_t init_frame_h;
5486  	dma_addr_t initq_info_h;
5487  
5488  	/*
5489  	 * Prepare a init frame. Note the init frame points to queue info
5490  	 * structure. Each frame has SGL allocated after first 64 bytes. For
5491  	 * this frame - since we don't need any SGL - we use SGL's space as
5492  	 * queue info structure
5493  	 *
5494  	 * We will not get a NULL command below. We just created the pool.
5495  	 */
5496  	cmd = megasas_get_cmd(instance);
5497  
5498  	init_frame = (struct megasas_init_frame *)cmd->frame;
5499  	initq_info = (struct megasas_init_queue_info *)
5500  		((unsigned long)init_frame + 64);
5501  
5502  	init_frame_h = cmd->frame_phys_addr;
5503  	initq_info_h = init_frame_h + 64;
5504  
5505  	context = init_frame->context;
5506  	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5507  	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5508  	init_frame->context = context;
5509  
5510  	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5511  	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5512  
5513  	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5514  	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5515  
5516  	init_frame->cmd = MFI_CMD_INIT;
5517  	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5518  	init_frame->queue_info_new_phys_addr_lo =
5519  		cpu_to_le32(lower_32_bits(initq_info_h));
5520  	init_frame->queue_info_new_phys_addr_hi =
5521  		cpu_to_le32(upper_32_bits(initq_info_h));
5522  
5523  	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5524  
5525  	/*
5526  	 * disable the intr before firing the init frame to FW
5527  	 */
5528  	instance->instancet->disable_intr(instance);
5529  
5530  	/*
5531  	 * Issue the init frame in polled mode
5532  	 */
5533  
5534  	if (megasas_issue_polled(instance, cmd)) {
5535  		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5536  		megasas_return_cmd(instance, cmd);
5537  		goto fail_fw_init;
5538  	}
5539  
5540  	megasas_return_cmd(instance, cmd);
5541  
5542  	return 0;
5543  
5544  fail_fw_init:
5545  	return -EINVAL;
5546  }
5547  
5548  static u32
megasas_init_adapter_mfi(struct megasas_instance * instance)5549  megasas_init_adapter_mfi(struct megasas_instance *instance)
5550  {
5551  	u32 context_sz;
5552  	u32 reply_q_sz;
5553  
5554  	/*
5555  	 * Get various operational parameters from status register
5556  	 */
5557  	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5558  	/*
5559  	 * Reduce the max supported cmds by 1. This is to ensure that the
5560  	 * reply_q_sz (1 more than the max cmd that driver may send)
5561  	 * does not exceed max cmds that the FW can support
5562  	 */
5563  	instance->max_fw_cmds = instance->max_fw_cmds-1;
5564  	instance->max_mfi_cmds = instance->max_fw_cmds;
5565  	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5566  					0x10;
5567  	/*
5568  	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5569  	 * are reserved for IOCTL + driver's internal DCMDs.
5570  	 */
5571  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5572  		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5573  		instance->max_scsi_cmds = (instance->max_fw_cmds -
5574  			MEGASAS_SKINNY_INT_CMDS);
5575  		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5576  	} else {
5577  		instance->max_scsi_cmds = (instance->max_fw_cmds -
5578  			MEGASAS_INT_CMDS);
5579  		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5580  	}
5581  
5582  	instance->cur_can_queue = instance->max_scsi_cmds;
5583  	/*
5584  	 * Create a pool of commands
5585  	 */
5586  	if (megasas_alloc_cmds(instance))
5587  		goto fail_alloc_cmds;
5588  
5589  	/*
5590  	 * Allocate memory for reply queue. Length of reply queue should
5591  	 * be _one_ more than the maximum commands handled by the firmware.
5592  	 *
5593  	 * Note: When FW completes commands, it places corresponding contex
5594  	 * values in this circular reply queue. This circular queue is a fairly
5595  	 * typical producer-consumer queue. FW is the producer (of completed
5596  	 * commands) and the driver is the consumer.
5597  	 */
5598  	context_sz = sizeof(u32);
5599  	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5600  
5601  	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5602  			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5603  
5604  	if (!instance->reply_queue) {
5605  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5606  		goto fail_reply_queue;
5607  	}
5608  
5609  	if (megasas_issue_init_mfi(instance))
5610  		goto fail_fw_init;
5611  
5612  	if (megasas_get_ctrl_info(instance)) {
5613  		dev_err(&instance->pdev->dev, "(%d): Could get controller info "
5614  			"Fail from %s %d\n", instance->unique_id,
5615  			__func__, __LINE__);
5616  		goto fail_fw_init;
5617  	}
5618  
5619  	instance->fw_support_ieee = 0;
5620  	instance->fw_support_ieee =
5621  		(instance->instancet->read_fw_status_reg(instance) &
5622  		0x04000000);
5623  
5624  	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5625  			instance->fw_support_ieee);
5626  
5627  	if (instance->fw_support_ieee)
5628  		instance->flag_ieee = 1;
5629  
5630  	return 0;
5631  
5632  fail_fw_init:
5633  
5634  	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5635  			    instance->reply_queue, instance->reply_queue_h);
5636  fail_reply_queue:
5637  	megasas_free_cmds(instance);
5638  
5639  fail_alloc_cmds:
5640  	return 1;
5641  }
5642  
5643  static
megasas_setup_irq_poll(struct megasas_instance * instance)5644  void megasas_setup_irq_poll(struct megasas_instance *instance)
5645  {
5646  	struct megasas_irq_context *irq_ctx;
5647  	u32 count, i;
5648  
5649  	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5650  
5651  	/* Initialize IRQ poll */
5652  	for (i = 0; i < count; i++) {
5653  		irq_ctx = &instance->irq_context[i];
5654  		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5655  		irq_ctx->irq_poll_scheduled = false;
5656  		irq_poll_init(&irq_ctx->irqpoll,
5657  			      instance->threshold_reply_count,
5658  			      megasas_irqpoll);
5659  	}
5660  }
5661  
5662  /*
5663   * megasas_setup_irqs_ioapic -		register legacy interrupts.
5664   * @instance:				Adapter soft state
5665   *
5666   * Do not enable interrupt, only setup ISRs.
5667   *
5668   * Return 0 on success.
5669   */
5670  static int
megasas_setup_irqs_ioapic(struct megasas_instance * instance)5671  megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5672  {
5673  	struct pci_dev *pdev;
5674  
5675  	pdev = instance->pdev;
5676  	instance->irq_context[0].instance = instance;
5677  	instance->irq_context[0].MSIxIndex = 0;
5678  	snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5679  		"megasas", instance->host->host_no);
5680  	if (request_irq(pci_irq_vector(pdev, 0),
5681  			instance->instancet->service_isr, IRQF_SHARED,
5682  			instance->irq_context->name, &instance->irq_context[0])) {
5683  		dev_err(&instance->pdev->dev,
5684  				"Failed to register IRQ from %s %d\n",
5685  				__func__, __LINE__);
5686  		return -1;
5687  	}
5688  	instance->perf_mode = MR_LATENCY_PERF_MODE;
5689  	instance->low_latency_index_start = 0;
5690  	return 0;
5691  }
5692  
5693  /**
5694   * megasas_setup_irqs_msix -		register MSI-x interrupts.
5695   * @instance:				Adapter soft state
5696   * @is_probe:				Driver probe check
5697   *
5698   * Do not enable interrupt, only setup ISRs.
5699   *
5700   * Return 0 on success.
5701   */
5702  static int
megasas_setup_irqs_msix(struct megasas_instance * instance,u8 is_probe)5703  megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5704  {
5705  	int i, j;
5706  	struct pci_dev *pdev;
5707  
5708  	pdev = instance->pdev;
5709  
5710  	/* Try MSI-x */
5711  	for (i = 0; i < instance->msix_vectors; i++) {
5712  		instance->irq_context[i].instance = instance;
5713  		instance->irq_context[i].MSIxIndex = i;
5714  		snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5715  			"megasas", instance->host->host_no, i);
5716  		if (request_irq(pci_irq_vector(pdev, i),
5717  			instance->instancet->service_isr, 0, instance->irq_context[i].name,
5718  			&instance->irq_context[i])) {
5719  			dev_err(&instance->pdev->dev,
5720  				"Failed to register IRQ for vector %d.\n", i);
5721  			for (j = 0; j < i; j++) {
5722  				if (j < instance->low_latency_index_start)
5723  					irq_update_affinity_hint(
5724  						pci_irq_vector(pdev, j), NULL);
5725  				free_irq(pci_irq_vector(pdev, j),
5726  					 &instance->irq_context[j]);
5727  			}
5728  			/* Retry irq register for IO_APIC*/
5729  			instance->msix_vectors = 0;
5730  			instance->msix_load_balance = false;
5731  			if (is_probe) {
5732  				pci_free_irq_vectors(instance->pdev);
5733  				return megasas_setup_irqs_ioapic(instance);
5734  			} else {
5735  				return -1;
5736  			}
5737  		}
5738  	}
5739  
5740  	return 0;
5741  }
5742  
5743  /*
5744   * megasas_destroy_irqs-		unregister interrupts.
5745   * @instance:				Adapter soft state
5746   * return:				void
5747   */
5748  static void
megasas_destroy_irqs(struct megasas_instance * instance)5749  megasas_destroy_irqs(struct megasas_instance *instance) {
5750  
5751  	int i;
5752  	int count;
5753  	struct megasas_irq_context *irq_ctx;
5754  
5755  	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5756  	if (instance->adapter_type != MFI_SERIES) {
5757  		for (i = 0; i < count; i++) {
5758  			irq_ctx = &instance->irq_context[i];
5759  			irq_poll_disable(&irq_ctx->irqpoll);
5760  		}
5761  	}
5762  
5763  	if (instance->msix_vectors)
5764  		for (i = 0; i < instance->msix_vectors; i++) {
5765  			if (i < instance->low_latency_index_start)
5766  				irq_update_affinity_hint(
5767  				    pci_irq_vector(instance->pdev, i), NULL);
5768  			free_irq(pci_irq_vector(instance->pdev, i),
5769  				 &instance->irq_context[i]);
5770  		}
5771  	else
5772  		free_irq(pci_irq_vector(instance->pdev, 0),
5773  			 &instance->irq_context[0]);
5774  }
5775  
5776  /**
5777   * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5778   * @instance:				Adapter soft state
5779   *
5780   * Return 0 on success.
5781   */
5782  void
megasas_setup_jbod_map(struct megasas_instance * instance)5783  megasas_setup_jbod_map(struct megasas_instance *instance)
5784  {
5785  	int i;
5786  	struct fusion_context *fusion = instance->ctrl_context;
5787  	size_t pd_seq_map_sz;
5788  
5789  	pd_seq_map_sz = struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, seq,
5790  				      MAX_PHYSICAL_DEVICES);
5791  
5792  	instance->use_seqnum_jbod_fp =
5793  		instance->support_seqnum_jbod_fp;
5794  	if (reset_devices || !fusion ||
5795  		!instance->support_seqnum_jbod_fp) {
5796  		dev_info(&instance->pdev->dev,
5797  			"JBOD sequence map is disabled %s %d\n",
5798  			__func__, __LINE__);
5799  		instance->use_seqnum_jbod_fp = false;
5800  		return;
5801  	}
5802  
5803  	if (fusion->pd_seq_sync[0])
5804  		goto skip_alloc;
5805  
5806  	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5807  		fusion->pd_seq_sync[i] = dma_alloc_coherent
5808  			(&instance->pdev->dev, pd_seq_map_sz,
5809  			&fusion->pd_seq_phys[i], GFP_KERNEL);
5810  		if (!fusion->pd_seq_sync[i]) {
5811  			dev_err(&instance->pdev->dev,
5812  				"Failed to allocate memory from %s %d\n",
5813  				__func__, __LINE__);
5814  			if (i == 1) {
5815  				dma_free_coherent(&instance->pdev->dev,
5816  					pd_seq_map_sz, fusion->pd_seq_sync[0],
5817  					fusion->pd_seq_phys[0]);
5818  				fusion->pd_seq_sync[0] = NULL;
5819  			}
5820  			instance->use_seqnum_jbod_fp = false;
5821  			return;
5822  		}
5823  	}
5824  
5825  skip_alloc:
5826  	if (!megasas_sync_pd_seq_num(instance, false) &&
5827  		!megasas_sync_pd_seq_num(instance, true))
5828  		instance->use_seqnum_jbod_fp = true;
5829  	else
5830  		instance->use_seqnum_jbod_fp = false;
5831  }
5832  
megasas_setup_reply_map(struct megasas_instance * instance)5833  static void megasas_setup_reply_map(struct megasas_instance *instance)
5834  {
5835  	const struct cpumask *mask;
5836  	unsigned int queue, cpu, low_latency_index_start;
5837  
5838  	low_latency_index_start = instance->low_latency_index_start;
5839  
5840  	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5841  		mask = pci_irq_get_affinity(instance->pdev, queue);
5842  		if (!mask)
5843  			goto fallback;
5844  
5845  		for_each_cpu(cpu, mask)
5846  			instance->reply_map[cpu] = queue;
5847  	}
5848  	return;
5849  
5850  fallback:
5851  	queue = low_latency_index_start;
5852  	for_each_possible_cpu(cpu) {
5853  		instance->reply_map[cpu] = queue;
5854  		if (queue == (instance->msix_vectors - 1))
5855  			queue = low_latency_index_start;
5856  		else
5857  			queue++;
5858  	}
5859  }
5860  
5861  /**
5862   * megasas_get_device_list -	Get the PD and LD device list from FW.
5863   * @instance:			Adapter soft state
5864   * @return:			Success or failure
5865   *
5866   * Issue DCMDs to Firmware to get the PD and LD list.
5867   * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5868   * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5869   */
5870  static
megasas_get_device_list(struct megasas_instance * instance)5871  int megasas_get_device_list(struct megasas_instance *instance)
5872  {
5873  	if (instance->enable_fw_dev_list) {
5874  		if (megasas_host_device_list_query(instance, true))
5875  			return FAILED;
5876  	} else {
5877  		if (megasas_get_pd_list(instance) < 0) {
5878  			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5879  			return FAILED;
5880  		}
5881  
5882  		if (megasas_ld_list_query(instance,
5883  					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5884  			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5885  			return FAILED;
5886  		}
5887  	}
5888  
5889  	return SUCCESS;
5890  }
5891  
5892  /**
5893   * megasas_set_high_iops_queue_affinity_and_hint -	Set affinity and hint
5894   *							for high IOPS queues
5895   * @instance:						Adapter soft state
5896   * return:						void
5897   */
5898  static inline void
megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance * instance)5899  megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
5900  {
5901  	int i;
5902  	unsigned int irq;
5903  	const struct cpumask *mask;
5904  
5905  	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5906  		mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
5907  
5908  		for (i = 0; i < instance->low_latency_index_start; i++) {
5909  			irq = pci_irq_vector(instance->pdev, i);
5910  			irq_set_affinity_and_hint(irq, mask);
5911  		}
5912  	}
5913  }
5914  
5915  static int
__megasas_alloc_irq_vectors(struct megasas_instance * instance)5916  __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5917  {
5918  	int i, irq_flags;
5919  	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5920  	struct irq_affinity *descp = &desc;
5921  
5922  	irq_flags = PCI_IRQ_MSIX;
5923  
5924  	if (instance->smp_affinity_enable)
5925  		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
5926  	else
5927  		descp = NULL;
5928  
5929  	/* Do not allocate msix vectors for poll_queues.
5930  	 * msix_vectors is always within a range of FW supported reply queue.
5931  	 */
5932  	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5933  		instance->low_latency_index_start,
5934  		instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp);
5935  
5936  	return i;
5937  }
5938  
5939  /**
5940   * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5941   * @instance:			Adapter soft state
5942   * return:			void
5943   */
5944  static void
megasas_alloc_irq_vectors(struct megasas_instance * instance)5945  megasas_alloc_irq_vectors(struct megasas_instance *instance)
5946  {
5947  	int i;
5948  	unsigned int num_msix_req;
5949  
5950  	instance->iopoll_q_count = 0;
5951  	if ((instance->adapter_type != MFI_SERIES) &&
5952  		poll_queues) {
5953  
5954  		instance->perf_mode = MR_LATENCY_PERF_MODE;
5955  		instance->low_latency_index_start = 1;
5956  
5957  		/* reserve for default and non-mananged pre-vector. */
5958  		if (instance->msix_vectors > (poll_queues + 2))
5959  			instance->iopoll_q_count = poll_queues;
5960  		else
5961  			instance->iopoll_q_count = 0;
5962  
5963  		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5964  		instance->msix_vectors = min(num_msix_req,
5965  				instance->msix_vectors);
5966  
5967  	}
5968  
5969  	i = __megasas_alloc_irq_vectors(instance);
5970  
5971  	if (((instance->perf_mode == MR_BALANCED_PERF_MODE)
5972  		|| instance->iopoll_q_count) &&
5973  	    (i != (instance->msix_vectors - instance->iopoll_q_count))) {
5974  		if (instance->msix_vectors)
5975  			pci_free_irq_vectors(instance->pdev);
5976  		/* Disable Balanced IOPS mode and try realloc vectors */
5977  		instance->perf_mode = MR_LATENCY_PERF_MODE;
5978  		instance->low_latency_index_start = 1;
5979  		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5980  
5981  		instance->msix_vectors = min(num_msix_req,
5982  				instance->msix_vectors);
5983  
5984  		instance->iopoll_q_count = 0;
5985  		i = __megasas_alloc_irq_vectors(instance);
5986  
5987  	}
5988  
5989  	dev_info(&instance->pdev->dev,
5990  		"requested/available msix %d/%d poll_queue %d\n",
5991  			instance->msix_vectors - instance->iopoll_q_count,
5992  			i, instance->iopoll_q_count);
5993  
5994  	if (i > 0)
5995  		instance->msix_vectors = i;
5996  	else
5997  		instance->msix_vectors = 0;
5998  
5999  	if (instance->smp_affinity_enable)
6000  		megasas_set_high_iops_queue_affinity_and_hint(instance);
6001  }
6002  
6003  /**
6004   * megasas_init_fw -	Initializes the FW
6005   * @instance:		Adapter soft state
6006   *
6007   * This is the main function for initializing firmware
6008   */
6009  
megasas_init_fw(struct megasas_instance * instance)6010  static int megasas_init_fw(struct megasas_instance *instance)
6011  {
6012  	u32 max_sectors_1;
6013  	u32 max_sectors_2, tmp_sectors, msix_enable;
6014  	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
6015  	resource_size_t base_addr;
6016  	void *base_addr_phys;
6017  	struct megasas_ctrl_info *ctrl_info = NULL;
6018  	unsigned long bar_list;
6019  	int i, j, loop;
6020  	struct IOV_111 *iovPtr;
6021  	struct fusion_context *fusion;
6022  	bool intr_coalescing;
6023  	unsigned int num_msix_req;
6024  	u16 lnksta, speed;
6025  
6026  	fusion = instance->ctrl_context;
6027  
6028  	/* Find first memory bar */
6029  	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
6030  	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
6031  	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
6032  					 "megasas: LSI")) {
6033  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
6034  		return -EBUSY;
6035  	}
6036  
6037  	base_addr = pci_resource_start(instance->pdev, instance->bar);
6038  	instance->reg_set = ioremap(base_addr, 8192);
6039  
6040  	if (!instance->reg_set) {
6041  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
6042  		goto fail_ioremap;
6043  	}
6044  
6045  	base_addr_phys = &base_addr;
6046  	dev_printk(KERN_DEBUG, &instance->pdev->dev,
6047  		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
6048  		   instance->bar, base_addr_phys, instance->reg_set);
6049  
6050  	if (instance->adapter_type != MFI_SERIES)
6051  		instance->instancet = &megasas_instance_template_fusion;
6052  	else {
6053  		switch (instance->pdev->device) {
6054  		case PCI_DEVICE_ID_LSI_SAS1078R:
6055  		case PCI_DEVICE_ID_LSI_SAS1078DE:
6056  			instance->instancet = &megasas_instance_template_ppc;
6057  			break;
6058  		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
6059  		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
6060  			instance->instancet = &megasas_instance_template_gen2;
6061  			break;
6062  		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
6063  		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
6064  			instance->instancet = &megasas_instance_template_skinny;
6065  			break;
6066  		case PCI_DEVICE_ID_LSI_SAS1064R:
6067  		case PCI_DEVICE_ID_DELL_PERC5:
6068  		default:
6069  			instance->instancet = &megasas_instance_template_xscale;
6070  			instance->pd_list_not_supported = 1;
6071  			break;
6072  		}
6073  	}
6074  
6075  	if (megasas_transition_to_ready(instance, 0)) {
6076  		dev_info(&instance->pdev->dev,
6077  			 "Failed to transition controller to ready from %s!\n",
6078  			 __func__);
6079  		if (instance->adapter_type != MFI_SERIES) {
6080  			status_reg = instance->instancet->read_fw_status_reg(
6081  					instance);
6082  			if (status_reg & MFI_RESET_ADAPTER) {
6083  				if (megasas_adp_reset_wait_for_ready
6084  					(instance, true, 0) == FAILED)
6085  					goto fail_ready_state;
6086  			} else {
6087  				goto fail_ready_state;
6088  			}
6089  		} else {
6090  			atomic_set(&instance->fw_reset_no_pci_access, 1);
6091  			instance->instancet->adp_reset
6092  				(instance, instance->reg_set);
6093  			atomic_set(&instance->fw_reset_no_pci_access, 0);
6094  
6095  			/*waiting for about 30 second before retry*/
6096  			ssleep(30);
6097  
6098  			if (megasas_transition_to_ready(instance, 0))
6099  				goto fail_ready_state;
6100  		}
6101  
6102  		dev_info(&instance->pdev->dev,
6103  			 "FW restarted successfully from %s!\n",
6104  			 __func__);
6105  	}
6106  
6107  	megasas_init_ctrl_params(instance);
6108  
6109  	if (megasas_set_dma_mask(instance))
6110  		goto fail_ready_state;
6111  
6112  	if (megasas_alloc_ctrl_mem(instance))
6113  		goto fail_alloc_dma_buf;
6114  
6115  	if (megasas_alloc_ctrl_dma_buffers(instance))
6116  		goto fail_alloc_dma_buf;
6117  
6118  	fusion = instance->ctrl_context;
6119  
6120  	if (instance->adapter_type >= VENTURA_SERIES) {
6121  		scratch_pad_2 =
6122  			megasas_readl(instance,
6123  				      &instance->reg_set->outbound_scratch_pad_2);
6124  		instance->max_raid_mapsize = ((scratch_pad_2 >>
6125  			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
6126  			MR_MAX_RAID_MAP_SIZE_MASK);
6127  	}
6128  
6129  	instance->enable_sdev_max_qd = enable_sdev_max_qd;
6130  
6131  	switch (instance->adapter_type) {
6132  	case VENTURA_SERIES:
6133  		fusion->pcie_bw_limitation = true;
6134  		break;
6135  	case AERO_SERIES:
6136  		fusion->r56_div_offload = true;
6137  		break;
6138  	default:
6139  		break;
6140  	}
6141  
6142  	/* Check if MSI-X is supported while in ready state */
6143  	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
6144  		       0x4000000) >> 0x1a;
6145  	if (msix_enable && !msix_disable) {
6146  
6147  		scratch_pad_1 = megasas_readl
6148  			(instance, &instance->reg_set->outbound_scratch_pad_1);
6149  		/* Check max MSI-X vectors */
6150  		if (fusion) {
6151  			if (instance->adapter_type == THUNDERBOLT_SERIES) {
6152  				/* Thunderbolt Series*/
6153  				instance->msix_vectors = (scratch_pad_1
6154  					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6155  			} else {
6156  				instance->msix_vectors = ((scratch_pad_1
6157  					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6158  					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6159  
6160  				/*
6161  				 * For Invader series, > 8 MSI-x vectors
6162  				 * supported by FW/HW implies combined
6163  				 * reply queue mode is enabled.
6164  				 * For Ventura series, > 16 MSI-x vectors
6165  				 * supported by FW/HW implies combined
6166  				 * reply queue mode is enabled.
6167  				 */
6168  				switch (instance->adapter_type) {
6169  				case INVADER_SERIES:
6170  					if (instance->msix_vectors > 8)
6171  						instance->msix_combined = true;
6172  					break;
6173  				case AERO_SERIES:
6174  				case VENTURA_SERIES:
6175  					if (instance->msix_vectors > 16)
6176  						instance->msix_combined = true;
6177  					break;
6178  				}
6179  
6180  				if (rdpq_enable)
6181  					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6182  								1 : 0;
6183  
6184  				if (instance->adapter_type >= INVADER_SERIES &&
6185  				    !instance->msix_combined) {
6186  					instance->msix_load_balance = true;
6187  					instance->smp_affinity_enable = false;
6188  				}
6189  
6190  				/* Save 1-15 reply post index address to local memory
6191  				 * Index 0 is already saved from reg offset
6192  				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6193  				 */
6194  				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6195  					instance->reply_post_host_index_addr[loop] =
6196  						(u32 __iomem *)
6197  						((u8 __iomem *)instance->reg_set +
6198  						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6199  						+ (loop * 0x10));
6200  				}
6201  			}
6202  
6203  			dev_info(&instance->pdev->dev,
6204  				 "firmware supports msix\t: (%d)",
6205  				 instance->msix_vectors);
6206  			if (msix_vectors)
6207  				instance->msix_vectors = min(msix_vectors,
6208  					instance->msix_vectors);
6209  		} else /* MFI adapters */
6210  			instance->msix_vectors = 1;
6211  
6212  
6213  		/*
6214  		 * For Aero (if some conditions are met), driver will configure a
6215  		 * few additional reply queues with interrupt coalescing enabled.
6216  		 * These queues with interrupt coalescing enabled are called
6217  		 * High IOPS queues and rest of reply queues (based on number of
6218  		 * logical CPUs) are termed as Low latency queues.
6219  		 *
6220  		 * Total Number of reply queues = High IOPS queues + low latency queues
6221  		 *
6222  		 * For rest of fusion adapters, 1 additional reply queue will be
6223  		 * reserved for management commands, rest of reply queues
6224  		 * (based on number of logical CPUs) will be used for IOs and
6225  		 * referenced as IO queues.
6226  		 * Total Number of reply queues = 1 + IO queues
6227  		 *
6228  		 * MFI adapters supports single MSI-x so single reply queue
6229  		 * will be used for IO and management commands.
6230  		 */
6231  
6232  		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6233  								true : false;
6234  		if (intr_coalescing &&
6235  			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6236  			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6237  			instance->perf_mode = MR_BALANCED_PERF_MODE;
6238  		else
6239  			instance->perf_mode = MR_LATENCY_PERF_MODE;
6240  
6241  
6242  		if (instance->adapter_type == AERO_SERIES) {
6243  			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6244  			speed = lnksta & PCI_EXP_LNKSTA_CLS;
6245  
6246  			/*
6247  			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6248  			 * in latency perf mode and enable R1 PCI bandwidth algorithm
6249  			 */
6250  			if (speed < 0x4) {
6251  				instance->perf_mode = MR_LATENCY_PERF_MODE;
6252  				fusion->pcie_bw_limitation = true;
6253  			}
6254  
6255  			/*
6256  			 * Performance mode settings provided through module parameter-perf_mode will
6257  			 * take affect only for:
6258  			 * 1. Aero family of adapters.
6259  			 * 2. When user sets module parameter- perf_mode in range of 0-2.
6260  			 */
6261  			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6262  				(perf_mode <= MR_LATENCY_PERF_MODE))
6263  				instance->perf_mode = perf_mode;
6264  			/*
6265  			 * If intr coalescing is not supported by controller FW, then IOPS
6266  			 * and Balanced modes are not feasible.
6267  			 */
6268  			if (!intr_coalescing)
6269  				instance->perf_mode = MR_LATENCY_PERF_MODE;
6270  
6271  		}
6272  
6273  		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6274  			instance->low_latency_index_start =
6275  				MR_HIGH_IOPS_QUEUE_COUNT;
6276  		else
6277  			instance->low_latency_index_start = 1;
6278  
6279  		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6280  
6281  		instance->msix_vectors = min(num_msix_req,
6282  				instance->msix_vectors);
6283  
6284  		megasas_alloc_irq_vectors(instance);
6285  		if (!instance->msix_vectors)
6286  			instance->msix_load_balance = false;
6287  	}
6288  	/*
6289  	 * MSI-X host index 0 is common for all adapter.
6290  	 * It is used for all MPT based Adapters.
6291  	 */
6292  	if (instance->msix_combined) {
6293  		instance->reply_post_host_index_addr[0] =
6294  				(u32 *)((u8 *)instance->reg_set +
6295  				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6296  	} else {
6297  		instance->reply_post_host_index_addr[0] =
6298  			(u32 *)((u8 *)instance->reg_set +
6299  			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6300  	}
6301  
6302  	if (!instance->msix_vectors) {
6303  		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6304  		if (i < 0)
6305  			goto fail_init_adapter;
6306  	}
6307  
6308  	megasas_setup_reply_map(instance);
6309  
6310  	dev_info(&instance->pdev->dev,
6311  		"current msix/online cpus\t: (%d/%d)\n",
6312  		instance->msix_vectors, (unsigned int)num_online_cpus());
6313  	dev_info(&instance->pdev->dev,
6314  		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6315  
6316  	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6317  		(unsigned long)instance);
6318  
6319  	/*
6320  	 * Below are default value for legacy Firmware.
6321  	 * non-fusion based controllers
6322  	 */
6323  	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6324  	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6325  	/* Get operational params, sge flags, send init cmd to controller */
6326  	if (instance->instancet->init_adapter(instance))
6327  		goto fail_init_adapter;
6328  
6329  	if (instance->adapter_type >= VENTURA_SERIES) {
6330  		scratch_pad_3 =
6331  			megasas_readl(instance,
6332  				      &instance->reg_set->outbound_scratch_pad_3);
6333  		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6334  			MR_DEFAULT_NVME_PAGE_SHIFT)
6335  			instance->nvme_page_size =
6336  				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6337  
6338  		dev_info(&instance->pdev->dev,
6339  			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6340  	}
6341  
6342  	if (instance->msix_vectors ?
6343  		megasas_setup_irqs_msix(instance, 1) :
6344  		megasas_setup_irqs_ioapic(instance))
6345  		goto fail_init_adapter;
6346  
6347  	if (instance->adapter_type != MFI_SERIES)
6348  		megasas_setup_irq_poll(instance);
6349  
6350  	instance->instancet->enable_intr(instance);
6351  
6352  	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6353  
6354  	megasas_setup_jbod_map(instance);
6355  
6356  	if (megasas_get_device_list(instance) != SUCCESS) {
6357  		dev_err(&instance->pdev->dev,
6358  			"%s: megasas_get_device_list failed\n",
6359  			__func__);
6360  		goto fail_get_ld_pd_list;
6361  	}
6362  
6363  	/* stream detection initialization */
6364  	if (instance->adapter_type >= VENTURA_SERIES) {
6365  		fusion->stream_detect_by_ld =
6366  			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6367  				sizeof(struct LD_STREAM_DETECT *),
6368  				GFP_KERNEL);
6369  		if (!fusion->stream_detect_by_ld) {
6370  			dev_err(&instance->pdev->dev,
6371  				"unable to allocate stream detection for pool of LDs\n");
6372  			goto fail_get_ld_pd_list;
6373  		}
6374  		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6375  			fusion->stream_detect_by_ld[i] =
6376  				kzalloc(sizeof(struct LD_STREAM_DETECT),
6377  				GFP_KERNEL);
6378  			if (!fusion->stream_detect_by_ld[i]) {
6379  				dev_err(&instance->pdev->dev,
6380  					"unable to allocate stream detect by LD\n ");
6381  				for (j = 0; j < i; ++j)
6382  					kfree(fusion->stream_detect_by_ld[j]);
6383  				kfree(fusion->stream_detect_by_ld);
6384  				fusion->stream_detect_by_ld = NULL;
6385  				goto fail_get_ld_pd_list;
6386  			}
6387  			fusion->stream_detect_by_ld[i]->mru_bit_map
6388  				= MR_STREAM_BITMAP;
6389  		}
6390  	}
6391  
6392  	/*
6393  	 * Compute the max allowed sectors per IO: The controller info has two
6394  	 * limits on max sectors. Driver should use the minimum of these two.
6395  	 *
6396  	 * 1 << stripe_sz_ops.min = max sectors per strip
6397  	 *
6398  	 * Note that older firmwares ( < FW ver 30) didn't report information
6399  	 * to calculate max_sectors_1. So the number ended up as zero always.
6400  	 */
6401  	tmp_sectors = 0;
6402  	ctrl_info = instance->ctrl_info_buf;
6403  
6404  	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6405  		le16_to_cpu(ctrl_info->max_strips_per_io);
6406  	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6407  
6408  	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6409  
6410  	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6411  	instance->passive = ctrl_info->cluster.passive;
6412  	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6413  	instance->UnevenSpanSupport =
6414  		ctrl_info->adapterOperations2.supportUnevenSpans;
6415  	if (instance->UnevenSpanSupport) {
6416  		struct fusion_context *fusion = instance->ctrl_context;
6417  		if (MR_ValidateMapInfo(instance, instance->map_id))
6418  			fusion->fast_path_io = 1;
6419  		else
6420  			fusion->fast_path_io = 0;
6421  
6422  	}
6423  	if (ctrl_info->host_interface.SRIOV) {
6424  		instance->requestorId = ctrl_info->iov.requestorId;
6425  		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6426  			if (!ctrl_info->adapterOperations2.activePassive)
6427  			    instance->PlasmaFW111 = 1;
6428  
6429  			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6430  			    instance->PlasmaFW111 ? "1.11" : "new");
6431  
6432  			if (instance->PlasmaFW111) {
6433  			    iovPtr = (struct IOV_111 *)
6434  				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6435  			    instance->requestorId = iovPtr->requestorId;
6436  			}
6437  		}
6438  		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6439  			instance->requestorId);
6440  	}
6441  
6442  	instance->crash_dump_fw_support =
6443  		ctrl_info->adapterOperations3.supportCrashDump;
6444  	instance->crash_dump_drv_support =
6445  		(instance->crash_dump_fw_support &&
6446  		instance->crash_dump_buf);
6447  	if (instance->crash_dump_drv_support)
6448  		megasas_set_crash_dump_params(instance,
6449  			MR_CRASH_BUF_TURN_OFF);
6450  
6451  	else {
6452  		if (instance->crash_dump_buf)
6453  			dma_free_coherent(&instance->pdev->dev,
6454  				CRASH_DMA_BUF_SIZE,
6455  				instance->crash_dump_buf,
6456  				instance->crash_dump_h);
6457  		instance->crash_dump_buf = NULL;
6458  	}
6459  
6460  	if (instance->snapdump_wait_time) {
6461  		megasas_get_snapdump_properties(instance);
6462  		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6463  			 instance->snapdump_wait_time);
6464  	}
6465  
6466  	dev_info(&instance->pdev->dev,
6467  		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6468  		le16_to_cpu(ctrl_info->pci.vendor_id),
6469  		le16_to_cpu(ctrl_info->pci.device_id),
6470  		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6471  		le16_to_cpu(ctrl_info->pci.sub_device_id));
6472  	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6473  		instance->UnevenSpanSupport ? "yes" : "no");
6474  	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6475  		instance->crash_dump_drv_support ? "yes" : "no");
6476  	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6477  		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6478  
6479  	instance->max_sectors_per_req = instance->max_num_sge *
6480  						SGE_BUFFER_SIZE / 512;
6481  	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6482  		instance->max_sectors_per_req = tmp_sectors;
6483  
6484  	/* Check for valid throttlequeuedepth module parameter */
6485  	if (throttlequeuedepth &&
6486  			throttlequeuedepth <= instance->max_scsi_cmds)
6487  		instance->throttlequeuedepth = throttlequeuedepth;
6488  	else
6489  		instance->throttlequeuedepth =
6490  				MEGASAS_THROTTLE_QUEUE_DEPTH;
6491  
6492  	if ((resetwaittime < 1) ||
6493  	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6494  		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6495  
6496  	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6497  		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6498  
6499  	/* Launch SR-IOV heartbeat timer */
6500  	if (instance->requestorId) {
6501  		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6502  			megasas_start_timer(instance);
6503  		} else {
6504  			instance->skip_heartbeat_timer_del = 1;
6505  			goto fail_get_ld_pd_list;
6506  		}
6507  	}
6508  
6509  	/*
6510  	 * Create and start watchdog thread which will monitor
6511  	 * controller state every 1 sec and trigger OCR when
6512  	 * it enters fault state
6513  	 */
6514  	if (instance->adapter_type != MFI_SERIES)
6515  		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6516  			goto fail_start_watchdog;
6517  
6518  	return 0;
6519  
6520  fail_start_watchdog:
6521  	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6522  		del_timer_sync(&instance->sriov_heartbeat_timer);
6523  fail_get_ld_pd_list:
6524  	instance->instancet->disable_intr(instance);
6525  	megasas_destroy_irqs(instance);
6526  fail_init_adapter:
6527  	if (instance->msix_vectors)
6528  		pci_free_irq_vectors(instance->pdev);
6529  	instance->msix_vectors = 0;
6530  fail_alloc_dma_buf:
6531  	megasas_free_ctrl_dma_buffers(instance);
6532  	megasas_free_ctrl_mem(instance);
6533  fail_ready_state:
6534  	iounmap(instance->reg_set);
6535  
6536  fail_ioremap:
6537  	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6538  
6539  	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6540  		__func__, __LINE__);
6541  	return -EINVAL;
6542  }
6543  
6544  /**
6545   * megasas_release_mfi -	Reverses the FW initialization
6546   * @instance:			Adapter soft state
6547   */
megasas_release_mfi(struct megasas_instance * instance)6548  static void megasas_release_mfi(struct megasas_instance *instance)
6549  {
6550  	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6551  
6552  	if (instance->reply_queue)
6553  		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6554  			    instance->reply_queue, instance->reply_queue_h);
6555  
6556  	megasas_free_cmds(instance);
6557  
6558  	iounmap(instance->reg_set);
6559  
6560  	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6561  }
6562  
6563  /**
6564   * megasas_get_seq_num -	Gets latest event sequence numbers
6565   * @instance:			Adapter soft state
6566   * @eli:			FW event log sequence numbers information
6567   *
6568   * FW maintains a log of all events in a non-volatile area. Upper layers would
6569   * usually find out the latest sequence number of the events, the seq number at
6570   * the boot etc. They would "read" all the events below the latest seq number
6571   * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
6572   * number), they would subsribe to AEN (asynchronous event notification) and
6573   * wait for the events to happen.
6574   */
6575  static int
megasas_get_seq_num(struct megasas_instance * instance,struct megasas_evt_log_info * eli)6576  megasas_get_seq_num(struct megasas_instance *instance,
6577  		    struct megasas_evt_log_info *eli)
6578  {
6579  	struct megasas_cmd *cmd;
6580  	struct megasas_dcmd_frame *dcmd;
6581  	struct megasas_evt_log_info *el_info;
6582  	dma_addr_t el_info_h = 0;
6583  	int ret;
6584  
6585  	cmd = megasas_get_cmd(instance);
6586  
6587  	if (!cmd) {
6588  		return -ENOMEM;
6589  	}
6590  
6591  	dcmd = &cmd->frame->dcmd;
6592  	el_info = dma_alloc_coherent(&instance->pdev->dev,
6593  				     sizeof(struct megasas_evt_log_info),
6594  				     &el_info_h, GFP_KERNEL);
6595  	if (!el_info) {
6596  		megasas_return_cmd(instance, cmd);
6597  		return -ENOMEM;
6598  	}
6599  
6600  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6601  
6602  	dcmd->cmd = MFI_CMD_DCMD;
6603  	dcmd->cmd_status = 0x0;
6604  	dcmd->sge_count = 1;
6605  	dcmd->flags = MFI_FRAME_DIR_READ;
6606  	dcmd->timeout = 0;
6607  	dcmd->pad_0 = 0;
6608  	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6609  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6610  
6611  	megasas_set_dma_settings(instance, dcmd, el_info_h,
6612  				 sizeof(struct megasas_evt_log_info));
6613  
6614  	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6615  	if (ret != DCMD_SUCCESS) {
6616  		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6617  			__func__, __LINE__);
6618  		goto dcmd_failed;
6619  	}
6620  
6621  	/*
6622  	 * Copy the data back into callers buffer
6623  	 */
6624  	eli->newest_seq_num = el_info->newest_seq_num;
6625  	eli->oldest_seq_num = el_info->oldest_seq_num;
6626  	eli->clear_seq_num = el_info->clear_seq_num;
6627  	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6628  	eli->boot_seq_num = el_info->boot_seq_num;
6629  
6630  dcmd_failed:
6631  	dma_free_coherent(&instance->pdev->dev,
6632  			sizeof(struct megasas_evt_log_info),
6633  			el_info, el_info_h);
6634  
6635  	megasas_return_cmd(instance, cmd);
6636  
6637  	return ret;
6638  }
6639  
6640  /**
6641   * megasas_register_aen -	Registers for asynchronous event notification
6642   * @instance:			Adapter soft state
6643   * @seq_num:			The starting sequence number
6644   * @class_locale_word:		Class of the event
6645   *
6646   * This function subscribes for AEN for events beyond the @seq_num. It requests
6647   * to be notified if and only if the event is of type @class_locale
6648   */
6649  static int
megasas_register_aen(struct megasas_instance * instance,u32 seq_num,u32 class_locale_word)6650  megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6651  		     u32 class_locale_word)
6652  {
6653  	int ret_val;
6654  	struct megasas_cmd *cmd;
6655  	struct megasas_dcmd_frame *dcmd;
6656  	union megasas_evt_class_locale curr_aen;
6657  	union megasas_evt_class_locale prev_aen;
6658  
6659  	/*
6660  	 * If there an AEN pending already (aen_cmd), check if the
6661  	 * class_locale of that pending AEN is inclusive of the new
6662  	 * AEN request we currently have. If it is, then we don't have
6663  	 * to do anything. In other words, whichever events the current
6664  	 * AEN request is subscribing to, have already been subscribed
6665  	 * to.
6666  	 *
6667  	 * If the old_cmd is _not_ inclusive, then we have to abort
6668  	 * that command, form a class_locale that is superset of both
6669  	 * old and current and re-issue to the FW
6670  	 */
6671  
6672  	curr_aen.word = class_locale_word;
6673  
6674  	if (instance->aen_cmd) {
6675  
6676  		prev_aen.word =
6677  			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6678  
6679  		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6680  		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6681  			dev_info(&instance->pdev->dev,
6682  				 "%s %d out of range class %d send by application\n",
6683  				 __func__, __LINE__, curr_aen.members.class);
6684  			return 0;
6685  		}
6686  
6687  		/*
6688  		 * A class whose enum value is smaller is inclusive of all
6689  		 * higher values. If a PROGRESS (= -1) was previously
6690  		 * registered, then a new registration requests for higher
6691  		 * classes need not be sent to FW. They are automatically
6692  		 * included.
6693  		 *
6694  		 * Locale numbers don't have such hierarchy. They are bitmap
6695  		 * values
6696  		 */
6697  		if ((prev_aen.members.class <= curr_aen.members.class) &&
6698  		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6699  		      curr_aen.members.locale)) {
6700  			/*
6701  			 * Previously issued event registration includes
6702  			 * current request. Nothing to do.
6703  			 */
6704  			return 0;
6705  		} else {
6706  			curr_aen.members.locale |= prev_aen.members.locale;
6707  
6708  			if (prev_aen.members.class < curr_aen.members.class)
6709  				curr_aen.members.class = prev_aen.members.class;
6710  
6711  			instance->aen_cmd->abort_aen = 1;
6712  			ret_val = megasas_issue_blocked_abort_cmd(instance,
6713  								  instance->
6714  								  aen_cmd, 30);
6715  
6716  			if (ret_val) {
6717  				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6718  				       "previous AEN command\n");
6719  				return ret_val;
6720  			}
6721  		}
6722  	}
6723  
6724  	cmd = megasas_get_cmd(instance);
6725  
6726  	if (!cmd)
6727  		return -ENOMEM;
6728  
6729  	dcmd = &cmd->frame->dcmd;
6730  
6731  	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6732  
6733  	/*
6734  	 * Prepare DCMD for aen registration
6735  	 */
6736  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6737  
6738  	dcmd->cmd = MFI_CMD_DCMD;
6739  	dcmd->cmd_status = 0x0;
6740  	dcmd->sge_count = 1;
6741  	dcmd->flags = MFI_FRAME_DIR_READ;
6742  	dcmd->timeout = 0;
6743  	dcmd->pad_0 = 0;
6744  	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6745  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6746  	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6747  	instance->last_seq_num = seq_num;
6748  	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6749  
6750  	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6751  				 sizeof(struct megasas_evt_detail));
6752  
6753  	if (instance->aen_cmd != NULL) {
6754  		megasas_return_cmd(instance, cmd);
6755  		return 0;
6756  	}
6757  
6758  	/*
6759  	 * Store reference to the cmd used to register for AEN. When an
6760  	 * application wants us to register for AEN, we have to abort this
6761  	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6762  	 */
6763  	instance->aen_cmd = cmd;
6764  
6765  	/*
6766  	 * Issue the aen registration frame
6767  	 */
6768  	instance->instancet->issue_dcmd(instance, cmd);
6769  
6770  	return 0;
6771  }
6772  
6773  /* megasas_get_target_prop - Send DCMD with below details to firmware.
6774   *
6775   * This DCMD will fetch few properties of LD/system PD defined
6776   * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value.
6777   *
6778   * DCMD send by drivers whenever new target is added to the OS.
6779   *
6780   * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
6781   * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
6782   *                       0 = system PD, 1 = LD.
6783   * dcmd.mbox.s[1]      - TargetID for LD/system PD.
6784   * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
6785   *
6786   * @instance:		Adapter soft state
6787   * @sdev:		OS provided scsi device
6788   *
6789   * Returns 0 on success non-zero on failure.
6790   */
6791  int
megasas_get_target_prop(struct megasas_instance * instance,struct scsi_device * sdev)6792  megasas_get_target_prop(struct megasas_instance *instance,
6793  			struct scsi_device *sdev)
6794  {
6795  	int ret;
6796  	struct megasas_cmd *cmd;
6797  	struct megasas_dcmd_frame *dcmd;
6798  	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6799  			sdev->id;
6800  
6801  	cmd = megasas_get_cmd(instance);
6802  
6803  	if (!cmd) {
6804  		dev_err(&instance->pdev->dev,
6805  			"Failed to get cmd %s\n", __func__);
6806  		return -ENOMEM;
6807  	}
6808  
6809  	dcmd = &cmd->frame->dcmd;
6810  
6811  	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6812  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6813  	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6814  
6815  	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6816  	dcmd->cmd = MFI_CMD_DCMD;
6817  	dcmd->cmd_status = 0xFF;
6818  	dcmd->sge_count = 1;
6819  	dcmd->flags = MFI_FRAME_DIR_READ;
6820  	dcmd->timeout = 0;
6821  	dcmd->pad_0 = 0;
6822  	dcmd->data_xfer_len =
6823  		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6824  	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6825  
6826  	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6827  				 sizeof(struct MR_TARGET_PROPERTIES));
6828  
6829  	if ((instance->adapter_type != MFI_SERIES) &&
6830  	    !instance->mask_interrupts)
6831  		ret = megasas_issue_blocked_cmd(instance,
6832  						cmd, MFI_IO_TIMEOUT_SECS);
6833  	else
6834  		ret = megasas_issue_polled(instance, cmd);
6835  
6836  	switch (ret) {
6837  	case DCMD_TIMEOUT:
6838  		switch (dcmd_timeout_ocr_possible(instance)) {
6839  		case INITIATE_OCR:
6840  			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6841  			mutex_unlock(&instance->reset_mutex);
6842  			megasas_reset_fusion(instance->host,
6843  					     MFI_IO_TIMEOUT_OCR);
6844  			mutex_lock(&instance->reset_mutex);
6845  			break;
6846  		case KILL_ADAPTER:
6847  			megaraid_sas_kill_hba(instance);
6848  			break;
6849  		case IGNORE_TIMEOUT:
6850  			dev_info(&instance->pdev->dev,
6851  				 "Ignore DCMD timeout: %s %d\n",
6852  				 __func__, __LINE__);
6853  			break;
6854  		}
6855  		break;
6856  
6857  	default:
6858  		megasas_return_cmd(instance, cmd);
6859  	}
6860  	if (ret != DCMD_SUCCESS)
6861  		dev_err(&instance->pdev->dev,
6862  			"return from %s %d return value %d\n",
6863  			__func__, __LINE__, ret);
6864  
6865  	return ret;
6866  }
6867  
6868  /**
6869   * megasas_start_aen -	Subscribes to AEN during driver load time
6870   * @instance:		Adapter soft state
6871   */
megasas_start_aen(struct megasas_instance * instance)6872  static int megasas_start_aen(struct megasas_instance *instance)
6873  {
6874  	struct megasas_evt_log_info eli;
6875  	union megasas_evt_class_locale class_locale;
6876  
6877  	/*
6878  	 * Get the latest sequence number from FW
6879  	 */
6880  	memset(&eli, 0, sizeof(eli));
6881  
6882  	if (megasas_get_seq_num(instance, &eli))
6883  		return -1;
6884  
6885  	/*
6886  	 * Register AEN with FW for latest sequence number plus 1
6887  	 */
6888  	class_locale.members.reserved = 0;
6889  	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6890  	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6891  
6892  	return megasas_register_aen(instance,
6893  			le32_to_cpu(eli.newest_seq_num) + 1,
6894  			class_locale.word);
6895  }
6896  
6897  /**
6898   * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6899   * @instance:		Adapter soft state
6900   */
megasas_io_attach(struct megasas_instance * instance)6901  static int megasas_io_attach(struct megasas_instance *instance)
6902  {
6903  	struct Scsi_Host *host = instance->host;
6904  
6905  	/*
6906  	 * Export parameters required by SCSI mid-layer
6907  	 */
6908  	host->unique_id = instance->unique_id;
6909  	host->can_queue = instance->max_scsi_cmds;
6910  	host->this_id = instance->init_id;
6911  	host->sg_tablesize = instance->max_num_sge;
6912  
6913  	if (instance->fw_support_ieee)
6914  		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6915  
6916  	/*
6917  	 * Check if the module parameter value for max_sectors can be used
6918  	 */
6919  	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6920  		instance->max_sectors_per_req = max_sectors;
6921  	else {
6922  		if (max_sectors) {
6923  			if (((instance->pdev->device ==
6924  				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6925  				(instance->pdev->device ==
6926  				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6927  				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6928  				instance->max_sectors_per_req = max_sectors;
6929  			} else {
6930  			dev_info(&instance->pdev->dev, "max_sectors should be > 0"
6931  				"and <= %d (or < 1MB for GEN2 controller)\n",
6932  				instance->max_sectors_per_req);
6933  			}
6934  		}
6935  	}
6936  
6937  	host->max_sectors = instance->max_sectors_per_req;
6938  	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6939  	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6940  	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6941  	host->max_lun = MEGASAS_MAX_LUN;
6942  	host->max_cmd_len = 16;
6943  
6944  	/* Use shared host tagset only for fusion adaptors
6945  	 * if there are managed interrupts (smp affinity enabled case).
6946  	 * Single msix_vectors in kdump, so shared host tag is also disabled.
6947  	 */
6948  
6949  	host->host_tagset = 0;
6950  	host->nr_hw_queues = 1;
6951  
6952  	if ((instance->adapter_type != MFI_SERIES) &&
6953  		(instance->msix_vectors > instance->low_latency_index_start) &&
6954  		host_tagset_enable &&
6955  		instance->smp_affinity_enable) {
6956  		host->host_tagset = 1;
6957  		host->nr_hw_queues = instance->msix_vectors -
6958  			instance->low_latency_index_start + instance->iopoll_q_count;
6959  		if (instance->iopoll_q_count)
6960  			host->nr_maps = 3;
6961  	} else {
6962  		instance->iopoll_q_count = 0;
6963  	}
6964  
6965  	dev_info(&instance->pdev->dev,
6966  		"Max firmware commands: %d shared with default "
6967  		"hw_queues = %d poll_queues %d\n", instance->max_fw_cmds,
6968  		host->nr_hw_queues - instance->iopoll_q_count,
6969  		instance->iopoll_q_count);
6970  	/*
6971  	 * Notify the mid-layer about the new controller
6972  	 */
6973  	if (scsi_add_host(host, &instance->pdev->dev)) {
6974  		dev_err(&instance->pdev->dev,
6975  			"Failed to add host from %s %d\n",
6976  			__func__, __LINE__);
6977  		return -ENODEV;
6978  	}
6979  
6980  	return 0;
6981  }
6982  
6983  /**
6984   * megasas_set_dma_mask -	Set DMA mask for supported controllers
6985   *
6986   * @instance:		Adapter soft state
6987   * Description:
6988   *
6989   * For Ventura, driver/FW will operate in 63bit DMA addresses.
6990   *
6991   * For invader-
6992   *	By default, driver/FW will operate in 32bit DMA addresses
6993   *	for consistent DMA mapping but if 32 bit consistent
6994   *	DMA mask fails, driver will try with 63 bit consistent
6995   *	mask provided FW is true 63bit DMA capable
6996   *
6997   * For older controllers(Thunderbolt and MFI based adapters)-
6998   *	driver/FW will operate in 32 bit consistent DMA addresses.
6999   */
7000  static int
megasas_set_dma_mask(struct megasas_instance * instance)7001  megasas_set_dma_mask(struct megasas_instance *instance)
7002  {
7003  	u64 consistent_mask;
7004  	struct pci_dev *pdev;
7005  	u32 scratch_pad_1;
7006  
7007  	pdev = instance->pdev;
7008  	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
7009  				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
7010  
7011  	if (IS_DMA64) {
7012  		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
7013  		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
7014  			goto fail_set_dma_mask;
7015  
7016  		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
7017  		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
7018  		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
7019  			/*
7020  			 * If 32 bit DMA mask fails, then try for 64 bit mask
7021  			 * for FW capable of handling 64 bit DMA.
7022  			 */
7023  			scratch_pad_1 = megasas_readl
7024  				(instance, &instance->reg_set->outbound_scratch_pad_1);
7025  
7026  			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
7027  				goto fail_set_dma_mask;
7028  			else if (dma_set_mask_and_coherent(&pdev->dev,
7029  							   DMA_BIT_MASK(63)))
7030  				goto fail_set_dma_mask;
7031  		}
7032  	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
7033  		goto fail_set_dma_mask;
7034  
7035  	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
7036  		instance->consistent_mask_64bit = false;
7037  	else
7038  		instance->consistent_mask_64bit = true;
7039  
7040  	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
7041  		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
7042  		 (instance->consistent_mask_64bit ? "63" : "32"));
7043  
7044  	return 0;
7045  
7046  fail_set_dma_mask:
7047  	dev_err(&pdev->dev, "Failed to set DMA mask\n");
7048  	return -1;
7049  
7050  }
7051  
7052  /*
7053   * megasas_set_adapter_type -	Set adapter type.
7054   *				Supported controllers can be divided in
7055   *				different categories-
7056   *					enum MR_ADAPTER_TYPE {
7057   *						MFI_SERIES = 1,
7058   *						THUNDERBOLT_SERIES = 2,
7059   *						INVADER_SERIES = 3,
7060   *						VENTURA_SERIES = 4,
7061   *						AERO_SERIES = 5,
7062   *					};
7063   * @instance:			Adapter soft state
7064   * return:			void
7065   */
megasas_set_adapter_type(struct megasas_instance * instance)7066  static inline void megasas_set_adapter_type(struct megasas_instance *instance)
7067  {
7068  	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
7069  	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
7070  		instance->adapter_type = MFI_SERIES;
7071  	} else {
7072  		switch (instance->pdev->device) {
7073  		case PCI_DEVICE_ID_LSI_AERO_10E1:
7074  		case PCI_DEVICE_ID_LSI_AERO_10E2:
7075  		case PCI_DEVICE_ID_LSI_AERO_10E5:
7076  		case PCI_DEVICE_ID_LSI_AERO_10E6:
7077  			instance->adapter_type = AERO_SERIES;
7078  			break;
7079  		case PCI_DEVICE_ID_LSI_VENTURA:
7080  		case PCI_DEVICE_ID_LSI_CRUSADER:
7081  		case PCI_DEVICE_ID_LSI_HARPOON:
7082  		case PCI_DEVICE_ID_LSI_TOMCAT:
7083  		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
7084  		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
7085  			instance->adapter_type = VENTURA_SERIES;
7086  			break;
7087  		case PCI_DEVICE_ID_LSI_FUSION:
7088  		case PCI_DEVICE_ID_LSI_PLASMA:
7089  			instance->adapter_type = THUNDERBOLT_SERIES;
7090  			break;
7091  		case PCI_DEVICE_ID_LSI_INVADER:
7092  		case PCI_DEVICE_ID_LSI_INTRUDER:
7093  		case PCI_DEVICE_ID_LSI_INTRUDER_24:
7094  		case PCI_DEVICE_ID_LSI_CUTLASS_52:
7095  		case PCI_DEVICE_ID_LSI_CUTLASS_53:
7096  		case PCI_DEVICE_ID_LSI_FURY:
7097  			instance->adapter_type = INVADER_SERIES;
7098  			break;
7099  		default: /* For all other supported controllers */
7100  			instance->adapter_type = MFI_SERIES;
7101  			break;
7102  		}
7103  	}
7104  }
7105  
megasas_alloc_mfi_ctrl_mem(struct megasas_instance * instance)7106  static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
7107  {
7108  	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
7109  			sizeof(u32), &instance->producer_h, GFP_KERNEL);
7110  	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
7111  			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
7112  
7113  	if (!instance->producer || !instance->consumer) {
7114  		dev_err(&instance->pdev->dev,
7115  			"Failed to allocate memory for producer, consumer\n");
7116  		return -1;
7117  	}
7118  
7119  	*instance->producer = 0;
7120  	*instance->consumer = 0;
7121  	return 0;
7122  }
7123  
7124  /**
7125   * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
7126   *				structures which are not common across MFI
7127   *				adapters and fusion adapters.
7128   *				For MFI based adapters, allocate producer and
7129   *				consumer buffers. For fusion adapters, allocate
7130   *				memory for fusion context.
7131   * @instance:			Adapter soft state
7132   * return:			0 for SUCCESS
7133   */
megasas_alloc_ctrl_mem(struct megasas_instance * instance)7134  static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
7135  {
7136  	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
7137  				      GFP_KERNEL);
7138  	if (!instance->reply_map)
7139  		return -ENOMEM;
7140  
7141  	switch (instance->adapter_type) {
7142  	case MFI_SERIES:
7143  		if (megasas_alloc_mfi_ctrl_mem(instance))
7144  			return -ENOMEM;
7145  		break;
7146  	case AERO_SERIES:
7147  	case VENTURA_SERIES:
7148  	case THUNDERBOLT_SERIES:
7149  	case INVADER_SERIES:
7150  		if (megasas_alloc_fusion_context(instance))
7151  			return -ENOMEM;
7152  		break;
7153  	}
7154  
7155  	return 0;
7156  }
7157  
7158  /*
7159   * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
7160   *				producer, consumer buffers for MFI adapters
7161   *
7162   * @instance -			Adapter soft instance
7163   *
7164   */
megasas_free_ctrl_mem(struct megasas_instance * instance)7165  static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
7166  {
7167  	kfree(instance->reply_map);
7168  	if (instance->adapter_type == MFI_SERIES) {
7169  		if (instance->producer)
7170  			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7171  					    instance->producer,
7172  					    instance->producer_h);
7173  		if (instance->consumer)
7174  			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7175  					    instance->consumer,
7176  					    instance->consumer_h);
7177  	} else {
7178  		megasas_free_fusion_context(instance);
7179  	}
7180  }
7181  
7182  /**
7183   * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
7184   *					driver load time
7185   *
7186   * @instance:				Adapter soft instance
7187   *
7188   * @return:				O for SUCCESS
7189   */
7190  static inline
megasas_alloc_ctrl_dma_buffers(struct megasas_instance * instance)7191  int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7192  {
7193  	struct pci_dev *pdev = instance->pdev;
7194  	struct fusion_context *fusion = instance->ctrl_context;
7195  
7196  	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7197  			sizeof(struct megasas_evt_detail),
7198  			&instance->evt_detail_h, GFP_KERNEL);
7199  
7200  	if (!instance->evt_detail) {
7201  		dev_err(&instance->pdev->dev,
7202  			"Failed to allocate event detail buffer\n");
7203  		return -ENOMEM;
7204  	}
7205  
7206  	if (fusion) {
7207  		fusion->ioc_init_request =
7208  			dma_alloc_coherent(&pdev->dev,
7209  					   sizeof(struct MPI2_IOC_INIT_REQUEST),
7210  					   &fusion->ioc_init_request_phys,
7211  					   GFP_KERNEL);
7212  
7213  		if (!fusion->ioc_init_request) {
7214  			dev_err(&pdev->dev,
7215  				"Failed to allocate ioc init request\n");
7216  			return -ENOMEM;
7217  		}
7218  
7219  		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7220  				sizeof(struct MR_SNAPDUMP_PROPERTIES),
7221  				&instance->snapdump_prop_h, GFP_KERNEL);
7222  
7223  		if (!instance->snapdump_prop)
7224  			dev_err(&pdev->dev,
7225  				"Failed to allocate snapdump properties buffer\n");
7226  
7227  		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7228  							HOST_DEVICE_LIST_SZ,
7229  							&instance->host_device_list_buf_h,
7230  							GFP_KERNEL);
7231  
7232  		if (!instance->host_device_list_buf) {
7233  			dev_err(&pdev->dev,
7234  				"Failed to allocate targetid list buffer\n");
7235  			return -ENOMEM;
7236  		}
7237  
7238  	}
7239  
7240  	instance->pd_list_buf =
7241  		dma_alloc_coherent(&pdev->dev,
7242  				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7243  				     &instance->pd_list_buf_h, GFP_KERNEL);
7244  
7245  	if (!instance->pd_list_buf) {
7246  		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7247  		return -ENOMEM;
7248  	}
7249  
7250  	instance->ctrl_info_buf =
7251  		dma_alloc_coherent(&pdev->dev,
7252  				     sizeof(struct megasas_ctrl_info),
7253  				     &instance->ctrl_info_buf_h, GFP_KERNEL);
7254  
7255  	if (!instance->ctrl_info_buf) {
7256  		dev_err(&pdev->dev,
7257  			"Failed to allocate controller info buffer\n");
7258  		return -ENOMEM;
7259  	}
7260  
7261  	instance->ld_list_buf =
7262  		dma_alloc_coherent(&pdev->dev,
7263  				     sizeof(struct MR_LD_LIST),
7264  				     &instance->ld_list_buf_h, GFP_KERNEL);
7265  
7266  	if (!instance->ld_list_buf) {
7267  		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7268  		return -ENOMEM;
7269  	}
7270  
7271  	instance->ld_targetid_list_buf =
7272  		dma_alloc_coherent(&pdev->dev,
7273  				sizeof(struct MR_LD_TARGETID_LIST),
7274  				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7275  
7276  	if (!instance->ld_targetid_list_buf) {
7277  		dev_err(&pdev->dev,
7278  			"Failed to allocate LD targetid list buffer\n");
7279  		return -ENOMEM;
7280  	}
7281  
7282  	if (!reset_devices) {
7283  		instance->system_info_buf =
7284  			dma_alloc_coherent(&pdev->dev,
7285  					sizeof(struct MR_DRV_SYSTEM_INFO),
7286  					&instance->system_info_h, GFP_KERNEL);
7287  		instance->pd_info =
7288  			dma_alloc_coherent(&pdev->dev,
7289  					sizeof(struct MR_PD_INFO),
7290  					&instance->pd_info_h, GFP_KERNEL);
7291  		instance->tgt_prop =
7292  			dma_alloc_coherent(&pdev->dev,
7293  					sizeof(struct MR_TARGET_PROPERTIES),
7294  					&instance->tgt_prop_h, GFP_KERNEL);
7295  		instance->crash_dump_buf =
7296  			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7297  					&instance->crash_dump_h, GFP_KERNEL);
7298  
7299  		if (!instance->system_info_buf)
7300  			dev_err(&instance->pdev->dev,
7301  				"Failed to allocate system info buffer\n");
7302  
7303  		if (!instance->pd_info)
7304  			dev_err(&instance->pdev->dev,
7305  				"Failed to allocate pd_info buffer\n");
7306  
7307  		if (!instance->tgt_prop)
7308  			dev_err(&instance->pdev->dev,
7309  				"Failed to allocate tgt_prop buffer\n");
7310  
7311  		if (!instance->crash_dump_buf)
7312  			dev_err(&instance->pdev->dev,
7313  				"Failed to allocate crash dump buffer\n");
7314  	}
7315  
7316  	return 0;
7317  }
7318  
7319  /*
7320   * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7321   *					during driver load time
7322   *
7323   * @instance-				Adapter soft instance
7324   *
7325   */
7326  static inline
megasas_free_ctrl_dma_buffers(struct megasas_instance * instance)7327  void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7328  {
7329  	struct pci_dev *pdev = instance->pdev;
7330  	struct fusion_context *fusion = instance->ctrl_context;
7331  
7332  	if (instance->evt_detail)
7333  		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7334  				    instance->evt_detail,
7335  				    instance->evt_detail_h);
7336  
7337  	if (fusion && fusion->ioc_init_request)
7338  		dma_free_coherent(&pdev->dev,
7339  				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7340  				  fusion->ioc_init_request,
7341  				  fusion->ioc_init_request_phys);
7342  
7343  	if (instance->pd_list_buf)
7344  		dma_free_coherent(&pdev->dev,
7345  				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7346  				    instance->pd_list_buf,
7347  				    instance->pd_list_buf_h);
7348  
7349  	if (instance->ld_list_buf)
7350  		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7351  				    instance->ld_list_buf,
7352  				    instance->ld_list_buf_h);
7353  
7354  	if (instance->ld_targetid_list_buf)
7355  		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7356  				    instance->ld_targetid_list_buf,
7357  				    instance->ld_targetid_list_buf_h);
7358  
7359  	if (instance->ctrl_info_buf)
7360  		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7361  				    instance->ctrl_info_buf,
7362  				    instance->ctrl_info_buf_h);
7363  
7364  	if (instance->system_info_buf)
7365  		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7366  				    instance->system_info_buf,
7367  				    instance->system_info_h);
7368  
7369  	if (instance->pd_info)
7370  		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7371  				    instance->pd_info, instance->pd_info_h);
7372  
7373  	if (instance->tgt_prop)
7374  		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7375  				    instance->tgt_prop, instance->tgt_prop_h);
7376  
7377  	if (instance->crash_dump_buf)
7378  		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7379  				    instance->crash_dump_buf,
7380  				    instance->crash_dump_h);
7381  
7382  	if (instance->snapdump_prop)
7383  		dma_free_coherent(&pdev->dev,
7384  				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7385  				  instance->snapdump_prop,
7386  				  instance->snapdump_prop_h);
7387  
7388  	if (instance->host_device_list_buf)
7389  		dma_free_coherent(&pdev->dev,
7390  				  HOST_DEVICE_LIST_SZ,
7391  				  instance->host_device_list_buf,
7392  				  instance->host_device_list_buf_h);
7393  
7394  }
7395  
7396  /*
7397   * megasas_init_ctrl_params -		Initialize controller's instance
7398   *					parameters before FW init
7399   * @instance -				Adapter soft instance
7400   * @return -				void
7401   */
megasas_init_ctrl_params(struct megasas_instance * instance)7402  static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7403  {
7404  	instance->fw_crash_state = UNAVAILABLE;
7405  
7406  	megasas_poll_wait_aen = 0;
7407  	instance->issuepend_done = 1;
7408  	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7409  
7410  	/*
7411  	 * Initialize locks and queues
7412  	 */
7413  	INIT_LIST_HEAD(&instance->cmd_pool);
7414  	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7415  
7416  	atomic_set(&instance->fw_outstanding, 0);
7417  	atomic64_set(&instance->total_io_count, 0);
7418  
7419  	init_waitqueue_head(&instance->int_cmd_wait_q);
7420  	init_waitqueue_head(&instance->abort_cmd_wait_q);
7421  
7422  	mutex_init(&instance->crashdump_lock);
7423  	spin_lock_init(&instance->mfi_pool_lock);
7424  	spin_lock_init(&instance->hba_lock);
7425  	spin_lock_init(&instance->stream_lock);
7426  	spin_lock_init(&instance->completion_lock);
7427  
7428  	mutex_init(&instance->reset_mutex);
7429  
7430  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7431  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7432  		instance->flag_ieee = 1;
7433  
7434  	instance->flag = 0;
7435  	instance->unload = 1;
7436  	instance->last_time = 0;
7437  	instance->disableOnlineCtrlReset = 1;
7438  	instance->UnevenSpanSupport = 0;
7439  	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7440  	instance->msix_load_balance = false;
7441  
7442  	if (instance->adapter_type != MFI_SERIES)
7443  		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7444  	else
7445  		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7446  }
7447  
7448  /**
7449   * megasas_probe_one -	PCI hotplug entry point
7450   * @pdev:		PCI device structure
7451   * @id:			PCI ids of supported hotplugged adapter
7452   */
megasas_probe_one(struct pci_dev * pdev,const struct pci_device_id * id)7453  static int megasas_probe_one(struct pci_dev *pdev,
7454  			     const struct pci_device_id *id)
7455  {
7456  	int rval, pos;
7457  	struct Scsi_Host *host;
7458  	struct megasas_instance *instance;
7459  	u16 control = 0;
7460  
7461  	switch (pdev->device) {
7462  	case PCI_DEVICE_ID_LSI_AERO_10E0:
7463  	case PCI_DEVICE_ID_LSI_AERO_10E3:
7464  	case PCI_DEVICE_ID_LSI_AERO_10E4:
7465  	case PCI_DEVICE_ID_LSI_AERO_10E7:
7466  		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7467  		return 1;
7468  	case PCI_DEVICE_ID_LSI_AERO_10E1:
7469  	case PCI_DEVICE_ID_LSI_AERO_10E5:
7470  		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7471  		break;
7472  	}
7473  
7474  	/* Reset MSI-X in the kdump kernel */
7475  	if (reset_devices) {
7476  		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7477  		if (pos) {
7478  			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7479  					     &control);
7480  			if (control & PCI_MSIX_FLAGS_ENABLE) {
7481  				dev_info(&pdev->dev, "resetting MSI-X\n");
7482  				pci_write_config_word(pdev,
7483  						      pos + PCI_MSIX_FLAGS,
7484  						      control &
7485  						      ~PCI_MSIX_FLAGS_ENABLE);
7486  			}
7487  		}
7488  	}
7489  
7490  	/*
7491  	 * PCI prepping: enable device set bus mastering and dma mask
7492  	 */
7493  	rval = pci_enable_device_mem(pdev);
7494  
7495  	if (rval) {
7496  		return rval;
7497  	}
7498  
7499  	pci_set_master(pdev);
7500  
7501  	host = scsi_host_alloc(&megasas_template,
7502  			       sizeof(struct megasas_instance));
7503  
7504  	if (!host) {
7505  		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7506  		goto fail_alloc_instance;
7507  	}
7508  
7509  	instance = (struct megasas_instance *)host->hostdata;
7510  	memset(instance, 0, sizeof(*instance));
7511  	atomic_set(&instance->fw_reset_no_pci_access, 0);
7512  
7513  	/*
7514  	 * Initialize PCI related and misc parameters
7515  	 */
7516  	instance->pdev = pdev;
7517  	instance->host = host;
7518  	instance->unique_id = pci_dev_id(pdev);
7519  	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7520  
7521  	megasas_set_adapter_type(instance);
7522  
7523  	/*
7524  	 * Initialize MFI Firmware
7525  	 */
7526  	if (megasas_init_fw(instance))
7527  		goto fail_init_mfi;
7528  
7529  	if (instance->requestorId) {
7530  		if (instance->PlasmaFW111) {
7531  			instance->vf_affiliation_111 =
7532  				dma_alloc_coherent(&pdev->dev,
7533  					sizeof(struct MR_LD_VF_AFFILIATION_111),
7534  					&instance->vf_affiliation_111_h,
7535  					GFP_KERNEL);
7536  			if (!instance->vf_affiliation_111)
7537  				dev_warn(&pdev->dev, "Can't allocate "
7538  				       "memory for VF affiliation buffer\n");
7539  		} else {
7540  			instance->vf_affiliation =
7541  				dma_alloc_coherent(&pdev->dev,
7542  					(MAX_LOGICAL_DRIVES + 1) *
7543  					sizeof(struct MR_LD_VF_AFFILIATION),
7544  					&instance->vf_affiliation_h,
7545  					GFP_KERNEL);
7546  			if (!instance->vf_affiliation)
7547  				dev_warn(&pdev->dev, "Can't allocate "
7548  				       "memory for VF affiliation buffer\n");
7549  		}
7550  	}
7551  
7552  	/*
7553  	 * Store instance in PCI softstate
7554  	 */
7555  	pci_set_drvdata(pdev, instance);
7556  
7557  	/*
7558  	 * Add this controller to megasas_mgmt_info structure so that it
7559  	 * can be exported to management applications
7560  	 */
7561  	megasas_mgmt_info.count++;
7562  	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7563  	megasas_mgmt_info.max_index++;
7564  
7565  	/*
7566  	 * Register with SCSI mid-layer
7567  	 */
7568  	if (megasas_io_attach(instance))
7569  		goto fail_io_attach;
7570  
7571  	instance->unload = 0;
7572  	/*
7573  	 * Trigger SCSI to scan our drives
7574  	 */
7575  	if (!instance->enable_fw_dev_list ||
7576  	    (instance->host_device_list_buf->count > 0))
7577  		scsi_scan_host(host);
7578  
7579  	/*
7580  	 * Initiate AEN (Asynchronous Event Notification)
7581  	 */
7582  	if (megasas_start_aen(instance)) {
7583  		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7584  		goto fail_start_aen;
7585  	}
7586  
7587  	megasas_setup_debugfs(instance);
7588  
7589  	/* Get current SR-IOV LD/VF affiliation */
7590  	if (instance->requestorId)
7591  		megasas_get_ld_vf_affiliation(instance, 1);
7592  
7593  	return 0;
7594  
7595  fail_start_aen:
7596  	instance->unload = 1;
7597  	scsi_remove_host(instance->host);
7598  fail_io_attach:
7599  	megasas_mgmt_info.count--;
7600  	megasas_mgmt_info.max_index--;
7601  	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7602  
7603  	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7604  		del_timer_sync(&instance->sriov_heartbeat_timer);
7605  
7606  	instance->instancet->disable_intr(instance);
7607  	megasas_destroy_irqs(instance);
7608  
7609  	if (instance->adapter_type != MFI_SERIES)
7610  		megasas_release_fusion(instance);
7611  	else
7612  		megasas_release_mfi(instance);
7613  
7614  	if (instance->msix_vectors)
7615  		pci_free_irq_vectors(instance->pdev);
7616  	instance->msix_vectors = 0;
7617  
7618  	if (instance->fw_crash_state != UNAVAILABLE)
7619  		megasas_free_host_crash_buffer(instance);
7620  
7621  	if (instance->adapter_type != MFI_SERIES)
7622  		megasas_fusion_stop_watchdog(instance);
7623  fail_init_mfi:
7624  	scsi_host_put(host);
7625  fail_alloc_instance:
7626  	pci_disable_device(pdev);
7627  
7628  	return -ENODEV;
7629  }
7630  
7631  /**
7632   * megasas_flush_cache -	Requests FW to flush all its caches
7633   * @instance:			Adapter soft state
7634   */
megasas_flush_cache(struct megasas_instance * instance)7635  static void megasas_flush_cache(struct megasas_instance *instance)
7636  {
7637  	struct megasas_cmd *cmd;
7638  	struct megasas_dcmd_frame *dcmd;
7639  
7640  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7641  		return;
7642  
7643  	cmd = megasas_get_cmd(instance);
7644  
7645  	if (!cmd)
7646  		return;
7647  
7648  	dcmd = &cmd->frame->dcmd;
7649  
7650  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7651  
7652  	dcmd->cmd = MFI_CMD_DCMD;
7653  	dcmd->cmd_status = 0x0;
7654  	dcmd->sge_count = 0;
7655  	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7656  	dcmd->timeout = 0;
7657  	dcmd->pad_0 = 0;
7658  	dcmd->data_xfer_len = 0;
7659  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7660  	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7661  
7662  	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7663  			!= DCMD_SUCCESS) {
7664  		dev_err(&instance->pdev->dev,
7665  			"return from %s %d\n", __func__, __LINE__);
7666  		return;
7667  	}
7668  
7669  	megasas_return_cmd(instance, cmd);
7670  }
7671  
7672  /**
7673   * megasas_shutdown_controller -	Instructs FW to shutdown the controller
7674   * @instance:				Adapter soft state
7675   * @opcode:				Shutdown/Hibernate
7676   */
megasas_shutdown_controller(struct megasas_instance * instance,u32 opcode)7677  static void megasas_shutdown_controller(struct megasas_instance *instance,
7678  					u32 opcode)
7679  {
7680  	struct megasas_cmd *cmd;
7681  	struct megasas_dcmd_frame *dcmd;
7682  
7683  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7684  		return;
7685  
7686  	cmd = megasas_get_cmd(instance);
7687  
7688  	if (!cmd)
7689  		return;
7690  
7691  	if (instance->aen_cmd)
7692  		megasas_issue_blocked_abort_cmd(instance,
7693  			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7694  	if (instance->map_update_cmd)
7695  		megasas_issue_blocked_abort_cmd(instance,
7696  			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7697  	if (instance->jbod_seq_cmd)
7698  		megasas_issue_blocked_abort_cmd(instance,
7699  			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7700  
7701  	dcmd = &cmd->frame->dcmd;
7702  
7703  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7704  
7705  	dcmd->cmd = MFI_CMD_DCMD;
7706  	dcmd->cmd_status = 0x0;
7707  	dcmd->sge_count = 0;
7708  	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7709  	dcmd->timeout = 0;
7710  	dcmd->pad_0 = 0;
7711  	dcmd->data_xfer_len = 0;
7712  	dcmd->opcode = cpu_to_le32(opcode);
7713  
7714  	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7715  			!= DCMD_SUCCESS) {
7716  		dev_err(&instance->pdev->dev,
7717  			"return from %s %d\n", __func__, __LINE__);
7718  		return;
7719  	}
7720  
7721  	megasas_return_cmd(instance, cmd);
7722  }
7723  
7724  /**
7725   * megasas_suspend -	driver suspend entry point
7726   * @dev:		Device structure
7727   */
7728  static int __maybe_unused
megasas_suspend(struct device * dev)7729  megasas_suspend(struct device *dev)
7730  {
7731  	struct megasas_instance *instance;
7732  
7733  	instance = dev_get_drvdata(dev);
7734  
7735  	if (!instance)
7736  		return 0;
7737  
7738  	instance->unload = 1;
7739  
7740  	dev_info(dev, "%s is called\n", __func__);
7741  
7742  	/* Shutdown SR-IOV heartbeat timer */
7743  	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7744  		del_timer_sync(&instance->sriov_heartbeat_timer);
7745  
7746  	/* Stop the FW fault detection watchdog */
7747  	if (instance->adapter_type != MFI_SERIES)
7748  		megasas_fusion_stop_watchdog(instance);
7749  
7750  	megasas_flush_cache(instance);
7751  	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7752  
7753  	/* cancel the delayed work if this work still in queue */
7754  	if (instance->ev != NULL) {
7755  		struct megasas_aen_event *ev = instance->ev;
7756  		cancel_delayed_work_sync(&ev->hotplug_work);
7757  		instance->ev = NULL;
7758  	}
7759  
7760  	tasklet_kill(&instance->isr_tasklet);
7761  
7762  	pci_set_drvdata(instance->pdev, instance);
7763  	instance->instancet->disable_intr(instance);
7764  
7765  	megasas_destroy_irqs(instance);
7766  
7767  	if (instance->msix_vectors)
7768  		pci_free_irq_vectors(instance->pdev);
7769  
7770  	return 0;
7771  }
7772  
7773  /**
7774   * megasas_resume-      driver resume entry point
7775   * @dev:		Device structure
7776   */
7777  static int __maybe_unused
megasas_resume(struct device * dev)7778  megasas_resume(struct device *dev)
7779  {
7780  	int rval;
7781  	struct Scsi_Host *host;
7782  	struct megasas_instance *instance;
7783  	u32 status_reg;
7784  
7785  	instance = dev_get_drvdata(dev);
7786  
7787  	if (!instance)
7788  		return 0;
7789  
7790  	host = instance->host;
7791  
7792  	dev_info(dev, "%s is called\n", __func__);
7793  
7794  	/*
7795  	 * We expect the FW state to be READY
7796  	 */
7797  
7798  	if (megasas_transition_to_ready(instance, 0)) {
7799  		dev_info(&instance->pdev->dev,
7800  			 "Failed to transition controller to ready from %s!\n",
7801  			 __func__);
7802  		if (instance->adapter_type != MFI_SERIES) {
7803  			status_reg =
7804  				instance->instancet->read_fw_status_reg(instance);
7805  			if (!(status_reg & MFI_RESET_ADAPTER) ||
7806  				((megasas_adp_reset_wait_for_ready
7807  				(instance, true, 0)) == FAILED))
7808  				goto fail_ready_state;
7809  		} else {
7810  			atomic_set(&instance->fw_reset_no_pci_access, 1);
7811  			instance->instancet->adp_reset
7812  				(instance, instance->reg_set);
7813  			atomic_set(&instance->fw_reset_no_pci_access, 0);
7814  
7815  			/* waiting for about 30 seconds before retry */
7816  			ssleep(30);
7817  
7818  			if (megasas_transition_to_ready(instance, 0))
7819  				goto fail_ready_state;
7820  		}
7821  
7822  		dev_info(&instance->pdev->dev,
7823  			 "FW restarted successfully from %s!\n",
7824  			 __func__);
7825  	}
7826  	if (megasas_set_dma_mask(instance))
7827  		goto fail_set_dma_mask;
7828  
7829  	/*
7830  	 * Initialize MFI Firmware
7831  	 */
7832  
7833  	atomic_set(&instance->fw_outstanding, 0);
7834  	atomic_set(&instance->ldio_outstanding, 0);
7835  
7836  	/* Now re-enable MSI-X */
7837  	if (instance->msix_vectors)
7838  		megasas_alloc_irq_vectors(instance);
7839  
7840  	if (!instance->msix_vectors) {
7841  		rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
7842  					     PCI_IRQ_LEGACY);
7843  		if (rval < 0)
7844  			goto fail_reenable_msix;
7845  	}
7846  
7847  	megasas_setup_reply_map(instance);
7848  
7849  	if (instance->adapter_type != MFI_SERIES) {
7850  		megasas_reset_reply_desc(instance);
7851  		if (megasas_ioc_init_fusion(instance)) {
7852  			megasas_free_cmds(instance);
7853  			megasas_free_cmds_fusion(instance);
7854  			goto fail_init_mfi;
7855  		}
7856  		if (!megasas_get_map_info(instance))
7857  			megasas_sync_map_info(instance);
7858  	} else {
7859  		*instance->producer = 0;
7860  		*instance->consumer = 0;
7861  		if (megasas_issue_init_mfi(instance))
7862  			goto fail_init_mfi;
7863  	}
7864  
7865  	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7866  		goto fail_init_mfi;
7867  
7868  	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7869  		     (unsigned long)instance);
7870  
7871  	if (instance->msix_vectors ?
7872  			megasas_setup_irqs_msix(instance, 0) :
7873  			megasas_setup_irqs_ioapic(instance))
7874  		goto fail_init_mfi;
7875  
7876  	if (instance->adapter_type != MFI_SERIES)
7877  		megasas_setup_irq_poll(instance);
7878  
7879  	/* Re-launch SR-IOV heartbeat timer */
7880  	if (instance->requestorId) {
7881  		if (!megasas_sriov_start_heartbeat(instance, 0))
7882  			megasas_start_timer(instance);
7883  		else {
7884  			instance->skip_heartbeat_timer_del = 1;
7885  			goto fail_init_mfi;
7886  		}
7887  	}
7888  
7889  	instance->instancet->enable_intr(instance);
7890  	megasas_setup_jbod_map(instance);
7891  	instance->unload = 0;
7892  
7893  	/*
7894  	 * Initiate AEN (Asynchronous Event Notification)
7895  	 */
7896  	if (megasas_start_aen(instance))
7897  		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7898  
7899  	/* Re-launch FW fault watchdog */
7900  	if (instance->adapter_type != MFI_SERIES)
7901  		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7902  			goto fail_start_watchdog;
7903  
7904  	return 0;
7905  
7906  fail_start_watchdog:
7907  	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7908  		del_timer_sync(&instance->sriov_heartbeat_timer);
7909  fail_init_mfi:
7910  	megasas_free_ctrl_dma_buffers(instance);
7911  	megasas_free_ctrl_mem(instance);
7912  	scsi_host_put(host);
7913  
7914  fail_reenable_msix:
7915  fail_set_dma_mask:
7916  fail_ready_state:
7917  
7918  	return -ENODEV;
7919  }
7920  
7921  static inline int
megasas_wait_for_adapter_operational(struct megasas_instance * instance)7922  megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7923  {
7924  	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7925  	int i;
7926  	u8 adp_state;
7927  
7928  	for (i = 0; i < wait_time; i++) {
7929  		adp_state = atomic_read(&instance->adprecovery);
7930  		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7931  		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7932  			break;
7933  
7934  		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7935  			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7936  
7937  		msleep(1000);
7938  	}
7939  
7940  	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7941  		dev_info(&instance->pdev->dev,
7942  			 "%s HBA failed to become operational, adp_state %d\n",
7943  			 __func__, adp_state);
7944  		return 1;
7945  	}
7946  
7947  	return 0;
7948  }
7949  
7950  /**
7951   * megasas_detach_one -	PCI hot"un"plug entry point
7952   * @pdev:		PCI device structure
7953   */
megasas_detach_one(struct pci_dev * pdev)7954  static void megasas_detach_one(struct pci_dev *pdev)
7955  {
7956  	int i;
7957  	struct Scsi_Host *host;
7958  	struct megasas_instance *instance;
7959  	struct fusion_context *fusion;
7960  	size_t pd_seq_map_sz;
7961  
7962  	instance = pci_get_drvdata(pdev);
7963  
7964  	if (!instance)
7965  		return;
7966  
7967  	host = instance->host;
7968  	fusion = instance->ctrl_context;
7969  
7970  	/* Shutdown SR-IOV heartbeat timer */
7971  	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7972  		del_timer_sync(&instance->sriov_heartbeat_timer);
7973  
7974  	/* Stop the FW fault detection watchdog */
7975  	if (instance->adapter_type != MFI_SERIES)
7976  		megasas_fusion_stop_watchdog(instance);
7977  
7978  	if (instance->fw_crash_state != UNAVAILABLE)
7979  		megasas_free_host_crash_buffer(instance);
7980  	scsi_remove_host(instance->host);
7981  	instance->unload = 1;
7982  
7983  	if (megasas_wait_for_adapter_operational(instance))
7984  		goto skip_firing_dcmds;
7985  
7986  	megasas_flush_cache(instance);
7987  	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7988  
7989  skip_firing_dcmds:
7990  	/* cancel the delayed work if this work still in queue*/
7991  	if (instance->ev != NULL) {
7992  		struct megasas_aen_event *ev = instance->ev;
7993  		cancel_delayed_work_sync(&ev->hotplug_work);
7994  		instance->ev = NULL;
7995  	}
7996  
7997  	/* cancel all wait events */
7998  	wake_up_all(&instance->int_cmd_wait_q);
7999  
8000  	tasklet_kill(&instance->isr_tasklet);
8001  
8002  	/*
8003  	 * Take the instance off the instance array. Note that we will not
8004  	 * decrement the max_index. We let this array be sparse array
8005  	 */
8006  	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8007  		if (megasas_mgmt_info.instance[i] == instance) {
8008  			megasas_mgmt_info.count--;
8009  			megasas_mgmt_info.instance[i] = NULL;
8010  
8011  			break;
8012  		}
8013  	}
8014  
8015  	instance->instancet->disable_intr(instance);
8016  
8017  	megasas_destroy_irqs(instance);
8018  
8019  	if (instance->msix_vectors)
8020  		pci_free_irq_vectors(instance->pdev);
8021  
8022  	if (instance->adapter_type >= VENTURA_SERIES) {
8023  		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
8024  			kfree(fusion->stream_detect_by_ld[i]);
8025  		kfree(fusion->stream_detect_by_ld);
8026  		fusion->stream_detect_by_ld = NULL;
8027  	}
8028  
8029  
8030  	if (instance->adapter_type != MFI_SERIES) {
8031  		megasas_release_fusion(instance);
8032  		pd_seq_map_sz =
8033  			struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC,
8034  				      seq, MAX_PHYSICAL_DEVICES);
8035  		for (i = 0; i < 2 ; i++) {
8036  			if (fusion->ld_map[i])
8037  				dma_free_coherent(&instance->pdev->dev,
8038  						  fusion->max_map_sz,
8039  						  fusion->ld_map[i],
8040  						  fusion->ld_map_phys[i]);
8041  			if (fusion->ld_drv_map[i]) {
8042  				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
8043  					vfree(fusion->ld_drv_map[i]);
8044  				else
8045  					free_pages((ulong)fusion->ld_drv_map[i],
8046  						   fusion->drv_map_pages);
8047  			}
8048  
8049  			if (fusion->pd_seq_sync[i])
8050  				dma_free_coherent(&instance->pdev->dev,
8051  					pd_seq_map_sz,
8052  					fusion->pd_seq_sync[i],
8053  					fusion->pd_seq_phys[i]);
8054  		}
8055  	} else {
8056  		megasas_release_mfi(instance);
8057  	}
8058  
8059  	if (instance->vf_affiliation)
8060  		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
8061  				    sizeof(struct MR_LD_VF_AFFILIATION),
8062  				    instance->vf_affiliation,
8063  				    instance->vf_affiliation_h);
8064  
8065  	if (instance->vf_affiliation_111)
8066  		dma_free_coherent(&pdev->dev,
8067  				    sizeof(struct MR_LD_VF_AFFILIATION_111),
8068  				    instance->vf_affiliation_111,
8069  				    instance->vf_affiliation_111_h);
8070  
8071  	if (instance->hb_host_mem)
8072  		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
8073  				    instance->hb_host_mem,
8074  				    instance->hb_host_mem_h);
8075  
8076  	megasas_free_ctrl_dma_buffers(instance);
8077  
8078  	megasas_free_ctrl_mem(instance);
8079  
8080  	megasas_destroy_debugfs(instance);
8081  
8082  	scsi_host_put(host);
8083  
8084  	pci_disable_device(pdev);
8085  }
8086  
8087  /**
8088   * megasas_shutdown -	Shutdown entry point
8089   * @pdev:		PCI device structure
8090   */
megasas_shutdown(struct pci_dev * pdev)8091  static void megasas_shutdown(struct pci_dev *pdev)
8092  {
8093  	struct megasas_instance *instance = pci_get_drvdata(pdev);
8094  
8095  	if (!instance)
8096  		return;
8097  
8098  	instance->unload = 1;
8099  
8100  	if (megasas_wait_for_adapter_operational(instance))
8101  		goto skip_firing_dcmds;
8102  
8103  	megasas_flush_cache(instance);
8104  	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
8105  
8106  skip_firing_dcmds:
8107  	instance->instancet->disable_intr(instance);
8108  	megasas_destroy_irqs(instance);
8109  
8110  	if (instance->msix_vectors)
8111  		pci_free_irq_vectors(instance->pdev);
8112  }
8113  
8114  /*
8115   * megasas_mgmt_open -	char node "open" entry point
8116   * @inode:	char node inode
8117   * @filep:	char node file
8118   */
megasas_mgmt_open(struct inode * inode,struct file * filep)8119  static int megasas_mgmt_open(struct inode *inode, struct file *filep)
8120  {
8121  	/*
8122  	 * Allow only those users with admin rights
8123  	 */
8124  	if (!capable(CAP_SYS_ADMIN))
8125  		return -EACCES;
8126  
8127  	return 0;
8128  }
8129  
8130  /*
8131   * megasas_mgmt_fasync -	Async notifier registration from applications
8132   * @fd:		char node file descriptor number
8133   * @filep:	char node file
8134   * @mode:	notifier on/off
8135   *
8136   * This function adds the calling process to a driver global queue. When an
8137   * event occurs, SIGIO will be sent to all processes in this queue.
8138   */
megasas_mgmt_fasync(int fd,struct file * filep,int mode)8139  static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
8140  {
8141  	int rc;
8142  
8143  	mutex_lock(&megasas_async_queue_mutex);
8144  
8145  	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
8146  
8147  	mutex_unlock(&megasas_async_queue_mutex);
8148  
8149  	if (rc >= 0) {
8150  		/* For sanity check when we get ioctl */
8151  		filep->private_data = filep;
8152  		return 0;
8153  	}
8154  
8155  	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
8156  
8157  	return rc;
8158  }
8159  
8160  /*
8161   * megasas_mgmt_poll -  char node "poll" entry point
8162   * @filep:	char node file
8163   * @wait:	Events to poll for
8164   */
megasas_mgmt_poll(struct file * file,poll_table * wait)8165  static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8166  {
8167  	__poll_t mask;
8168  	unsigned long flags;
8169  
8170  	poll_wait(file, &megasas_poll_wait, wait);
8171  	spin_lock_irqsave(&poll_aen_lock, flags);
8172  	if (megasas_poll_wait_aen)
8173  		mask = (EPOLLIN | EPOLLRDNORM);
8174  	else
8175  		mask = 0;
8176  	megasas_poll_wait_aen = 0;
8177  	spin_unlock_irqrestore(&poll_aen_lock, flags);
8178  	return mask;
8179  }
8180  
8181  /*
8182   * megasas_set_crash_dump_params_ioctl:
8183   *		Send CRASH_DUMP_MODE DCMD to all controllers
8184   * @cmd:	MFI command frame
8185   */
8186  
megasas_set_crash_dump_params_ioctl(struct megasas_cmd * cmd)8187  static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8188  {
8189  	struct megasas_instance *local_instance;
8190  	int i, error = 0;
8191  	int crash_support;
8192  
8193  	crash_support = cmd->frame->dcmd.mbox.w[0];
8194  
8195  	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8196  		local_instance = megasas_mgmt_info.instance[i];
8197  		if (local_instance && local_instance->crash_dump_drv_support) {
8198  			if ((atomic_read(&local_instance->adprecovery) ==
8199  				MEGASAS_HBA_OPERATIONAL) &&
8200  				!megasas_set_crash_dump_params(local_instance,
8201  					crash_support)) {
8202  				local_instance->crash_dump_app_support =
8203  					crash_support;
8204  				dev_info(&local_instance->pdev->dev,
8205  					"Application firmware crash "
8206  					"dump mode set success\n");
8207  				error = 0;
8208  			} else {
8209  				dev_info(&local_instance->pdev->dev,
8210  					"Application firmware crash "
8211  					"dump mode set failed\n");
8212  				error = -1;
8213  			}
8214  		}
8215  	}
8216  	return error;
8217  }
8218  
8219  /**
8220   * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
8221   * @instance:			Adapter soft state
8222   * @user_ioc:			User's ioctl packet
8223   * @ioc:			ioctl packet
8224   */
8225  static int
megasas_mgmt_fw_ioctl(struct megasas_instance * instance,struct megasas_iocpacket __user * user_ioc,struct megasas_iocpacket * ioc)8226  megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8227  		      struct megasas_iocpacket __user * user_ioc,
8228  		      struct megasas_iocpacket *ioc)
8229  {
8230  	struct megasas_sge64 *kern_sge64 = NULL;
8231  	struct megasas_sge32 *kern_sge32 = NULL;
8232  	struct megasas_cmd *cmd;
8233  	void *kbuff_arr[MAX_IOCTL_SGE];
8234  	dma_addr_t buf_handle = 0;
8235  	int error = 0, i;
8236  	void *sense = NULL;
8237  	dma_addr_t sense_handle;
8238  	void *sense_ptr;
8239  	u32 opcode = 0;
8240  	int ret = DCMD_SUCCESS;
8241  
8242  	memset(kbuff_arr, 0, sizeof(kbuff_arr));
8243  
8244  	if (ioc->sge_count > MAX_IOCTL_SGE) {
8245  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
8246  		       ioc->sge_count, MAX_IOCTL_SGE);
8247  		return -EINVAL;
8248  	}
8249  
8250  	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8251  	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8252  	    !instance->support_nvme_passthru) ||
8253  	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8254  	    !instance->support_pci_lane_margining)) {
8255  		dev_err(&instance->pdev->dev,
8256  			"Received invalid ioctl command 0x%x\n",
8257  			ioc->frame.hdr.cmd);
8258  		return -ENOTSUPP;
8259  	}
8260  
8261  	cmd = megasas_get_cmd(instance);
8262  	if (!cmd) {
8263  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8264  		return -ENOMEM;
8265  	}
8266  
8267  	/*
8268  	 * User's IOCTL packet has 2 frames (maximum). Copy those two
8269  	 * frames into our cmd's frames. cmd->frame's context will get
8270  	 * overwritten when we copy from user's frames. So set that value
8271  	 * alone separately
8272  	 */
8273  	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8274  	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8275  	cmd->frame->hdr.pad_0 = 0;
8276  
8277  	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8278  
8279  	if (instance->consistent_mask_64bit)
8280  		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8281  				       MFI_FRAME_SENSE64));
8282  	else
8283  		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8284  					       MFI_FRAME_SENSE64));
8285  
8286  	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8287  		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8288  
8289  	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8290  		mutex_lock(&instance->reset_mutex);
8291  		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8292  			megasas_return_cmd(instance, cmd);
8293  			mutex_unlock(&instance->reset_mutex);
8294  			return -1;
8295  		}
8296  		mutex_unlock(&instance->reset_mutex);
8297  	}
8298  
8299  	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8300  		error = megasas_set_crash_dump_params_ioctl(cmd);
8301  		megasas_return_cmd(instance, cmd);
8302  		return error;
8303  	}
8304  
8305  	/*
8306  	 * The management interface between applications and the fw uses
8307  	 * MFI frames. E.g, RAID configuration changes, LD property changes
8308  	 * etc are accomplishes through different kinds of MFI frames. The
8309  	 * driver needs to care only about substituting user buffers with
8310  	 * kernel buffers in SGLs. The location of SGL is embedded in the
8311  	 * struct iocpacket itself.
8312  	 */
8313  	if (instance->consistent_mask_64bit)
8314  		kern_sge64 = (struct megasas_sge64 *)
8315  			((unsigned long)cmd->frame + ioc->sgl_off);
8316  	else
8317  		kern_sge32 = (struct megasas_sge32 *)
8318  			((unsigned long)cmd->frame + ioc->sgl_off);
8319  
8320  	/*
8321  	 * For each user buffer, create a mirror buffer and copy in
8322  	 */
8323  	for (i = 0; i < ioc->sge_count; i++) {
8324  		if (!ioc->sgl[i].iov_len)
8325  			continue;
8326  
8327  		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8328  						    ioc->sgl[i].iov_len,
8329  						    &buf_handle, GFP_KERNEL);
8330  		if (!kbuff_arr[i]) {
8331  			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8332  			       "kernel SGL buffer for IOCTL\n");
8333  			error = -ENOMEM;
8334  			goto out;
8335  		}
8336  
8337  		/*
8338  		 * We don't change the dma_coherent_mask, so
8339  		 * dma_alloc_coherent only returns 32bit addresses
8340  		 */
8341  		if (instance->consistent_mask_64bit) {
8342  			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8343  			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8344  		} else {
8345  			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8346  			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8347  		}
8348  
8349  		/*
8350  		 * We created a kernel buffer corresponding to the
8351  		 * user buffer. Now copy in from the user buffer
8352  		 */
8353  		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8354  				   (u32) (ioc->sgl[i].iov_len))) {
8355  			error = -EFAULT;
8356  			goto out;
8357  		}
8358  	}
8359  
8360  	if (ioc->sense_len) {
8361  		/* make sure the pointer is part of the frame */
8362  		if (ioc->sense_off >
8363  		    (sizeof(union megasas_frame) - sizeof(__le64))) {
8364  			error = -EINVAL;
8365  			goto out;
8366  		}
8367  
8368  		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8369  					     &sense_handle, GFP_KERNEL);
8370  		if (!sense) {
8371  			error = -ENOMEM;
8372  			goto out;
8373  		}
8374  
8375  		/* always store 64 bits regardless of addressing */
8376  		sense_ptr = (void *)cmd->frame + ioc->sense_off;
8377  		put_unaligned_le64(sense_handle, sense_ptr);
8378  	}
8379  
8380  	/*
8381  	 * Set the sync_cmd flag so that the ISR knows not to complete this
8382  	 * cmd to the SCSI mid-layer
8383  	 */
8384  	cmd->sync_cmd = 1;
8385  
8386  	ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8387  	switch (ret) {
8388  	case DCMD_INIT:
8389  	case DCMD_BUSY:
8390  		cmd->sync_cmd = 0;
8391  		dev_err(&instance->pdev->dev,
8392  			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8393  			 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8394  			 cmd->cmd_status_drv);
8395  		error = -EBUSY;
8396  		goto out;
8397  	}
8398  
8399  	cmd->sync_cmd = 0;
8400  
8401  	if (instance->unload == 1) {
8402  		dev_info(&instance->pdev->dev, "Driver unload is in progress "
8403  			"don't submit data to application\n");
8404  		goto out;
8405  	}
8406  	/*
8407  	 * copy out the kernel buffers to user buffers
8408  	 */
8409  	for (i = 0; i < ioc->sge_count; i++) {
8410  		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8411  				 ioc->sgl[i].iov_len)) {
8412  			error = -EFAULT;
8413  			goto out;
8414  		}
8415  	}
8416  
8417  	/*
8418  	 * copy out the sense
8419  	 */
8420  	if (ioc->sense_len) {
8421  		void __user *uptr;
8422  		/*
8423  		 * sense_ptr points to the location that has the user
8424  		 * sense buffer address
8425  		 */
8426  		sense_ptr = (void *)ioc->frame.raw + ioc->sense_off;
8427  		if (in_compat_syscall())
8428  			uptr = compat_ptr(get_unaligned((compat_uptr_t *)
8429  							sense_ptr));
8430  		else
8431  			uptr = get_unaligned((void __user **)sense_ptr);
8432  
8433  		if (copy_to_user(uptr, sense, ioc->sense_len)) {
8434  			dev_err(&instance->pdev->dev, "Failed to copy out to user "
8435  					"sense data\n");
8436  			error = -EFAULT;
8437  			goto out;
8438  		}
8439  	}
8440  
8441  	/*
8442  	 * copy the status codes returned by the fw
8443  	 */
8444  	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8445  			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8446  		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8447  		error = -EFAULT;
8448  	}
8449  
8450  out:
8451  	if (sense) {
8452  		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8453  				    sense, sense_handle);
8454  	}
8455  
8456  	for (i = 0; i < ioc->sge_count; i++) {
8457  		if (kbuff_arr[i]) {
8458  			if (instance->consistent_mask_64bit)
8459  				dma_free_coherent(&instance->pdev->dev,
8460  					le32_to_cpu(kern_sge64[i].length),
8461  					kbuff_arr[i],
8462  					le64_to_cpu(kern_sge64[i].phys_addr));
8463  			else
8464  				dma_free_coherent(&instance->pdev->dev,
8465  					le32_to_cpu(kern_sge32[i].length),
8466  					kbuff_arr[i],
8467  					le32_to_cpu(kern_sge32[i].phys_addr));
8468  			kbuff_arr[i] = NULL;
8469  		}
8470  	}
8471  
8472  	megasas_return_cmd(instance, cmd);
8473  	return error;
8474  }
8475  
8476  static struct megasas_iocpacket *
megasas_compat_iocpacket_get_user(void __user * arg)8477  megasas_compat_iocpacket_get_user(void __user *arg)
8478  {
8479  	struct megasas_iocpacket *ioc;
8480  	struct compat_megasas_iocpacket __user *cioc = arg;
8481  	size_t size;
8482  	int err = -EFAULT;
8483  	int i;
8484  
8485  	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
8486  	if (!ioc)
8487  		return ERR_PTR(-ENOMEM);
8488  	size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame);
8489  	if (copy_from_user(ioc, arg, size))
8490  		goto out;
8491  
8492  	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8493  		compat_uptr_t iov_base;
8494  
8495  		if (get_user(iov_base, &cioc->sgl[i].iov_base) ||
8496  		    get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len))
8497  			goto out;
8498  
8499  		ioc->sgl[i].iov_base = compat_ptr(iov_base);
8500  	}
8501  
8502  	return ioc;
8503  out:
8504  	kfree(ioc);
8505  	return ERR_PTR(err);
8506  }
8507  
megasas_mgmt_ioctl_fw(struct file * file,unsigned long arg)8508  static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8509  {
8510  	struct megasas_iocpacket __user *user_ioc =
8511  	    (struct megasas_iocpacket __user *)arg;
8512  	struct megasas_iocpacket *ioc;
8513  	struct megasas_instance *instance;
8514  	int error;
8515  
8516  	if (in_compat_syscall())
8517  		ioc = megasas_compat_iocpacket_get_user(user_ioc);
8518  	else
8519  		ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket));
8520  
8521  	if (IS_ERR(ioc))
8522  		return PTR_ERR(ioc);
8523  
8524  	instance = megasas_lookup_instance(ioc->host_no);
8525  	if (!instance) {
8526  		error = -ENODEV;
8527  		goto out_kfree_ioc;
8528  	}
8529  
8530  	/* Block ioctls in VF mode */
8531  	if (instance->requestorId && !allow_vf_ioctls) {
8532  		error = -ENODEV;
8533  		goto out_kfree_ioc;
8534  	}
8535  
8536  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8537  		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8538  		error = -ENODEV;
8539  		goto out_kfree_ioc;
8540  	}
8541  
8542  	if (instance->unload == 1) {
8543  		error = -ENODEV;
8544  		goto out_kfree_ioc;
8545  	}
8546  
8547  	if (down_interruptible(&instance->ioctl_sem)) {
8548  		error = -ERESTARTSYS;
8549  		goto out_kfree_ioc;
8550  	}
8551  
8552  	if  (megasas_wait_for_adapter_operational(instance)) {
8553  		error = -ENODEV;
8554  		goto out_up;
8555  	}
8556  
8557  	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8558  out_up:
8559  	up(&instance->ioctl_sem);
8560  
8561  out_kfree_ioc:
8562  	kfree(ioc);
8563  	return error;
8564  }
8565  
megasas_mgmt_ioctl_aen(struct file * file,unsigned long arg)8566  static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8567  {
8568  	struct megasas_instance *instance;
8569  	struct megasas_aen aen;
8570  	int error;
8571  
8572  	if (file->private_data != file) {
8573  		printk(KERN_DEBUG "megasas: fasync_helper was not "
8574  		       "called first\n");
8575  		return -EINVAL;
8576  	}
8577  
8578  	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8579  		return -EFAULT;
8580  
8581  	instance = megasas_lookup_instance(aen.host_no);
8582  
8583  	if (!instance)
8584  		return -ENODEV;
8585  
8586  	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8587  		return -ENODEV;
8588  	}
8589  
8590  	if (instance->unload == 1) {
8591  		return -ENODEV;
8592  	}
8593  
8594  	if  (megasas_wait_for_adapter_operational(instance))
8595  		return -ENODEV;
8596  
8597  	mutex_lock(&instance->reset_mutex);
8598  	error = megasas_register_aen(instance, aen.seq_num,
8599  				     aen.class_locale_word);
8600  	mutex_unlock(&instance->reset_mutex);
8601  	return error;
8602  }
8603  
8604  /**
8605   * megasas_mgmt_ioctl -	char node ioctl entry point
8606   * @file:	char device file pointer
8607   * @cmd:	ioctl command
8608   * @arg:	ioctl command arguments address
8609   */
8610  static long
megasas_mgmt_ioctl(struct file * file,unsigned int cmd,unsigned long arg)8611  megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8612  {
8613  	switch (cmd) {
8614  	case MEGASAS_IOC_FIRMWARE:
8615  		return megasas_mgmt_ioctl_fw(file, arg);
8616  
8617  	case MEGASAS_IOC_GET_AEN:
8618  		return megasas_mgmt_ioctl_aen(file, arg);
8619  	}
8620  
8621  	return -ENOTTY;
8622  }
8623  
8624  #ifdef CONFIG_COMPAT
8625  static long
megasas_mgmt_compat_ioctl(struct file * file,unsigned int cmd,unsigned long arg)8626  megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8627  			  unsigned long arg)
8628  {
8629  	switch (cmd) {
8630  	case MEGASAS_IOC_FIRMWARE32:
8631  		return megasas_mgmt_ioctl_fw(file, arg);
8632  	case MEGASAS_IOC_GET_AEN:
8633  		return megasas_mgmt_ioctl_aen(file, arg);
8634  	}
8635  
8636  	return -ENOTTY;
8637  }
8638  #endif
8639  
8640  /*
8641   * File operations structure for management interface
8642   */
8643  static const struct file_operations megasas_mgmt_fops = {
8644  	.owner = THIS_MODULE,
8645  	.open = megasas_mgmt_open,
8646  	.fasync = megasas_mgmt_fasync,
8647  	.unlocked_ioctl = megasas_mgmt_ioctl,
8648  	.poll = megasas_mgmt_poll,
8649  #ifdef CONFIG_COMPAT
8650  	.compat_ioctl = megasas_mgmt_compat_ioctl,
8651  #endif
8652  	.llseek = noop_llseek,
8653  };
8654  
8655  static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume);
8656  
8657  /*
8658   * PCI hotplug support registration structure
8659   */
8660  static struct pci_driver megasas_pci_driver = {
8661  
8662  	.name = "megaraid_sas",
8663  	.id_table = megasas_pci_table,
8664  	.probe = megasas_probe_one,
8665  	.remove = megasas_detach_one,
8666  	.driver.pm = &megasas_pm_ops,
8667  	.shutdown = megasas_shutdown,
8668  };
8669  
8670  /*
8671   * Sysfs driver attributes
8672   */
version_show(struct device_driver * dd,char * buf)8673  static ssize_t version_show(struct device_driver *dd, char *buf)
8674  {
8675  	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8676  			MEGASAS_VERSION);
8677  }
8678  static DRIVER_ATTR_RO(version);
8679  
release_date_show(struct device_driver * dd,char * buf)8680  static ssize_t release_date_show(struct device_driver *dd, char *buf)
8681  {
8682  	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8683  		MEGASAS_RELDATE);
8684  }
8685  static DRIVER_ATTR_RO(release_date);
8686  
support_poll_for_event_show(struct device_driver * dd,char * buf)8687  static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8688  {
8689  	return sprintf(buf, "%u\n", support_poll_for_event);
8690  }
8691  static DRIVER_ATTR_RO(support_poll_for_event);
8692  
support_device_change_show(struct device_driver * dd,char * buf)8693  static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8694  {
8695  	return sprintf(buf, "%u\n", support_device_change);
8696  }
8697  static DRIVER_ATTR_RO(support_device_change);
8698  
dbg_lvl_show(struct device_driver * dd,char * buf)8699  static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8700  {
8701  	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8702  }
8703  
dbg_lvl_store(struct device_driver * dd,const char * buf,size_t count)8704  static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8705  			     size_t count)
8706  {
8707  	int retval = count;
8708  
8709  	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8710  		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8711  		retval = -EINVAL;
8712  	}
8713  	return retval;
8714  }
8715  static DRIVER_ATTR_RW(dbg_lvl);
8716  
8717  static ssize_t
support_nvme_encapsulation_show(struct device_driver * dd,char * buf)8718  support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8719  {
8720  	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8721  }
8722  
8723  static DRIVER_ATTR_RO(support_nvme_encapsulation);
8724  
8725  static ssize_t
support_pci_lane_margining_show(struct device_driver * dd,char * buf)8726  support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8727  {
8728  	return sprintf(buf, "%u\n", support_pci_lane_margining);
8729  }
8730  
8731  static DRIVER_ATTR_RO(support_pci_lane_margining);
8732  
megasas_remove_scsi_device(struct scsi_device * sdev)8733  static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8734  {
8735  	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8736  	scsi_remove_device(sdev);
8737  	scsi_device_put(sdev);
8738  }
8739  
8740  /**
8741   * megasas_update_device_list -	Update the PD and LD device list from FW
8742   *				after an AEN event notification
8743   * @instance:			Adapter soft state
8744   * @event_type:			Indicates type of event (PD or LD event)
8745   *
8746   * @return:			Success or failure
8747   *
8748   * Issue DCMDs to Firmware to update the internal device list in driver.
8749   * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8750   * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8751   */
8752  static
megasas_update_device_list(struct megasas_instance * instance,int event_type)8753  int megasas_update_device_list(struct megasas_instance *instance,
8754  			       int event_type)
8755  {
8756  	int dcmd_ret;
8757  
8758  	if (instance->enable_fw_dev_list) {
8759  		return megasas_host_device_list_query(instance, false);
8760  	} else {
8761  		if (event_type & SCAN_PD_CHANNEL) {
8762  			dcmd_ret = megasas_get_pd_list(instance);
8763  			if (dcmd_ret != DCMD_SUCCESS)
8764  				return dcmd_ret;
8765  		}
8766  
8767  		if (event_type & SCAN_VD_CHANNEL) {
8768  			if (!instance->requestorId ||
8769  			megasas_get_ld_vf_affiliation(instance, 0)) {
8770  				return megasas_ld_list_query(instance,
8771  						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8772  			}
8773  		}
8774  	}
8775  	return DCMD_SUCCESS;
8776  }
8777  
8778  /**
8779   * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8780   *				after an AEN event notification
8781   * @instance:			Adapter soft state
8782   * @scan_type:			Indicates type of devices (PD/LD) to add
8783   * @return			void
8784   */
8785  static
megasas_add_remove_devices(struct megasas_instance * instance,int scan_type)8786  void megasas_add_remove_devices(struct megasas_instance *instance,
8787  				int scan_type)
8788  {
8789  	int i, j;
8790  	u16 pd_index = 0;
8791  	u16 ld_index = 0;
8792  	u16 channel = 0, id = 0;
8793  	struct Scsi_Host *host;
8794  	struct scsi_device *sdev1;
8795  	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8796  	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8797  
8798  	host = instance->host;
8799  
8800  	if (instance->enable_fw_dev_list) {
8801  		targetid_list = instance->host_device_list_buf;
8802  		for (i = 0; i < targetid_list->count; i++) {
8803  			targetid_entry = &targetid_list->host_device_list[i];
8804  			if (targetid_entry->flags.u.bits.is_sys_pd) {
8805  				channel = le16_to_cpu(targetid_entry->target_id) /
8806  						MEGASAS_MAX_DEV_PER_CHANNEL;
8807  				id = le16_to_cpu(targetid_entry->target_id) %
8808  						MEGASAS_MAX_DEV_PER_CHANNEL;
8809  			} else {
8810  				channel = MEGASAS_MAX_PD_CHANNELS +
8811  					  (le16_to_cpu(targetid_entry->target_id) /
8812  					   MEGASAS_MAX_DEV_PER_CHANNEL);
8813  				id = le16_to_cpu(targetid_entry->target_id) %
8814  						MEGASAS_MAX_DEV_PER_CHANNEL;
8815  			}
8816  			sdev1 = scsi_device_lookup(host, channel, id, 0);
8817  			if (!sdev1) {
8818  				scsi_add_device(host, channel, id, 0);
8819  			} else {
8820  				scsi_device_put(sdev1);
8821  			}
8822  		}
8823  	}
8824  
8825  	if (scan_type & SCAN_PD_CHANNEL) {
8826  		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8827  			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8828  				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8829  				sdev1 = scsi_device_lookup(host, i, j, 0);
8830  				if (instance->pd_list[pd_index].driveState ==
8831  							MR_PD_STATE_SYSTEM) {
8832  					if (!sdev1)
8833  						scsi_add_device(host, i, j, 0);
8834  					else
8835  						scsi_device_put(sdev1);
8836  				} else {
8837  					if (sdev1)
8838  						megasas_remove_scsi_device(sdev1);
8839  				}
8840  			}
8841  		}
8842  	}
8843  
8844  	if (scan_type & SCAN_VD_CHANNEL) {
8845  		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8846  			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8847  				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8848  				sdev1 = scsi_device_lookup(host,
8849  						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8850  				if (instance->ld_ids[ld_index] != 0xff) {
8851  					if (!sdev1)
8852  						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8853  					else
8854  						scsi_device_put(sdev1);
8855  				} else {
8856  					if (sdev1)
8857  						megasas_remove_scsi_device(sdev1);
8858  				}
8859  			}
8860  		}
8861  	}
8862  
8863  }
8864  
8865  static void
megasas_aen_polling(struct work_struct * work)8866  megasas_aen_polling(struct work_struct *work)
8867  {
8868  	struct megasas_aen_event *ev =
8869  		container_of(work, struct megasas_aen_event, hotplug_work.work);
8870  	struct megasas_instance *instance = ev->instance;
8871  	union megasas_evt_class_locale class_locale;
8872  	int event_type = 0;
8873  	u32 seq_num;
8874  	u16 ld_target_id;
8875  	int error;
8876  	u8  dcmd_ret = DCMD_SUCCESS;
8877  	struct scsi_device *sdev1;
8878  
8879  	if (!instance) {
8880  		printk(KERN_ERR "invalid instance!\n");
8881  		kfree(ev);
8882  		return;
8883  	}
8884  
8885  	/* Don't run the event workqueue thread if OCR is running */
8886  	mutex_lock(&instance->reset_mutex);
8887  
8888  	instance->ev = NULL;
8889  	if (instance->evt_detail) {
8890  		megasas_decode_evt(instance);
8891  
8892  		switch (le32_to_cpu(instance->evt_detail->code)) {
8893  
8894  		case MR_EVT_PD_INSERTED:
8895  		case MR_EVT_PD_REMOVED:
8896  			event_type = SCAN_PD_CHANNEL;
8897  			break;
8898  
8899  		case MR_EVT_LD_OFFLINE:
8900  		case MR_EVT_LD_DELETED:
8901  			ld_target_id = instance->evt_detail->args.ld.target_id;
8902  			sdev1 = scsi_device_lookup(instance->host,
8903  						   MEGASAS_MAX_PD_CHANNELS +
8904  						   (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
8905  						   (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL),
8906  						   0);
8907  			if (sdev1) {
8908  				mutex_unlock(&instance->reset_mutex);
8909  				megasas_remove_scsi_device(sdev1);
8910  				mutex_lock(&instance->reset_mutex);
8911  			}
8912  
8913  			event_type = SCAN_VD_CHANNEL;
8914  			break;
8915  		case MR_EVT_LD_CREATED:
8916  			event_type = SCAN_VD_CHANNEL;
8917  			break;
8918  
8919  		case MR_EVT_CFG_CLEARED:
8920  		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8921  		case MR_EVT_FOREIGN_CFG_IMPORTED:
8922  		case MR_EVT_LD_STATE_CHANGE:
8923  			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8924  			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8925  				instance->host->host_no);
8926  			break;
8927  
8928  		case MR_EVT_CTRL_PROP_CHANGED:
8929  			dcmd_ret = megasas_get_ctrl_info(instance);
8930  			if (dcmd_ret == DCMD_SUCCESS &&
8931  			    instance->snapdump_wait_time) {
8932  				megasas_get_snapdump_properties(instance);
8933  				dev_info(&instance->pdev->dev,
8934  					 "Snap dump wait time\t: %d\n",
8935  					 instance->snapdump_wait_time);
8936  			}
8937  			break;
8938  		default:
8939  			event_type = 0;
8940  			break;
8941  		}
8942  	} else {
8943  		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8944  		mutex_unlock(&instance->reset_mutex);
8945  		kfree(ev);
8946  		return;
8947  	}
8948  
8949  	if (event_type)
8950  		dcmd_ret = megasas_update_device_list(instance, event_type);
8951  
8952  	mutex_unlock(&instance->reset_mutex);
8953  
8954  	if (event_type && dcmd_ret == DCMD_SUCCESS)
8955  		megasas_add_remove_devices(instance, event_type);
8956  
8957  	if (dcmd_ret == DCMD_SUCCESS)
8958  		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8959  	else
8960  		seq_num = instance->last_seq_num;
8961  
8962  	/* Register AEN with FW for latest sequence number plus 1 */
8963  	class_locale.members.reserved = 0;
8964  	class_locale.members.locale = MR_EVT_LOCALE_ALL;
8965  	class_locale.members.class = MR_EVT_CLASS_DEBUG;
8966  
8967  	if (instance->aen_cmd != NULL) {
8968  		kfree(ev);
8969  		return;
8970  	}
8971  
8972  	mutex_lock(&instance->reset_mutex);
8973  	error = megasas_register_aen(instance, seq_num,
8974  					class_locale.word);
8975  	if (error)
8976  		dev_err(&instance->pdev->dev,
8977  			"register aen failed error %x\n", error);
8978  
8979  	mutex_unlock(&instance->reset_mutex);
8980  	kfree(ev);
8981  }
8982  
8983  /**
8984   * megasas_init - Driver load entry point
8985   */
megasas_init(void)8986  static int __init megasas_init(void)
8987  {
8988  	int rval;
8989  
8990  	/*
8991  	 * Booted in kdump kernel, minimize memory footprints by
8992  	 * disabling few features
8993  	 */
8994  	if (reset_devices) {
8995  		msix_vectors = 1;
8996  		rdpq_enable = 0;
8997  		dual_qdepth_disable = 1;
8998  		poll_queues = 0;
8999  	}
9000  
9001  	/*
9002  	 * Announce driver version and other information
9003  	 */
9004  	pr_info("megasas: %s\n", MEGASAS_VERSION);
9005  
9006  	megasas_dbg_lvl = 0;
9007  	support_poll_for_event = 2;
9008  	support_device_change = 1;
9009  	support_nvme_encapsulation = true;
9010  	support_pci_lane_margining = true;
9011  
9012  	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
9013  
9014  	/*
9015  	 * Register character device node
9016  	 */
9017  	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
9018  
9019  	if (rval < 0) {
9020  		printk(KERN_DEBUG "megasas: failed to open device node\n");
9021  		return rval;
9022  	}
9023  
9024  	megasas_mgmt_majorno = rval;
9025  
9026  	megasas_init_debugfs();
9027  
9028  	/*
9029  	 * Register ourselves as PCI hotplug module
9030  	 */
9031  	rval = pci_register_driver(&megasas_pci_driver);
9032  
9033  	if (rval) {
9034  		printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
9035  		goto err_pcidrv;
9036  	}
9037  
9038  	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
9039  	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
9040  		pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
9041  		event_log_level = MFI_EVT_CLASS_CRITICAL;
9042  	}
9043  
9044  	rval = driver_create_file(&megasas_pci_driver.driver,
9045  				  &driver_attr_version);
9046  	if (rval)
9047  		goto err_dcf_attr_ver;
9048  
9049  	rval = driver_create_file(&megasas_pci_driver.driver,
9050  				  &driver_attr_release_date);
9051  	if (rval)
9052  		goto err_dcf_rel_date;
9053  
9054  	rval = driver_create_file(&megasas_pci_driver.driver,
9055  				&driver_attr_support_poll_for_event);
9056  	if (rval)
9057  		goto err_dcf_support_poll_for_event;
9058  
9059  	rval = driver_create_file(&megasas_pci_driver.driver,
9060  				  &driver_attr_dbg_lvl);
9061  	if (rval)
9062  		goto err_dcf_dbg_lvl;
9063  	rval = driver_create_file(&megasas_pci_driver.driver,
9064  				&driver_attr_support_device_change);
9065  	if (rval)
9066  		goto err_dcf_support_device_change;
9067  
9068  	rval = driver_create_file(&megasas_pci_driver.driver,
9069  				  &driver_attr_support_nvme_encapsulation);
9070  	if (rval)
9071  		goto err_dcf_support_nvme_encapsulation;
9072  
9073  	rval = driver_create_file(&megasas_pci_driver.driver,
9074  				  &driver_attr_support_pci_lane_margining);
9075  	if (rval)
9076  		goto err_dcf_support_pci_lane_margining;
9077  
9078  	return rval;
9079  
9080  err_dcf_support_pci_lane_margining:
9081  	driver_remove_file(&megasas_pci_driver.driver,
9082  			   &driver_attr_support_nvme_encapsulation);
9083  
9084  err_dcf_support_nvme_encapsulation:
9085  	driver_remove_file(&megasas_pci_driver.driver,
9086  			   &driver_attr_support_device_change);
9087  
9088  err_dcf_support_device_change:
9089  	driver_remove_file(&megasas_pci_driver.driver,
9090  			   &driver_attr_dbg_lvl);
9091  err_dcf_dbg_lvl:
9092  	driver_remove_file(&megasas_pci_driver.driver,
9093  			&driver_attr_support_poll_for_event);
9094  err_dcf_support_poll_for_event:
9095  	driver_remove_file(&megasas_pci_driver.driver,
9096  			   &driver_attr_release_date);
9097  err_dcf_rel_date:
9098  	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
9099  err_dcf_attr_ver:
9100  	pci_unregister_driver(&megasas_pci_driver);
9101  err_pcidrv:
9102  	megasas_exit_debugfs();
9103  	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
9104  	return rval;
9105  }
9106  
9107  /**
9108   * megasas_exit - Driver unload entry point
9109   */
megasas_exit(void)9110  static void __exit megasas_exit(void)
9111  {
9112  	driver_remove_file(&megasas_pci_driver.driver,
9113  			   &driver_attr_dbg_lvl);
9114  	driver_remove_file(&megasas_pci_driver.driver,
9115  			&driver_attr_support_poll_for_event);
9116  	driver_remove_file(&megasas_pci_driver.driver,
9117  			&driver_attr_support_device_change);
9118  	driver_remove_file(&megasas_pci_driver.driver,
9119  			   &driver_attr_release_date);
9120  	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
9121  	driver_remove_file(&megasas_pci_driver.driver,
9122  			   &driver_attr_support_nvme_encapsulation);
9123  	driver_remove_file(&megasas_pci_driver.driver,
9124  			   &driver_attr_support_pci_lane_margining);
9125  
9126  	pci_unregister_driver(&megasas_pci_driver);
9127  	megasas_exit_debugfs();
9128  	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
9129  }
9130  
9131  module_init(megasas_init);
9132  module_exit(megasas_exit);
9133