/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2009-2013 LSI Corporation
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright (c) 2016-2018 Broadcom Inc.
 *
 * FILE: megaraid_sas_fusion.h
 *
 * Authors: Broadcom Inc.
 *          Manoj Jose
 *          Sumant Patro
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#ifndef _MEGARAID_SAS_FUSION_H_
#define _MEGARAID_SAS_FUSION_H_

/* Fusion defines */
#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024
#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
#define MEGASAS_MAX_CHAIN_SHIFT 5
#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000
#define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0
#define MEGASAS_256K_IO 128
#define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4)
#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
#define MEGASAS_LOAD_BALANCE_FLAG 0x1
#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
#define HOST_DIAG_WRITE_ENABLE 0x80
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3
#define MAX_MSIX_QUEUES_FUSION 128
#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK)

/* Invader defines */
#define MPI2_TYPE_CUDA 0x2
#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
#define MR_RL_WRITE_THROUGH_MODE 0x00
#define MR_RL_WRITE_BACK_MODE 0x01

/* T10 PI defines */
#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60

#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)

/*
 * Raid context flags
 */

#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
enum MR_RAID_FLAGS_IO_SUB_TYPE {
	MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
	MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
	MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
	MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7,
	MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD = 8
};
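
/*
 * Illustrative sketch, not part of the original header: the IO sub-type is
 * placed into the raid_flags byte at the bit position given by the shift
 * above (the mask covers only the two bits documented as ioSubType[5:4]).
 * The helper name is hypothetical.
 */
static inline u8 mr_build_raid_flags(enum MR_RAID_FLAGS_IO_SUB_TYPE sub_type)
{
	/* e.g. sub_type == MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS */
	return (u8)(sub_type << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
}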

/*
 * Request descriptor types
 */
#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1

#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
#define IOC_INIT_FRAME_SIZE 4096

/*
 * Raid Context structure which describes MegaRAID specific IO Parameters
 * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
 */

struct RAID_CONTEXT {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 nseg:4;
	u8 type:4;
#else
	u8 type:4;
	u8 nseg:4;
#endif
	u8 resvd0;
	__le16 timeout_value;
	u8 reg_lock_flags;
	u8 resvd1;
	__le16 virtual_disk_tgt_id;
	__le64 reg_lock_row_lba;
	__le32 reg_lock_length;
	__le16 next_lmid;
	u8 ex_status;
	u8 status;
	u8 raid_flags;
	u8 num_sge;
	__le16 config_seq_num;
	u8 span_arm;
	u8 priority;
	u8 num_sge_ext;
	u8 resvd2;
};

/*
 * Raid Context structure which describes Ventura MegaRAID specific
 * IO Parameters. This resides at offset 0x60 where the SGL normally
 * starts in MPT IO Frames
 */
struct RAID_CONTEXT_G35 {
#define RAID_CONTEXT_NSEG_MASK 0x00F0
#define RAID_CONTEXT_NSEG_SHIFT 4
#define RAID_CONTEXT_TYPE_MASK 0x000F
#define RAID_CONTEXT_TYPE_SHIFT 0
	u16 nseg_type;
	u16 timeout_value; /* 0x02 - 0x03 */
	u16 routing_flags; /* 0x04 - 0x05 routing flags */
	u16 virtual_disk_tgt_id; /* 0x06 - 0x07 */
	__le64 reg_lock_row_lba; /* 0x08 - 0x0F */
	u32 reg_lock_length; /* 0x10 - 0x13 */
	union { /* flow specific */
		u16 rmw_op_index; /* 0x14 - 0x15, R5/6 RMW: rmw operation index */
		u16 peer_smid; /* 0x14 - 0x15, R1 Write: peer smid */
		u16 r56_arm_map; /* 0x14 - 0x15, Unused [15], LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */

	} flow_specific;

	u8 ex_status; /* 0x16 : OUT */
	u8 status; /* 0x17 status */
	u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4],
			* resvd[3:1], preferredCpu[0]
			*/
	u8 span_arm; /* 0x19 span[7:5], arm[4:0] */
	u16 config_seq_num; /* 0x1A - 0x1B */
	union {
		/*
		 * Bit format:
		 * ---------------------------------
		 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
		 * ---------------------------------
		 * Byte0 | numSGE[7]- numSGE[0]    |
		 * ---------------------------------
		 * Byte1 |SD | resvd | numSGE 8-11 |
		 * ---------------------------------
		 */
#define NUM_SGE_MASK_LOWER 0xFF
#define NUM_SGE_MASK_UPPER 0x0F
#define NUM_SGE_SHIFT_UPPER 8
#define STREAM_DETECT_SHIFT 7
#define STREAM_DETECT_MASK 0x80
		struct {
#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
			u16 stream_detected:1;
			u16 reserved:3;
			u16 num_sge:12;
#else
			u16 num_sge:12;
			u16 reserved:3;
			u16 stream_detected:1;
#endif
		} bits;
		u8 bytes[2];
	} u;
	u8 resvd2[2]; /* 0x1E - 0x1F */
};

#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1
#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2
#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3
#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4
#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5
#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6
#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7
#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8
#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00
#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12
#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000
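
/*
 * Illustrative sketch, not part of the original header: composing a Ventura
 * routing_flags word from the bit positions above. The helper name, the
 * parameter choices, and the reading of SQN as the sequence-number enable
 * bit are assumptions made for illustration only.
 */
static inline u16 mr_build_routing_flags(u8 cpu_sel, bool seq_num_enable)
{
	u16 flags = 0;

	if (seq_num_enable)
		flags |= 1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT;

	/* CPU select occupies bits 11:8 */
	flags |= ((u16)cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT) &
		 MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK;

	return flags;
}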

static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
			       u16 sge_count)
{
	rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
	rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
				     & NUM_SGE_MASK_UPPER);
}

static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
{
	u16 sge_count;

	sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
			   << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
	return sge_count;
}

#define SET_STREAM_DETECTED(rctx_g35) \
	(rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)

#define CLEAR_STREAM_DETECTED(rctx_g35) \
	(rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))

static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
{
	return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK));
}

union RAID_CONTEXT_UNION {
	struct RAID_CONTEXT raid_context;
	struct RAID_CONTEXT_G35 raid_context_g35;
};

#define RAID_CTX_SPANARM_ARM_SHIFT (0)
#define RAID_CTX_SPANARM_ARM_MASK (0x1f)

#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)

/* LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
#define RAID_CTX_R56_Q_ARM_MASK (0x1F)
#define RAID_CTX_R56_P_ARM_SHIFT (5)
#define RAID_CTX_R56_P_ARM_MASK (0x3E0)
#define RAID_CTX_R56_LOG_ARM_SHIFT (10)
#define RAID_CTX_R56_LOG_ARM_MASK (0x7C00)
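
/*
 * Illustrative sketch, not part of the original header: decoding span_arm
 * and the RAID 5/6 arm map with the masks above. Helper names are
 * hypothetical.
 */
static inline u8 mr_spanarm_get_arm(u8 span_arm)
{
	return (span_arm & RAID_CTX_SPANARM_ARM_MASK) >>
	       RAID_CTX_SPANARM_ARM_SHIFT;
}

static inline u8 mr_spanarm_get_span(u8 span_arm)
{
	return (span_arm & RAID_CTX_SPANARM_SPAN_MASK) >>
	       RAID_CTX_SPANARM_SPAN_SHIFT;
}

static inline u8 mr_r56_get_p_arm(u16 r56_arm_map)
{
	return (r56_arm_map & RAID_CTX_R56_P_ARM_MASK) >>
	       RAID_CTX_R56_P_ARM_SHIFT;
}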

/* number of bits per index in U32 TrackStream */
#define BITS_PER_INDEX_STREAM 4
#define INVALID_STREAM_NUM 16
#define MR_STREAM_BITMAP 0x76543210
#define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1)
#define ZERO_LAST_STREAM 0x0fffffff
#define MAX_STREAMS_TRACKED 8
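
/*
 * Illustrative sketch, not part of the original header: the per-LD stream
 * bitmap packs eight 4-bit stream indices into a u32 (initialised to
 * MR_STREAM_BITMAP). This hypothetical helper reads the stream index stored
 * in slot 'slot' (0 = most recently used).
 */
static inline u32 mr_stream_index_at(u32 mru_bit_map, u8 slot)
{
	return (mru_bit_map >> (slot * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
}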

/*
 * define region lock types
 */
enum REGION_TYPE {
	REGION_TYPE_UNUSED = 0,
	REGION_TYPE_SHARED_READ = 1,
	REGION_TYPE_SHARED_WRITE = 2,
	REGION_TYPE_EXCLUSIVE = 3,
};

/* MPI2 defines */
#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
#define MPI2_WHOINIT_HOST_DRIVER (0x04)
#define MPI2_VERSION_MAJOR (0x02)
#define MPI2_VERSION_MINOR (0x00)
#define MPI2_VERSION_MAJOR_MASK (0xFF00)
#define MPI2_VERSION_MAJOR_SHIFT (8)
#define MPI2_VERSION_MINOR_MASK (0x00FF)
#define MPI2_VERSION_MINOR_SHIFT (0)
#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
		      MPI2_VERSION_MINOR)
#define MPI2_HEADER_VERSION_UNIT (0x10)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
			     MPI2_HEADER_VERSION_DEV)
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
/* EEDP escape mode */
#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
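
/*
 * Illustrative sketch, not part of the original header: before a host diag
 * reset (HOST_DIAG_RESET_ADAPTER) can be issued, the unlock key sequence is
 * written to the write-sequence register. 'reg_base' is a hypothetical
 * ioremap'ed register base, the helper name is hypothetical, and the sketch
 * assumes <linux/io.h> for writel().
 */
static inline void mr_write_diag_unlock_sequence(void __iomem *reg_base)
{
	void __iomem *seq_reg = reg_base + MPI2_WRITE_SEQUENCE_OFFSET;

	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, seq_reg);
	writel(MPI2_WRSEQ_1ST_KEY_VALUE, seq_reg);
	writel(MPI2_WRSEQ_2ND_KEY_VALUE, seq_reg);
	writel(MPI2_WRSEQ_3RD_KEY_VALUE, seq_reg);
	writel(MPI2_WRSEQ_4TH_KEY_VALUE, seq_reg);
	writel(MPI2_WRSEQ_5TH_KEY_VALUE, seq_reg);
	writel(MPI2_WRSEQ_6TH_KEY_VALUE, seq_reg);
}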

struct MPI25_IEEE_SGE_CHAIN64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 NextChainOffset;
	u8 Flags;
};

struct MPI2_SGE_SIMPLE_UNION {
	__le32 FlagsLength;
	union {
		__le32 Address32;
		__le64 Address64;
	} u;
};

struct MPI2_SCSI_IO_CDB_EEDP32 {
	u8 CDB[20]; /* 0x00 */
	__be32 PrimaryReferenceTag; /* 0x14 */
	__be16 PrimaryApplicationTag; /* 0x18 */
	__be16 PrimaryApplicationTagMask; /* 0x1A */
	__le32 TransferLength; /* 0x1C */
};

struct MPI2_SGE_CHAIN_UNION {
	__le16 Length;
	u8 NextChainOffset;
	u8 Flags;
	union {
		__le32 Address32;
		__le64 Address64;
	} u;
};

struct MPI2_IEEE_SGE_SIMPLE32 {
	__le32 Address;
	__le32 FlagsLength;
};

struct MPI2_IEEE_SGE_CHAIN32 {
	__le32 Address;
	__le32 FlagsLength;
};

struct MPI2_IEEE_SGE_SIMPLE64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 Reserved2;
	u8 Flags;
};

struct MPI2_IEEE_SGE_CHAIN64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 Reserved2;
	u8 Flags;
};

union MPI2_IEEE_SGE_SIMPLE_UNION {
	struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
	struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
};

union MPI2_IEEE_SGE_CHAIN_UNION {
	struct MPI2_IEEE_SGE_CHAIN32 Chain32;
	struct MPI2_IEEE_SGE_CHAIN64 Chain64;
};

union MPI2_SGE_IO_UNION {
	struct MPI2_SGE_SIMPLE_UNION MpiSimple;
	struct MPI2_SGE_CHAIN_UNION MpiChain;
	union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
	union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
};

union MPI2_SCSI_IO_CDB_UNION {
	u8 CDB32[32];
	struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
	struct MPI2_SGE_SIMPLE_UNION SGE;
};

/****************************************************************************
 * SCSI Task Management messages
 ****************************************************************************/

/* SCSI Task Management Request Message */
struct MPI2_SCSI_TASK_MANAGE_REQUEST {
	u16 DevHandle; /* 0x00 */
	u8 ChainOffset; /* 0x02 */
	u8 Function; /* 0x03 */
	u8 Reserved1; /* 0x04 */
	u8 TaskType; /* 0x05 */
	u8 Reserved2; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	u16 Reserved3; /* 0x0A */
	u8 LUN[8]; /* 0x0C */
	u32 Reserved4[7]; /* 0x14 */
	u16 TaskMID; /* 0x30 */
	u16 Reserved5; /* 0x32 */
};


/* SCSI Task Management Reply Message */
struct MPI2_SCSI_TASK_MANAGE_REPLY {
	u16 DevHandle; /* 0x00 */
	u8 MsgLength; /* 0x02 */
	u8 Function; /* 0x03 */
	u8 ResponseCode; /* 0x04 */
	u8 TaskType; /* 0x05 */
	u8 Reserved1; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	u16 Reserved2; /* 0x0A */
	u16 Reserved3; /* 0x0C */
	u16 IOCStatus; /* 0x0E */
	u32 IOCLogInfo; /* 0x10 */
	u32 TerminationCount; /* 0x14 */
	u32 ResponseInfo; /* 0x18 */
};

struct MR_TM_REQUEST {
	char request[128];
};

struct MR_TM_REPLY {
	char reply[128];
};

/* SCSI Task Management Request Message */
struct MR_TASK_MANAGE_REQUEST {
	/* To be cast to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
	struct MR_TM_REQUEST TmRequest;
	union {
		struct {
#if defined(__BIG_ENDIAN_BITFIELD)
			u32 reserved1:30;
			u32 isTMForPD:1;
			u32 isTMForLD:1;
#else
			u32 isTMForLD:1;
			u32 isTMForPD:1;
			u32 reserved1:30;
#endif
			u32 reserved2;
		} tmReqFlags;
		struct MR_TM_REPLY TMReply;
	};
};

/* TaskType values */

#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)

/* ResponseCode values */

#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
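
/*
 * Illustrative sketch, not part of the original header: TmRequest is meant
 * to be cast to struct MPI2_SCSI_TASK_MANAGE_REQUEST before being filled in.
 * The helper name and the particular field choices are hypothetical; the
 * sketch assumes memset() is available via the usual kernel headers.
 */
static inline void mr_build_abort_task_tm(struct MR_TASK_MANAGE_REQUEST *mr_tm,
					  u16 dev_handle, u16 smid_to_abort)
{
	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm =
		(struct MPI2_SCSI_TASK_MANAGE_REQUEST *)&mr_tm->TmRequest;

	memset(mr_tm, 0, sizeof(*mr_tm));
	mpi_tm->DevHandle = dev_handle;
	mpi_tm->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_tm->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
	mpi_tm->TaskMID = smid_to_abort;
	mr_tm->tmReqFlags.isTMForLD = 1;
}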

/*
 * RAID SCSI IO Request Message
 * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
 */
struct MPI2_RAID_SCSI_IO_REQUEST {
	__le16 DevHandle; /* 0x00 */
	u8 ChainOffset; /* 0x02 */
	u8 Function; /* 0x03 */
	__le16 Reserved1; /* 0x04 */
	u8 Reserved2; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	__le16 Reserved3; /* 0x0A */
	__le32 SenseBufferLowAddress; /* 0x0C */
	__le16 SGLFlags; /* 0x10 */
	u8 SenseBufferLength; /* 0x12 */
	u8 Reserved4; /* 0x13 */
	u8 SGLOffset0; /* 0x14 */
	u8 SGLOffset1; /* 0x15 */
	u8 SGLOffset2; /* 0x16 */
	u8 SGLOffset3; /* 0x17 */
	__le32 SkipCount; /* 0x18 */
	__le32 DataLength; /* 0x1C */
	__le32 BidirectionalDataLength; /* 0x20 */
	__le16 IoFlags; /* 0x24 */
	__le16 EEDPFlags; /* 0x26 */
	__le32 EEDPBlockSize; /* 0x28 */
	__le32 SecondaryReferenceTag; /* 0x2C */
	__le16 SecondaryApplicationTag; /* 0x30 */
	__le16 ApplicationTagTranslationMask; /* 0x32 */
	u8 LUN[8]; /* 0x34 */
	__le32 Control; /* 0x3C */
	union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
	union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
	union {
		union MPI2_SGE_IO_UNION SGL; /* 0x80 */
		DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs);
	};
};

/*
 * MPT RAID MFA IO Descriptor.
 */
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
	u32 RequestFlags:8;
	u32 MessageAddress1:24;
	u32 MessageAddress2;
};

/* Default Request Descriptor */
struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 DescriptorTypeDependent; /* 0x06 */
};

/* High Priority Request Descriptor */
struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 Reserved1; /* 0x06 */
};

/* SCSI IO Request Descriptor */
struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 DevHandle; /* 0x06 */
};

/* SCSI Target Request Descriptor */
struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 IoIndex; /* 0x06 */
};

/* RAID Accelerator Request Descriptor */
struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 Reserved; /* 0x06 */
};

/* union of Request Descriptors */
union MEGASAS_REQUEST_DESCRIPTOR_UNION {
	struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
	struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
	struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
	struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
	struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
	struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
	union {
		struct {
			__le32 low;
			__le32 high;
		} u;
		__le64 Words;
	};
};
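
/*
 * Illustrative sketch, not part of the original header: building a fast-path
 * SCSI IO request descriptor for a given SMID and device handle. The helper
 * name is hypothetical; the flag encoding follows the defines above.
 */
static inline void mr_build_fp_io_request_descriptor(
		union MEGASAS_REQUEST_DESCRIPTOR_UNION *desc,
		u16 smid, u8 msix_index, u16 dev_handle)
{
	desc->SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
				    MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT;
	desc->SCSIIO.MSIxIndex = msix_index;
	desc->SCSIIO.SMID = cpu_to_le16(smid);
	desc->SCSIIO.DevHandle = cpu_to_le16(dev_handle);
	desc->SCSIIO.LMID = 0;
}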

/* Default Reply Descriptor */
struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 DescriptorTypeDependent1; /* 0x02 */
	__le32 DescriptorTypeDependent2; /* 0x04 */
};

/* Address Reply Descriptor */
struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le32 ReplyFrameAddress; /* 0x04 */
};

/* SCSI IO Success Reply Descriptor */
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 TaskTag; /* 0x04 */
	__le16 Reserved1; /* 0x06 */
};

/* TargetAssist Success Reply Descriptor */
struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	u8 SequenceNumber; /* 0x04 */
	u8 Reserved1; /* 0x05 */
	__le16 IoIndex; /* 0x06 */
};

/* Target Command Buffer Reply Descriptor */
struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	u8 VP_ID; /* 0x02 */
	u8 Flags; /* 0x03 */
	__le16 InitiatorDevHandle; /* 0x04 */
	__le16 IoIndex; /* 0x06 */
};

/* RAID Accelerator Success Reply Descriptor */
struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le32 Reserved; /* 0x04 */
};

/* union of Reply Descriptors */
union MPI2_REPLY_DESCRIPTORS_UNION {
	struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
	struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
	struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
	struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
	struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
		RAIDAcceleratorSuccess;
	__le64 Words;
};
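
/*
 * Illustrative sketch, not part of the original header: a reply-queue
 * consumer checks the descriptor type before processing it; the "unused"
 * type means firmware has not yet written that slot. The helper name is
 * hypothetical.
 */
static inline bool mr_reply_descriptor_is_valid(
		union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc)
{
	u8 reply_type = reply_desc->Default.ReplyFlags &
			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	return reply_type != MPI2_RPY_DESCRIPT_FLAGS_UNUSED;
}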

/* IOCInit Request message */
struct MPI2_IOC_INIT_REQUEST {
	u8 WhoInit; /* 0x00 */
	u8 Reserved1; /* 0x01 */
	u8 ChainOffset; /* 0x02 */
	u8 Function; /* 0x03 */
	__le16 Reserved2; /* 0x04 */
	u8 Reserved3; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	__le16 Reserved4; /* 0x0A */
	__le16 MsgVersion; /* 0x0C */
	__le16 HeaderVersion; /* 0x0E */
	u32 Reserved5; /* 0x10 */
	__le16 Reserved6; /* 0x14 */
	u8 HostPageSize; /* 0x16 */
	u8 HostMSIxVectors; /* 0x17 */
	__le16 Reserved8; /* 0x18 */
	__le16 SystemRequestFrameSize; /* 0x1A */
	__le16 ReplyDescriptorPostQueueDepth; /* 0x1C */
	__le16 ReplyFreeQueueDepth; /* 0x1E */
	__le32 SenseBufferAddressHigh; /* 0x20 */
	__le32 SystemReplyAddressHigh; /* 0x24 */
	__le64 SystemRequestFrameBaseAddress; /* 0x28 */
	__le64 ReplyDescriptorPostQueueAddress; /* 0x30 */
	__le64 ReplyFreeQueueAddress; /* 0x38 */
	__le64 TimeStamp; /* 0x40 */
};
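
/*
 * Illustrative sketch, not part of the original header: the IOC init frame
 * fields that are derived from the version/WhoInit defines earlier in this
 * file. The helper name is hypothetical and the frame is assumed to have
 * been zeroed by the caller.
 */
static inline void mr_fill_ioc_init_versions(struct MPI2_IOC_INIT_REQUEST *ioc_init)
{
	ioc_init->Function = MPI2_FUNCTION_IOC_INIT;
	ioc_init->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	ioc_init->MsgVersion = cpu_to_le16(MPI2_VERSION);
	ioc_init->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
}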

/* mrpriv defines */
#define MR_PD_INVALID 0xFFFF
#define MR_DEVHANDLE_INVALID 0xFFFF
#define MAX_SPAN_DEPTH 8
#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
#define MAX_ROW_SIZE 32
#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
#define MAX_LOGICAL_DRIVES 64
#define MAX_LOGICAL_DRIVES_EXT 256
#define MAX_LOGICAL_DRIVES_DYN 512
#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
#define MAX_ARRAYS 128
#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
#define MAX_ARRAYS_EXT 256
#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
#define MAX_API_ARRAYS_DYN 512
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103
#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc */
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100
#define MR_DCMD_CTRL_DEVICE_LIST_GET 0x01190600

struct MR_DEV_HANDLE_INFO {
	__le16 curDevHdl;
	u8 validHandles;
	u8 interfaceType;
	__le16 devHandle[2];
};

struct MR_ARRAY_INFO {
	__le16 pd[MAX_RAIDMAP_ROW_SIZE];
};

struct MR_QUAD_ELEMENT {
	__le64 logStart;
	__le64 logEnd;
	__le64 offsetInSpan;
	__le32 diff;
	__le32 reserved1;
};

struct MR_SPAN_INFO {
	__le32 noElements;
	__le32 reserved1;
	struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
};

struct MR_LD_SPAN {
	__le64 startBlk;
	__le64 numBlks;
	__le16 arrayRef;
	u8 spanRowSize;
	u8 spanRowDataSize;
	u8 reserved[4];
};

struct MR_SPAN_BLOCK_INFO {
	__le64 num_rows;
	struct MR_LD_SPAN span;
	struct MR_SPAN_INFO block_span_info;
};

#define MR_RAID_CTX_CPUSEL_0 0
#define MR_RAID_CTX_CPUSEL_1 1
#define MR_RAID_CTX_CPUSEL_2 2
#define MR_RAID_CTX_CPUSEL_3 3
#define MR_RAID_CTX_CPUSEL_FCFS 0xF

struct MR_CPU_AFFINITY_MASK {
	union {
		struct {
#ifndef __BIG_ENDIAN_BITFIELD
			u8 hw_path:1;
			u8 cpu0:1;
			u8 cpu1:1;
			u8 cpu2:1;
			u8 cpu3:1;
			u8 reserved:3;
#else
			u8 reserved:3;
			u8 cpu3:1;
			u8 cpu2:1;
			u8 cpu1:1;
			u8 cpu0:1;
			u8 hw_path:1;
#endif
		};
		u8 core_mask;
	};
};

struct MR_IO_AFFINITY {
	union {
		struct {
			struct MR_CPU_AFFINITY_MASK pdRead;
			struct MR_CPU_AFFINITY_MASK pdWrite;
			struct MR_CPU_AFFINITY_MASK ldRead;
			struct MR_CPU_AFFINITY_MASK ldWrite;
		};
		u32 word;
	};
	u8 maxCores; /* Total cores + HW Path in ROC */
	u8 reserved[3];
};

struct MR_LD_RAID {
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved4:2;
		u32 fp_cache_bypass_capable:1;
		u32 fp_rmw_capable:1;
		u32 disable_coalescing:1;
		u32 fpBypassRegionLock:1;
		u32 tmCapable:1;
		u32 fpNonRWCapable:1;
		u32 fpReadAcrossStripe:1;
		u32 fpWriteAcrossStripe:1;
		u32 fpReadCapable:1;
		u32 fpWriteCapable:1;
		u32 encryptionType:8;
		u32 pdPiMode:4;
		u32 ldPiMode:4;
		u32 reserved5:2;
		u32 ra_capable:1;
		u32 fpCapable:1;
#else
		u32 fpCapable:1;
		u32 ra_capable:1;
		u32 reserved5:2;
		u32 ldPiMode:4;
		u32 pdPiMode:4;
		u32 encryptionType:8;
		u32 fpWriteCapable:1;
		u32 fpReadCapable:1;
		u32 fpWriteAcrossStripe:1;
		u32 fpReadAcrossStripe:1;
		u32 fpNonRWCapable:1;
		u32 tmCapable:1;
		u32 fpBypassRegionLock:1;
		u32 disable_coalescing:1;
		u32 fp_rmw_capable:1;
		u32 fp_cache_bypass_capable:1;
		u32 reserved4:2;
#endif
	} capability;
	__le32 reserved6;
	__le64 size;
	u8 spanDepth;
	u8 level;
	u8 stripeShift;
	u8 rowSize;
	u8 rowDataSize;
	u8 writeMode;
	u8 PRL;
	u8 SRL;
	__le16 targetId;
	u8 ldState;
	u8 regTypeReqOnWrite;
	u8 modFactor;
	u8 regTypeReqOnRead;
	__le16 seqNum;

	struct {
#ifndef __BIG_ENDIAN_BITFIELD
		u32 ldSyncRequired:1;
		u32 regTypeReqOnReadIsValid:1;
		u32 isEPD:1;
		u32 enableSLDOnAllRWIOs:1;
		u32 reserved:28;
#else
		u32 reserved:28;
		u32 enableSLDOnAllRWIOs:1;
		u32 isEPD:1;
		u32 regTypeReqOnReadIsValid:1;
		u32 ldSyncRequired:1;
#endif
	} flags;

	u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
	u8 fpIoTimeoutForLd; /* 0x2C timeout value used by driver in FP IO */
	/* 0x2D This LD accepts priority boost of this type */
	u8 ld_accept_priority_type;
	u8 reserved2[2]; /* 0x2E - 0x2F */
	/* 0x30 - 0x33, Logical block size for the LD */
	u32 logical_block_length;
	struct {
#ifndef __BIG_ENDIAN_BITFIELD
		/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
		u32 ld_pi_exp:4;
		/* 0x34, LOGICAL BLOCKS PER PHYSICAL
		 * BLOCK EXPONENT from READ CAPACITY 16
		 */
		u32 ld_logical_block_exp:4;
		u32 reserved1:24; /* 0x34 */
#else
		u32 reserved1:24; /* 0x34 */
		/* 0x34, LOGICAL BLOCKS PER PHYSICAL
		 * BLOCK EXPONENT from READ CAPACITY 16
		 */
		u32 ld_logical_block_exp:4;
		/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
		u32 ld_pi_exp:4;
#endif
	}; /* 0x34 - 0x37 */
	/* 0x38 - 0x3f, This will determine which
	 * core will process LD IO and PD IO.
	 */
	struct MR_IO_AFFINITY cpuAffinity;
	/* Bit definitions are specified by MR_IO_AFFINITY */
	u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */
};

struct MR_LD_SPAN_MAP {
	struct MR_LD_RAID ldRaid;
	u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
	struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
};

struct MR_FW_RAID_MAP {
	__le32 totalSize;
	union {
		struct {
			__le32 maxLd;
			__le32 maxSpanDepth;
			__le32 maxRowSize;
			__le32 maxPdCount;
			__le32 maxArrays;
		} validationInfo;
		__le32 version[5];
	};

	__le32 ldCount;
	__le32 Reserved1;
	u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES +
		       MAX_RAIDMAP_VIEWS];
	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];
	struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
	struct MR_LD_SPAN_MAP ldSpanMap[];
};
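
/*
 * Illustrative sketch, not part of the original header: because ldSpanMap[]
 * is a flexible array member, the size of a firmware RAID map holding
 * 'ld_count' logical drives is the fixed header plus one span map per LD.
 * The helper name is hypothetical.
 */
static inline size_t mr_fw_raid_map_sz(u32 ld_count)
{
	return sizeof(struct MR_FW_RAID_MAP) +
	       (size_t)ld_count * sizeof(struct MR_LD_SPAN_MAP);
}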

struct IO_REQUEST_INFO {
	u64 ldStartBlock;
	u32 numBlocks;
	u16 ldTgtId;
	u8 isRead;
	__le16 devHandle;
	u8 pd_interface;
	u64 pdBlock;
	u8 fpOkForIo;
	u8 IoforUnevenSpan;
	u8 start_span;
	u8 do_fp_rlbypass;
	u64 start_row;
	u8 span_arm; /* span[7:5], arm[4:0] */
	u8 pd_after_lb;
	u16 r1_alt_dev_handle; /* raid 1/10 only */
	bool ra_capable;
	u8 data_arms;
};

struct MR_LD_TARGET_SYNC {
	u8 targetId;
	u8 reserved;
	__le16 seqNum;
};

/*
 * RAID Map descriptor Types.
 * Each element should uniquely identify one data structure in the RAID map
 */
enum MR_RAID_MAP_DESC_TYPE {
	/* MR_DEV_HANDLE_INFO data */
	RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0,
	/* target to Ld num Index map */
	RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1,
	/* MR_ARRAY_INFO data */
	RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2,
	/* MR_LD_SPAN_MAP data */
	RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3,
	RAID_MAP_DESC_TYPE_COUNT,
};

/*
 * This table defines the offset, size and num elements of each descriptor
 * type in the RAID Map buffer
 */
struct MR_RAID_MAP_DESC_TABLE {
	/* Raid map descriptor type */
	u32 raid_map_desc_type;
	/* Offset into the RAID map buffer where
	 * descriptor data is saved
	 */
	u32 raid_map_desc_offset;
	/* total size of the
	 * descriptor buffer
	 */
	u32 raid_map_desc_buffer_size;
	/* Number of elements contained in the
	 * descriptor buffer
	 */
	u32 raid_map_desc_elements;
};

/*
 * Dynamic Raid Map Structure.
 */
struct MR_FW_RAID_MAP_DYNAMIC {
	u32 raid_map_size; /* total size of RAID Map structure */
	u32 desc_table_offset; /* Offset of desc table into RAID map */
	u32 desc_table_size; /* Total Size of desc table */
	/* Total Number of elements in the desc table */
	u32 desc_table_num_elements;
	u64 reserved1;
	u32 reserved2[3]; /* future use */
	/* timeout value used by driver in FP IOs */
	u8 fp_pd_io_timeout_sec;
	u8 reserved3[3];
	/* when this seqNum increments, driver needs to
	 * release RMW buffers asap
	 */
	u32 rmw_fp_seq_num;
	u16 ld_count; /* count of lds */
	u16 ar_count; /* count of arrays */
	u16 span_count; /* count of spans */
	u16 reserved4[3];
	/*
	 * The below structure of pointers is only to be used by the driver.
	 * This is added in the API to reduce the amount of code changes
	 * needed in the driver to support the dynamic RAID map. Firmware
	 * should not update these pointers while preparing the RAID map.
	 */
	union {
		struct {
			struct MR_DEV_HANDLE_INFO *dev_hndl_info;
			u16 *ld_tgt_id_to_ld;
			struct MR_ARRAY_INFO *ar_map_info;
			struct MR_LD_SPAN_MAP *ld_span_map;
		};
		u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
	};
	/*
	 * The RAID Map descriptor table defines the layout of data in the
	 * RAID Map. The size of the descriptor table itself could change.
	 */
	/* Variable Size descriptor Table. */
	struct MR_RAID_MAP_DESC_TABLE
		raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
	/* Variable Size buffer containing all data */
	u32 raid_map_desc_data[];
}; /* Dynamically sized RAID map structure */
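
/*
 * Illustrative sketch, not part of the original header: a consumer of the
 * dynamic RAID map locates the descriptor table via desc_table_offset and
 * points the driver-local pointers at the descriptor data; only the
 * span-info case is shown, and the function name is hypothetical.
 */
static inline void mr_locate_span_descriptor(struct MR_FW_RAID_MAP_DYNAMIC *map)
{
	struct MR_RAID_MAP_DESC_TABLE *desc_table =
		(struct MR_RAID_MAP_DESC_TABLE *)((u8 *)map + map->desc_table_offset);
	u32 i;

	for (i = 0; i < map->desc_table_num_elements; i++) {
		if (desc_table[i].raid_map_desc_type == RAID_MAP_DESC_TYPE_SPAN_INFO)
			map->ld_span_map = (struct MR_LD_SPAN_MAP *)
				((u8 *)map + desc_table[i].raid_map_desc_offset);
	}
}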

#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)

#define MPI2_SGE_FLAGS_SHIFT (0x02)
#define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0)
#define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00)
#define IEEE_SGE_FLAGS_FORMAT_NVME (0x02)

#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)

#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15
#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60

struct megasas_register_set;
struct megasas_instance;

union desc_word {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};

struct megasas_cmd_fusion {
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
	dma_addr_t io_request_phys_addr;

	union MPI2_SGE_IO_UNION *sg_frame;
	dma_addr_t sg_frame_phys_addr;

	u8 *sense;
	dma_addr_t sense_phys_addr;

	struct list_head list;
	struct scsi_cmnd *scmd;
	struct megasas_instance *instance;

	u8 retry_for_fw_reset;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;

	/*
	 * Context for an MFI frame.
	 * Used to get the MFI cmd from the list when an MFI cmd is completed.
	 */
	u32 sync_cmd_idx;
	u32 index;
	u8 pd_r1_lb;
	struct completion done;
	u8 pd_interface;
	u16 r1_alt_dev_handle; /* raid 1/10 only */
	bool cmd_completed; /* raid 1/10 fp writes status holder */

};

struct LD_LOAD_BALANCE_INFO {
	u8 loadBalanceFlag;
	u8 reserved1;
	atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
	u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
};

/* SPAN_SET is info calculated from the span info in the RAID map, per LD */
typedef struct _LD_SPAN_SET {
	u64 log_start_lba;
	u64 log_end_lba;
	u64 span_row_start;
	u64 span_row_end;
	u64 data_strip_start;
	u64 data_strip_end;
	u64 data_row_start;
	u64 data_row_end;
	u8 strip_offset[MAX_SPAN_DEPTH];
	u32 span_row_data_width;
	u32 diff;
	u32 reserved[2];
} LD_SPAN_SET, *PLD_SPAN_SET;

typedef struct LOG_BLOCK_SPAN_INFO {
	LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
} LD_SPAN_INFO, *PLD_SPAN_INFO;

struct MR_FW_RAID_MAP_ALL {
	struct MR_FW_RAID_MAP raidMap;
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
} __attribute__ ((packed));

struct MR_DRV_RAID_MAP {
	/* total size of this structure, including this field.
	 * This field will be manipulated by the driver for the extended
	 * RAID map; otherwise the value is picked from the firmware RAID map.
	 */
	__le32 totalSize;

	union {
		struct {
			__le32 maxLd;
			__le32 maxSpanDepth;
			__le32 maxRowSize;
			__le32 maxPdCount;
			__le32 maxArrays;
		} validationInfo;
		__le32 version[5];
	};

	/* timeout value used by driver in FP IOs */
	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];

	__le16 ldCount;
	__le16 arCount;
	__le16 spanCount;
	__le16 reserve3;

	struct MR_DEV_HANDLE_INFO
		devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
	u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
	struct MR_LD_SPAN_MAP ldSpanMap[];

};

/* The driver RAID map size is the same as the extended RAID map.
 * MR_DRV_RAID_MAP_ALL is created to stay in sync with the old RAID map
 * and is mainly for code reuse.
 */
struct MR_DRV_RAID_MAP_ALL {

	struct MR_DRV_RAID_MAP raidMap;
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
} __packed;

struct MR_FW_RAID_MAP_EXT {
	/* Not used in the new map */
	u32 reserved;

	union {
		struct {
			u32 maxLd;
			u32 maxSpanDepth;
			u32 maxRowSize;
			u32 maxPdCount;
			u32 maxArrays;
		} validationInfo;
		u32 version[5];
	};

	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];

	__le16 ldCount;
	__le16 arCount;
	__le16 spanCount;
	__le16 reserve3;

	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
	u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
};

/*
 * define MR_PD_CFG_SEQ structure for system PDs
 */
struct MR_PD_CFG_SEQ {
	u16 seqNum;
	u16 devHandle;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u8 reserved:7;
		u8 tmCapable:1;
#else
		u8 tmCapable:1;
		u8 reserved:7;
#endif
	} capability;
	u8 reserved;
	u16 pd_target_id;
} __packed;

struct MR_PD_CFG_SEQ_NUM_SYNC {
	__le32 size;
	__le32 count;
	struct MR_PD_CFG_SEQ seq[];
} __packed;

/* stream detection */
struct STREAM_DETECT {
	u64 next_seq_lba; /* next LBA to match sequential access */
	struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
	struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
	u32 count_cmds_in_stream; /* count of host commands in this stream */
	u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
	u8 is_read; /* SCSI OpCode for this stream */
	u8 group_depth; /* total number of host commands in group */
	/* TRUE if cannot add any more commands to this group */
	bool group_flush;
	u8 reserved[7]; /* pad to 64-bit alignment */
};

struct LD_STREAM_DETECT {
	bool write_back; /* TRUE if WB, FALSE if WT */
	bool fp_write_enabled;
	bool members_ssds;
	bool fp_cache_bypass_capable;
	u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
	/* this is the array of stream detect structures (one per stream) */
	struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
};

struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
	u64 RDPQBaseAddress;
	u32 Reserved1;
	u32 Reserved2;
};

struct rdpq_alloc_detail {
	struct dma_pool *dma_pool_ptr;
	dma_addr_t pool_entry_phys;
	union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt;
};

struct fusion_context {
	struct megasas_cmd_fusion **cmd_list;
	dma_addr_t req_frames_desc_phys;
	u8 *req_frames_desc;

	struct dma_pool *io_request_frames_pool;
	dma_addr_t io_request_frames_phys;
	u8 *io_request_frames;

	struct dma_pool *sg_dma_pool;
	struct dma_pool *sense_dma_pool;

	u8 *sense;
	dma_addr_t sense_phys_addr;

	atomic_t busy_mq_poll[MAX_MSIX_QUEUES_FUSION];

	dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
	struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
	struct dma_pool *reply_frames_desc_pool;
	struct dma_pool *reply_frames_desc_pool_align;

	u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];

	u32 reply_q_depth;
	u32 request_alloc_sz;
	u32 reply_alloc_sz;
	u32 io_frames_alloc_sz;

	struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
	dma_addr_t rdpq_phys;
	u16 max_sge_in_main_msg;
	u16 max_sge_in_chain;

	u8 chain_offset_io_request;
	u8 chain_offset_mfi_pthru;

	struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
	dma_addr_t ld_map_phys[2];

	/* Non-DMA-able memory. Driver local copy. */
	struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];

	u32 max_map_sz;
	u32 current_map_sz;
	u32 old_map_sz;
	u32 new_map_sz;
	u32 drv_map_sz;
	u32 drv_map_pages;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT];
	dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];
	u8 fast_path_io;
	struct LD_LOAD_BALANCE_INFO *load_balance_info;
	u32 load_balance_info_pages;
	LD_SPAN_INFO *log_to_span;
	u32 log_to_span_pages;
	struct LD_STREAM_DETECT **stream_detect_by_ld;
	dma_addr_t ioc_init_request_phys;
	struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
	struct megasas_cmd *ioc_init_cmd;
	bool pcie_bw_limitation;
	bool r56_div_offload;
};

union desc_value {
	__le64 word;
	struct {
		__le32 low;
		__le32 high;
	} u;
};

enum CMD_RET_VALUES {
	REFIRE_CMD = 1,
	COMPLETE_CMD = 2,
	RETURN_CMD = 3,
};

struct MR_SNAPDUMP_PROPERTIES {
	u8 offload_num;
	u8 max_num_supported;
	u8 cur_num_supported;
	u8 trigger_min_num_sec_before_ocr;
	u8 reserved[12];
};

struct megasas_debugfs_buffer {
	void *buf;
	u32 len;
};

void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
int megasas_sync_map_info(struct megasas_instance *instance);
void megasas_release_fusion(struct megasas_instance *instance);
void megasas_reset_reply_desc(struct megasas_instance *instance);
int megasas_check_mpio_paths(struct megasas_instance *instance,
			     struct scsi_cmnd *scmd);
void megasas_fusion_ocr_wq(struct work_struct *work);

#endif /* _MEGARAID_SAS_FUSION_H_ */