/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2009-2013  LSI Corporation
 *  Copyright (c) 2013-2014  Avago Technologies
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  FILE: megaraid_sas_fp.c
 *
 *  Authors: Avago Technologies
 *           Sumant Patro
 *           Varad Talamacki
 *           Manoj Jose
 *           Kashyap Desai <kashyap.desai@avagotech.com>
 *           Sumit Saxena <sumit.saxena@avagotech.com>
 *
 *  Send feedback to: megaraidlinux.pdl@avagotech.com
 *
 *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
 *  San Jose, California 95131
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
#include <asm/div64.h>
#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
module_param(lb_pending_cmds, uint, S_IRUGO);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
	"threshold. Valid values are 1-128. Default: 4");


#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3

#ifdef FALSE
#undef FALSE
#endif
#define FALSE 0

#ifdef TRUE
#undef TRUE
#endif
#define TRUE 1

#define SPAN_DEBUG 0
#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID  0xff

/* Prototypes */
static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
	struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
	u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
u32 mega_mod64(u64 dividend, u32 divisor)
{
	u64 d;
	u32 remainder;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
	d = dividend;
	remainder = do_div(d, divisor);
	return remainder;
}

/**
 * mega_div64_32 - Do a 64-bit division
 * @dividend:	Dividend
 * @divisor:	Divisor
 *
 * Return: quotient
 **/
u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
	u32 remainder;
	u64 d;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");

	d = dividend;
	remainder = do_div(d, divisor);

	return d;
}

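/*
 * Illustrative sketch (editorial, compiled out): how the two helpers above
 * are typically used together to split a logical block address into a strip
 * number and an offset within the strip. do_div() modifies its dividend in
 * place and returns the remainder, which is why both helpers work on a local
 * copy. All values below are hypothetical.
 */
#if 0
static void example_div_helpers(void)
{
	u64 lba = 1000005ULL;	/* arbitrary logical block address */
	u32 stripe = 128;	/* hypothetical blocks per strip */
	u64 strip = mega_div64_32(lba, stripe);	/* quotient:  7812 */
	u32 ref = mega_mod64(lba, stripe);	/* remainder: 69 */

	/* lba == strip * stripe + ref */
}
#endif
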
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].ldRaid;
}

static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
						   struct MR_DRV_RAID_MAP_ALL
						   *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}

u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
					  struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}
/*
 * This function will populate the driver RAID map using the firmware RAID map.
 */
void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
	struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
	int i;
	u16 ld_count;


	struct MR_DRV_RAID_MAP_ALL *drv_map =
			fusion->ld_drv_map[(instance->map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

	if (instance->supportmax256vd) {
		memcpy(fusion->ld_drv_map[instance->map_id & 1],
			fusion->ld_map[instance->map_id & 1],
			fusion->current_map_sz);
		/* New Raid map will not set totalSize, so keep expected value
		 * for legacy code in ValidateMapInfo
		 */
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
	} else {
		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
			fusion->ld_map[(instance->map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;
		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);

#if VD_EXT_DEBUG
		for (i = 0; i < ld_count; i++) {
			dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
				"Target Id 0x%x Seq Num 0x%x Size 0x%llx\n",
				instance->unique_id, i,
				fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
				fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
				fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
		}
#endif

		memset(drv_map, 0, fusion->drv_map_sz);
		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u8)pFwRaidMap->ldTgtIdToLd[i];
		for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
			i < MAX_LOGICAL_DRIVES_EXT; i++)
			pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
		for (i = 0; i < ld_count; i++) {
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
#if VD_EXT_DEBUG
			dev_dbg(&instance->pdev->dev,
				"pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
				"pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
				"size 0x%x\n", i, i,
				pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
				pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
				(u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
			dev_dbg(&instance->pdev->dev,
				"pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
				"pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
				"size 0x%x\n", i, i,
				pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
				pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
				(u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
				"raid map %p LD RAID MAP %p/%p\n", drv_map,
				pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
				&pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
		}
		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
			sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
			sizeof(struct MR_DEV_HANDLE_INFO) *
			MAX_RAIDMAP_PHYSICAL_DEVICES);
	}
}

/*
 * This function will validate Map info data provided by FW
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	struct MR_DRV_RAID_MAP *pDrvRaidMap;
	struct LD_LOAD_BALANCE_INFO *lbInfo;
	PLD_SPAN_INFO ldSpanInfo;
	struct MR_LD_RAID         *raid;
	u16 ldCount, num_lds;
	u16 ld;
	u32 expected_size;


	MR_PopulateDrvRaidMap(instance);

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;

	lbInfo = fusion->load_balance_info;
	ldSpanInfo = fusion->log_to_span;

	if (instance->supportmax256vd)
		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
	else
		expected_size =
			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
		dev_err(&instance->pdev->dev, "map info structure size 0x%x does not match expected size computed from ld count\n",
		       (unsigned int)expected_size);
		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
			le32_to_cpu(pDrvRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(drv_map, ldSpanInfo);

	mr_update_load_balance_params(drv_map, lbInfo);

	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);

	/* Convert Raid capability values to CPU arch */
	for (ldCount = 0; ldCount < num_lds; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		raid = MR_LdRaidGet(ld, drv_map);
		le32_to_cpus((u32 *)&raid->capability);
	}

	return 1;
}

u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		    struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT    *quad;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	u32                span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;
			if (le64_to_cpu(quad->logStart) <= row && row <=
				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
				le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64  blk, debugBlk;
					blk =  mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
					debugBlk = blk;

					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}
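
/*
 * Worked example (editorial, compiled out) for MR_GetSpanBlock() above,
 * using a hypothetical quad from a one-span RAID map: logStart = 0,
 * logEnd = 1023, diff = 1, offsetInSpan = 0, and stripeShift = 5
 * (32 blocks per strip).
 */
#if 0
static void example_get_span_block(void)
{
	u64 row = 100, logStart = 0, offsetInSpan = 0;
	u32 diff = 1;
	u8 stripeShift = 5;
	u64 blk;

	/* row lies in [logStart, logEnd] and (row - logStart) % diff == 0,
	 * so the quad matches and the span-relative start block is:
	 */
	blk = mega_div64_32(row - logStart, diff);	/* 100 */
	blk = (blk + offsetInSpan) << stripeShift;	/* 3200 */
}
#endif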

/*
******************************************************************************
*
* Function to print info about span set created in driver from FW raid map
*
* Inputs :
* map    - LD map
* ldSpanInfo - ldSpanInfo per HBA instance
*/
#if SPAN_DEBUG
static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo)
{

	u8   span;
	u32    element;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	int ldCount;
	u16 ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
			ld, raid->spanDepth);
		for (span = 0; span < raid->spanDepth; span++)
			dev_dbg(&instance->pdev->dev, "Span=%x,"
			" number of quads=%x\n", span,
			le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
			block_span_info.noElements));
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			span_set = &(ldSpanInfo[ld].span_set[element]);
			if (span_set->span_row_data_width == 0)
				break;

			dev_dbg(&instance->pdev->dev, "Span Set %x: "
				"width=%x, diff=%x\n", element,
				(unsigned int)span_set->span_row_data_width,
				(unsigned int)span_set->diff);
			dev_dbg(&instance->pdev->dev, "logical LBA "
				"start=0x%08lx, end=0x%08lx\n",
				(long unsigned int)span_set->log_start_lba,
				(long unsigned int)span_set->log_end_lba);
			dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->span_row_start,
				(long unsigned int)span_set->span_row_end);
			dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->data_row_start,
				(long unsigned int)span_set->data_row_end);
			dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->data_strip_start,
				(long unsigned int)span_set->data_strip_end);

			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) >=
					element + 1) {
					quad = &map->raidMap.ldSpanMap[ld].
						spanBlock[span].block_span_info.
						quad[element];
					dev_dbg(&instance->pdev->dev, "Span=%x, "
						"Quad=%x, diff=%x\n", span,
						element, le32_to_cpu(quad->diff));
					dev_dbg(&instance->pdev->dev,
						"offset_in_span=0x%08lx\n",
						(long unsigned int)le64_to_cpu(quad->offsetInSpan));
					dev_dbg(&instance->pdev->dev,
						"logical start=0x%08lx, end=0x%08lx\n",
						(long unsigned int)le64_to_cpu(quad->logStart),
						(long unsigned int)le64_to_cpu(quad->logEnd));
				}
			}
		}
	}
	return 0;
}
#endif

/*
******************************************************************************
*
* This routine calculates the Span block for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    row        - Row number
*    map    - LD map
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*    div_error	   - Divide error code.
*/

u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	u32    span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;

		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					(mega_mod64(row - le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64  blk;
						blk = mega_div64_32
						    ((row - le64_to_cpu(quad->logStart)),
						    le32_to_cpu(quad->diff));
						blk = (blk + le64_to_cpu(quad->offsetInSpan))
							 << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}

/*
******************************************************************************
*
* This routine calculates the row for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    Strip        - Strip
*    map    - LD map
*
* Outputs :
*
*    row         - row associated with strip
*/

static u64  get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET	*span_set;
	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
	u32		info, strip_offset, span, span_offset;
	u64		span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
				span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
				span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "Strip 0x%llx, "
			"span_set_Strip 0x%llx, span_set_Row 0x%llx, "
			"data width 0x%llx, span offset 0x%x\n", strip,
			(unsigned long long)span_set_Strip,
			(unsigned long long)span_set_Row,
			(unsigned long long)span_set->span_row_data_width,
			span_offset);
		dev_info(&instance->pdev->dev, "For strip 0x%llx "
			"row is 0x%llx\n", strip,
			(unsigned long long) span_set->data_row_start +
			(unsigned long long) span_set_Row + (span_offset - 1));
#endif
		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	return -1LLU;
}
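
/*
 * Worked example (editorial, compiled out) for get_row_from_strip() above.
 * Hypothetical span set: three spans contributing one data arm each, so
 * span_row_data_width = 3, diff = 1, data_strip_start = 0, data_row_start = 0
 * and strip_offset[] = { 0, 1, 2 }. The code above maps strip 10 as follows.
 */
#if 0
static void example_row_from_strip(void)
{
	u64 span_set_Strip = 10 - 0;				/* 10 */
	u32 strip_offset = mega_mod64(span_set_Strip, 3);	/* 1 */
	u64 span_set_Row = mega_div64_32(span_set_Strip, 3) * 1; /* 3 */
	/* strip_offset >= strip_offset[span] holds for spans 0 and 1 only */
	u32 span_offset = 2;
	u64 row = 0 + span_set_Row + (span_offset - 1);		/* 4 */
}
#endif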


/*
******************************************************************************
*
* This routine calculates the Start Strip for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    row        - Row number
*    map    - LD map
*
* Outputs :
*
*    Strip         - Start strip associated with row
*/

static u64 get_strip_from_row(struct megasas_instance *instance,
		u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    span, info;
	u64  strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
							le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev, "get_strip_from_row "
		"returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}

/*
******************************************************************************
*
* This routine calculates the Physical Arm for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    strip      - Strip
*    map    - LD map
*
* Outputs :
*
*    Phys Arm         - Phys Arm associated with strip
*/

static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    info, strip_offset, span, span_offset, retval;

	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "get_arm_from_strip: "
			"for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
			(long unsigned int)strip, (strip_offset - span_offset));
#endif
		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev, "get_arm_from_strip "
		"returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);

	return -1;
}

/* This function will return the physical arm */
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	/* Need to check correct default value */
	u32    arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(instance, ld, stripe, map);
		if (arm != -1U)
			arm *= 2;
		break;
	}

	return arm;
}
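
/*
 * Illustrative sketch (editorial, compiled out) for get_arm() above. For
 * RAID 0/5/6 the arm is simply the stripe number modulo the span row size;
 * for RAID 1 the array interleaves (data, mirror) pairs, so the logical arm
 * returned by get_arm_from_strip() is doubled to reach the data copy. All
 * values are hypothetical.
 */
#if 0
static void example_get_arm(void)
{
	/* RAID 0/5/6 with SPAN_ROW_SIZE() == 4: stripe 10 -> arm 2 */
	u64 arm = mega_mod64(10, 4);

	/* RAID 1: logical arm 1 -> physical arm 2 (its mirror is arm 3) */
	arm = 1 * 2;
}
#endif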


/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe using spanset
*
* Inputs :
*
*    ld   - Logical drive number
*    stripRow        - Stripe number
*    stripRef    - Reference in stripe
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*/
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32     pd, arRef;
	u8      physArm, span;
	u64     row;
	u8	retval = TRUE;
	u64	*pdBlock = &io_info->pdBlock;
	__le16	*pDevHandle = &io_info->devHandle;
	u32	logArm, rowMod, armQ, arm;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	/* Get row and span from io_info for Uneven Span IO. */
	row	    = io_info->start_row;
	span	    = io_info->start_span;


	if (raid->level == 6) {
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return FALSE;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return FALSE;

	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID)
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = cpu_to_le16(MR_PD_INVALID);
		if ((raid->level >= 5) &&
			((fusion->adapter_type == THUNDERBOLT_SERIES)  ||
			((fusion->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
					physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}

/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe.
*
* Inputs :
*
*    ld   - Logical drive number
*    stripRow        - Stripe number
*    stripRef    - Reference in stripe
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*/
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32         pd, arRef;
	u8          physArm, span;
	u64         row;
	u8	    retval = TRUE;
	u64	    *pdBlock = &io_info->pdBlock;
	__le16	    *pDevHandle = &io_info->devHandle;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;


	row =  mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm =  mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		if (raid->rowSize == 0)
			return FALSE;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
		arm = armQ+1+logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else  {
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
							  raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		/* set dev handle as invalid. */
		*pDevHandle = cpu_to_le16(MR_PD_INVALID);
		if ((raid->level >= 5) &&
			((fusion->adapter_type == THUNDERBOLT_SERIES)  ||
			((fusion->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
		physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}
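
/*
 * Worked example (editorial, compiled out) of the RAID-6 parity rotation in
 * MR_GetPhyParams() above. Hypothetical LD: rowDataSize = 3 data arms,
 * rowSize = 5 physical arms (data + P + Q). For stripRow = 7:
 */
#if 0
static void example_raid6_rotation(void)
{
	u32 logArm = mega_mod64(7, 3);		/* 1: arm within the row */
	u64 row = mega_div64_32(7, 3);		/* 2 */
	u32 rowMod = mega_mod64(row, 5);	/* 2 */
	u32 armQ = 5 - 1 - rowMod;		/* 2: index of the Q drive */
	u32 arm = armQ + 1 + logArm;		/* 4: data follows Q */

	/* arm < rowSize, so no wrap is needed; physArm = 4 */
}
#endif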

/*
******************************************************************************
*
* MR_BuildRaidContext function
*
* This function will initiate command processing.  The start/end row and strip
* information is calculated, then the region lock is acquired.
* This function will return 0 if the region lock was acquired OR the number
* of strips.
*/
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct fusion_context *fusion;
	struct MR_LD_RAID  *raid;
	u32         ld, stripSize, stripe_mask;
	u64         endLba, endStrip, endRow, start_row, start_strip;
	u64         regStart;
	u32         regSize;
	u8          num_strips, numRows;
	u16         ref_in_start_stripe, ref_in_end_stripe;
	u64         ldStartBlock;
	u32         numBlocks, ldTgtId;
	u8          isRead;
	u8	    retval = 0;
	u8	    startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span	= SPAN_INVALID;
	fusion = instance->ctrl_context;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	/*
	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
	 * return FALSE
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (instance->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				"raid->rowDataSize is 0, but SPAN[0] "
				"rowDataSize = 0x%0x, "
				"and there is _NO_ UnevenSpanSupport\n",
				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;


	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip         = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba              = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
	endStrip            = endLba >> raid->stripeShift;
	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */

	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow	  = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev, "return from %s %d. "
				"Send IO w/o region lock.\n",
				__func__, __LINE__);
			return FALSE;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
						ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev, "return from %s %d "
				"for row 0x%llx, start strip 0x%llx "
				"end strip 0x%llx\n", __func__, __LINE__,
				(unsigned long long)start_row,
				(unsigned long long)start_strip,
				(unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span	= startlba_span;
		io_info->start_row	= start_row;
#if SPAN_DEBUG
		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d "
			"for row 0x%llx, start strip 0x%llx end strip 0x%llx"
			" span 0x%x\n", __func__, __LINE__,
			(unsigned long long)start_row,
			(unsigned long long)start_strip,
			(unsigned long long)endStrip, startlba_span);
		dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx "
			"Start span 0x%x\n", (unsigned long long)start_row,
			(unsigned long long)endRow, startlba_span);
#endif
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow    = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */

	/* assume region is at the start of the first row */
	regStart            = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize             = stripSize;

	io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need the full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from startref to end
			   of strip */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow*raid->rowDataSize)
			regSize += ref_in_end_stripe+1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For Uneven span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	pRAID_Context->timeoutValue =
		cpu_to_le16(raid->fpIoTimeoutForLd ?
			    raid->fpIoTimeoutForLd :
			    map->raidMap.fpPdIoTimeoutSec);
	if (fusion->adapter_type == INVADER_SERIES)
		pRAID_Context->regLockFlags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA    = cpu_to_le64(regStart);
	pRAID_Context->regLockLength    = cpu_to_le32(regSize);
	pRAID_Context->configSeqNum	= raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;

	/* Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip, ref_in_start_stripe,
					io_info, pRAID_Context, map) :
				MR_GetPhyParams(instance, ld, start_strip,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		uint stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
				    start_strip + stripIdx,
				    ref_in_start_stripe, io_info,
				    pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
				    start_strip + stripIdx, ref_in_start_stripe,
				    io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}

#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	if (io_info->IoforUnevenSpan)
		get_arm_from_strip(instance, ld, start_strip, map);
#endif
	return TRUE;
}
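
/*
 * Worked example (editorial, compiled out) of the region-lock setup in
 * MR_BuildRaidContext() above for the simplest case. Hypothetical even-span
 * LD: stripeShift = 5 (32-block strips), rowDataSize = 2. A single-strip
 * read of 8 blocks at LD block 100 locks only the blocks it touches:
 */
#if 0
static void example_region_lock(void)
{
	u64 start_strip = 100 >> 5;			/* 3 */
	u16 ref_in_start_stripe = 100 & 31;		/* 4 */
	u64 endStrip = (100 + 8 - 1) >> 5;		/* 3 -> num_strips = 1 */
	u64 start_row = mega_div64_32(start_strip, 2);	/* 1 -> numRows = 1 */
	u64 regStart = (start_row << 5) + ref_in_start_stripe;	/* 36 */
	u32 regSize = 8;	/* single strip: lock only what is needed */
}
#endif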

/*
******************************************************************************
*
* This routine prepares span set info from the valid RAID map and stores it
* in the local copy of ldSpanInfo in the per-instance data structure.
*
* Inputs :
* map    - LD map
* ldSpanInfo - ldSpanInfo per HBA instance
*
*/
static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo)
{
	u8   span, count;
	u32  element, span_row_width;
	u64  span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT    *quad;
	int ldCount;
	u16 ld;


	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);
#endif

}
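
/*
 * Worked example (editorial, compiled out) of the span-set bookkeeping in
 * mr_update_span_set() above. Hypothetical first quad (element 0):
 * logStart = 0, logEnd = 127, diff = 1, three spans each contributing
 * spanRowDataSize = 1 (so span_row_data_width = 3), stripeShift = 5.
 */
#if 0
static void example_span_set(void)
{
	u64 span_row = (127 - 0 + 1) / 1;		/* 128 span rows */
	u64 log_end_lba = ((span_row << 5) * 3) - 1;	/* 12287 */
	u64 data_strip_end = (span_row * 3) - 1;	/* 383 */
	u64 data_row_end = (span_row * 1) - 1;		/* 127 */
}
#endif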

void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
	struct LD_LOAD_BALANCE_INFO *lbInfo)
{
	int ldCount;
	u16 ld;
	struct MR_LD_RAID *raid;

	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}

		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
			(raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}

u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
	struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
	struct fusion_context *fusion;
	struct MR_LD_RAID  *raid;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	u16     pend0, pend1, ld;
	u64     diff0, diff1;
	u8      bestArm, pd0, pd1, span, arm;
	u32     arRef, span_row_size;

	u64 block = io_info->ldStartBlock;
	u32 count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
			>> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);


	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = instance->UnevenSpanSupport ?
			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
		(arm + 1 - span_row_size) : arm + 1, drv_map);

	/* get the pending cmds for the data and mirror arms */
	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

	/* Determine the disk whose head is nearer to the req. block */
	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
	bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

	if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
			(bestArm != arm && pend1 > pend0 + lb_pending_cmds))
		bestArm ^= 1;

	/* Update the last accessed block on the correct pd */
	io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
#if SPAN_DEBUG
	if (arm != bestArm)
		dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
			"occur - span 0x%x arm 0x%x bestArm 0x%x "
			"io_info->span_arm 0x%x\n",
			span, arm, bestArm, io_info->span_arm);
#endif
	return io_info->pd_after_lb;
}
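
/*
 * Worked example (editorial, compiled out) of the RAID-1 arbitration in
 * megasas_get_best_arm_pd() above. Hypothetical state: request block 1000,
 * the data arm's head last at block 900, the mirror's at 5000, and
 * lb_pending_cmds = 4.
 */
#if 0
static void example_best_arm(void)
{
	u64 diff0 = ABS_DIFF(1000, 900);	/* 100:  nearer head wins... */
	u64 diff1 = ABS_DIFF(1000, 5000);	/* 4000 */

	/* ...unless it is too busy: with pend0 = 10 and pend1 = 2,
	 * pend0 > pend1 + lb_pending_cmds, so the IO is steered to the
	 * mirror despite the longer seek.
	 */
}
#endif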

__le16 get_updated_dev_handle(struct megasas_instance *instance,
	struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
	u8 arm_pd;
	__le16 devHandle;
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];

	/* get best new arm (PD ID) */
	arm_pd  = megasas_get_best_arm_pd(instance, lbInfo, io_info);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
	return devHandle;
}