1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2009-2012  LSI Corporation.
5  *
6  *  This program is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU General Public License
8  *  as published by the Free Software Foundation; either version 2
9  *  of the License, or (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  *  FILE: megaraid_sas_fp.c
21  *
22  *  Authors: LSI Corporation
23  *           Sumant Patro
24  *           Varad Talamacki
25  *           Manoj Jose
26  *
27  *  Send feedback to: <megaraidlinux@lsi.com>
28  *
29  *  Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
30  *     ATTN: Linuxraid
31  */
32 
33 #include <linux/kernel.h>
34 #include <linux/types.h>
35 #include <linux/pci.h>
36 #include <linux/list.h>
37 #include <linux/moduleparam.h>
38 #include <linux/module.h>
39 #include <linux/spinlock.h>
40 #include <linux/interrupt.h>
41 #include <linux/delay.h>
42 #include <linux/uio.h>
43 #include <linux/uaccess.h>
44 #include <linux/fs.h>
45 #include <linux/compat.h>
46 #include <linux/blkdev.h>
47 #include <linux/poll.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 
54 #include "megaraid_sas_fusion.h"
55 #include "megaraid_sas.h"
56 #include <asm/div64.h>
57 
#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3
#define FALSE 0
#define TRUE 1

#define SPAN_DEBUG 0
/* Total strips (data + parity) in one span row of LD @ld, span @index_ */
#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
/*
 * Data strips in one span row of LD @ld, span @index_.
 * Fix: use the macro's own map_ argument; the old body referenced a bare
 * "map", silently capturing whatever variable of that name existed at the
 * call site instead of the argument passed in.
 */
#define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID  0xff
67 
68 /* Prototypes */
69 void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
70 	struct LD_LOAD_BALANCE_INFO *lbInfo);
71 
72 static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
73 	PLD_SPAN_INFO ldSpanInfo);
74 static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
75 	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
76 	struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
77 static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
78 	u64 strip, struct MR_FW_RAID_MAP_ALL *map);
79 
80 u32 mega_mod64(u64 dividend, u32 divisor)
81 {
82 	u64 d;
83 	u32 remainder;
84 
85 	if (!divisor)
86 		printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");
87 	d = dividend;
88 	remainder = do_div(d, divisor);
89 	return remainder;
90 }
91 
92 /**
93  * @param dividend    : Dividend
94  * @param divisor    : Divisor
95  *
96  * @return quotient
97  **/
98 u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
99 {
100 	u32 remainder;
101 	u64 d;
102 
103 	if (!divisor)
104 		printk(KERN_ERR "megasas : DIVISOR is zero in mod fn\n");
105 
106 	d = dividend;
107 	remainder = do_div(d, divisor);
108 
109 	return d;
110 }
111 
112 struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
113 {
114 	return &map->raidMap.ldSpanMap[ld].ldRaid;
115 }
116 
117 static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
118 						   struct MR_FW_RAID_MAP_ALL
119 						   *map)
120 {
121 	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
122 }
123 
124 static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
125 {
126 	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
127 }
128 
129 u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
130 {
131 	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
132 }
133 
134 u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
135 {
136 	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
137 }
138 
/*
 * Return the current device handle of physical disk @pd.
 * NOTE(review): unlike the neighbouring accessors, this returns curDevHdl
 * without le16_to_cpu() — confirm whether the field is already CPU-endian
 * in this map layout or whether a byte-swap is missing here.
 */
u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}
143 
144 u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
145 {
146 	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
147 }
148 
149 u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
150 {
151 	return map->raidMap.ldTgtIdToLd[ldTgtId];
152 }
153 
154 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
155 					  struct MR_FW_RAID_MAP_ALL *map)
156 {
157 	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
158 }
159 
160 /*
161  * This function will validate Map info data provided by FW
162  */
163 u8 MR_ValidateMapInfo(struct megasas_instance *instance)
164 {
165 	struct fusion_context *fusion = instance->ctrl_context;
166 	struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
167 	struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
168 	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
169 	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
170 	struct MR_LD_RAID         *raid;
171 	int ldCount, num_lds;
172 	u16 ld;
173 
174 
175 	if (le32_to_cpu(pFwRaidMap->totalSize) !=
176 	    (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
177 	     (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
178 		printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
179 		       (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
180 				       sizeof(struct MR_LD_SPAN_MAP)) +
181 				      (sizeof(struct MR_LD_SPAN_MAP) *
182 					le32_to_cpu(pFwRaidMap->ldCount))));
183 		printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
184 		       ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
185 			le32_to_cpu(pFwRaidMap->totalSize));
186 		return 0;
187 	}
188 
189 	if (instance->UnevenSpanSupport)
190 		mr_update_span_set(map, ldSpanInfo);
191 
192 	mr_update_load_balance_params(map, lbInfo);
193 
194 	num_lds = le32_to_cpu(map->raidMap.ldCount);
195 
196 	/*Convert Raid capability values to CPU arch */
197 	for (ldCount = 0; ldCount < num_lds; ldCount++) {
198 		ld = MR_TargetIdToLdGet(ldCount, map);
199 		raid = MR_LdRaidGet(ld, map);
200 		le32_to_cpus((u32 *)&raid->capability);
201 	}
202 
203 	return 1;
204 }
205 
206 u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
207 		    struct MR_FW_RAID_MAP_ALL *map)
208 {
209 	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
210 	struct MR_QUAD_ELEMENT    *quad;
211 	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
212 	u32                span, j;
213 
214 	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
215 
216 		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
217 			quad = &pSpanBlock->block_span_info.quad[j];
218 
219 			if (le32_to_cpu(quad->diff) == 0)
220 				return SPAN_INVALID;
221 			if (le64_to_cpu(quad->logStart) <= row && row <=
222 				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
223 				le32_to_cpu(quad->diff))) == 0) {
224 				if (span_blk != NULL) {
225 					u64  blk, debugBlk;
226 					blk =  mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
227 					debugBlk = blk;
228 
229 					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
230 					*span_blk = blk;
231 				}
232 				return span;
233 			}
234 		}
235 	}
236 	return SPAN_INVALID;
237 }
238 
239 /*
240 ******************************************************************************
241 *
242 * Function to print info about span set created in driver from FW raid map
243 *
244 * Inputs :
245 * map    - LD map
246 * ldSpanInfo - ldSpanInfo per HBA instance
247 */
#if SPAN_DEBUG
/*
 * getSpanInfo - dump the span sets the driver built from the FW raid map
 * @map:        LD map
 * @ldSpanInfo: ldSpanInfo per HBA instance
 *
 * Debug-only helper, compiled when SPAN_DEBUG is set.
 * NOTE(review): the dev_dbg() calls below reference "instance", which is
 * not a parameter of this function — this body cannot compile when
 * SPAN_DEBUG is enabled; confirm whether an instance argument is missing.
 */
static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
{

	u8   span;
	u32    element;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	int ldCount;
	u16 ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
			/* skip target IDs with no backing LD */
			if (ld >= MAX_LOGICAL_DRIVES)
				continue;
		raid = MR_LdRaidGet(ld, map);
		dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
			ld, raid->spanDepth);
		for (span = 0; span < raid->spanDepth; span++)
			dev_dbg(&instance->pdev->dev, "Span=%x,"
			" number of quads=%x\n", span,
			le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
			block_span_info.noElements));
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			span_set = &(ldSpanInfo[ld].span_set[element]);
			/* a zero width marks the end of populated span sets */
			if (span_set->span_row_data_width == 0)
				break;

			dev_dbg(&instance->pdev->dev, "Span Set %x:"
				"width=%x, diff=%x\n", element,
				(unsigned int)span_set->span_row_data_width,
				(unsigned int)span_set->diff);
			dev_dbg(&instance->pdev->dev, "logical LBA"
				"start=0x%08lx, end=0x%08lx\n",
				(long unsigned int)span_set->log_start_lba,
				(long unsigned int)span_set->log_end_lba);
			dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->span_row_start,
				(long unsigned int)span_set->span_row_end);
			dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->data_row_start,
				(long unsigned int)span_set->data_row_end);
			dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->data_strip_start,
				(long unsigned int)span_set->data_strip_end);

			/* dump every quad of this span set that exists */
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) >=
					element + 1) {
					quad = &map->raidMap.ldSpanMap[ld].
						spanBlock[span].block_span_info.
						quad[element];
				dev_dbg(&instance->pdev->dev, "Span=%x,"
					"Quad=%x, diff=%x\n", span,
					element, le32_to_cpu(quad->diff));
				dev_dbg(&instance->pdev->dev,
					"offset_in_span=0x%08lx\n",
					(long unsigned int)le64_to_cpu(quad->offsetInSpan));
				dev_dbg(&instance->pdev->dev,
					"logical start=0x%08lx, end=0x%08lx\n",
					(long unsigned int)le64_to_cpu(quad->logStart),
					(long unsigned int)le64_to_cpu(quad->logEnd));
				}
			}
		}
	}
	return 0;
}
#endif
322 
323 /*
324 ******************************************************************************
325 *
326 * This routine calculates the Span block for given row using spanset.
327 *
328 * Inputs :
329 *    instance - HBA instance
330 *    ld   - Logical drive number
331 *    row        - Row number
332 *    map    - LD map
333 *
334 * Outputs :
335 *
336 *    span          - Span number
337 *    block         - Absolute Block number in the physical disk
*    div_error	   - Divide error code.
339 */
340 
341 u32 mr_spanset_get_span_block(struct megasas_instance *instance,
342 		u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
343 {
344 	struct fusion_context *fusion = instance->ctrl_context;
345 	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
346 	LD_SPAN_SET *span_set;
347 	struct MR_QUAD_ELEMENT    *quad;
348 	u32    span, info;
349 	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
350 
351 	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
352 		span_set = &(ldSpanInfo[ld].span_set[info]);
353 
354 		if (span_set->span_row_data_width == 0)
355 			break;
356 
357 		if (row > span_set->data_row_end)
358 			continue;
359 
360 		for (span = 0; span < raid->spanDepth; span++)
361 			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
362 				block_span_info.noElements) >= info+1) {
363 				quad = &map->raidMap.ldSpanMap[ld].
364 					spanBlock[span].
365 					block_span_info.quad[info];
366 				if (le32_to_cpu(quad->diff == 0))
367 					return SPAN_INVALID;
368 				if (le64_to_cpu(quad->logStart) <= row  &&
369 					row <= le64_to_cpu(quad->logEnd)  &&
370 					(mega_mod64(row - le64_to_cpu(quad->logStart),
371 						le32_to_cpu(quad->diff))) == 0) {
372 					if (span_blk != NULL) {
373 						u64  blk;
374 						blk = mega_div64_32
375 						    ((row - le64_to_cpu(quad->logStart)),
376 						    le32_to_cpu(quad->diff));
377 						blk = (blk + le64_to_cpu(quad->offsetInSpan))
378 							 << raid->stripeShift;
379 						*span_blk = blk;
380 					}
381 					return span;
382 				}
383 			}
384 	}
385 	return SPAN_INVALID;
386 }
387 
388 /*
389 ******************************************************************************
390 *
391 * This routine calculates the row for given strip using spanset.
392 *
393 * Inputs :
394 *    instance - HBA instance
395 *    ld   - Logical drive number
396 *    Strip        - Strip
397 *    map    - LD map
398 *
399 * Outputs :
400 *
401 *    row         - row associated with strip
402 */
403 
404 static u64  get_row_from_strip(struct megasas_instance *instance,
405 	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
406 {
407 	struct fusion_context *fusion = instance->ctrl_context;
408 	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
409 	LD_SPAN_SET	*span_set;
410 	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
411 	u32		info, strip_offset, span, span_offset;
412 	u64		span_set_Strip, span_set_Row, retval;
413 
414 	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
415 		span_set = &(ldSpanInfo[ld].span_set[info]);
416 
417 		if (span_set->span_row_data_width == 0)
418 			break;
419 		if (strip > span_set->data_strip_end)
420 			continue;
421 
422 		span_set_Strip = strip - span_set->data_strip_start;
423 		strip_offset = mega_mod64(span_set_Strip,
424 				span_set->span_row_data_width);
425 		span_set_Row = mega_div64_32(span_set_Strip,
426 				span_set->span_row_data_width) * span_set->diff;
427 		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
428 			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
429 				block_span_info.noElements >= info+1)) {
430 				if (strip_offset >=
431 					span_set->strip_offset[span])
432 					span_offset++;
433 				else
434 					break;
435 			}
436 #if SPAN_DEBUG
437 		dev_info(&instance->pdev->dev, "Strip 0x%llx,"
438 			"span_set_Strip 0x%llx, span_set_Row 0x%llx"
439 			"data width 0x%llx span offset 0x%x\n", strip,
440 			(unsigned long long)span_set_Strip,
441 			(unsigned long long)span_set_Row,
442 			(unsigned long long)span_set->span_row_data_width,
443 			span_offset);
444 		dev_info(&instance->pdev->dev, "For strip 0x%llx"
445 			"row is 0x%llx\n", strip,
446 			(unsigned long long) span_set->data_row_start +
447 			(unsigned long long) span_set_Row + (span_offset - 1));
448 #endif
449 		retval = (span_set->data_row_start + span_set_Row +
450 				(span_offset - 1));
451 		return retval;
452 	}
453 	return -1LLU;
454 }
455 
456 
457 /*
458 ******************************************************************************
459 *
460 * This routine calculates the Start Strip for given row using spanset.
461 *
462 * Inputs :
463 *    instance - HBA instance
464 *    ld   - Logical drive number
465 *    row        - Row number
466 *    map    - LD map
467 *
468 * Outputs :
469 *
470 *    Strip         - Start strip associated with row
471 */
472 
static u64 get_strip_from_row(struct megasas_instance *instance,
		u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    span, info;
	u64  strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* a zero width marks the end of populated span sets */
		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				/* the quad covers @row if it lies within
				 * [logStart, logEnd] at a multiple of diff */
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
							le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev, "get_strip_from_row"
		"returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}
517 
518 /*
519 ******************************************************************************
520 *
521 * This routine calculates the Physical Arm for given strip using spanset.
522 *
523 * Inputs :
524 *    instance - HBA instance
525 *    ld   - Logical drive number
526 *    strip      - Strip
527 *    map    - LD map
528 *
529 * Outputs :
530 *
531 *    Phys Arm         - Phys Arm associated with strip
532 */
533 
static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    info, strip_offset, span, span_offset, retval;

	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* a zero width marks the end of populated span sets */
		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		/* position of the strip within its span-set row */
		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		/* span_offset ends up as the strip offset of the span
		 * that owns strip_offset */
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "get_arm_from_strip:"
			"for ld=0x%x strip=0x%lx arm is  0x%x\n", ld,
			(long unsigned int)strip, (strip_offset - span_offset));
#endif
		/* arm = strip offset minus strips consumed by earlier spans */
		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev, "get_arm_from_strip"
		"returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);

	return -1;
}
580 
581 /* This Function will return Phys arm */
582 u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
583 		struct MR_FW_RAID_MAP_ALL *map)
584 {
585 	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
586 	/* Need to check correct default value */
587 	u32    arm = 0;
588 
589 	switch (raid->level) {
590 	case 0:
591 	case 5:
592 	case 6:
593 		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
594 		break;
595 	case 1:
596 		/* start with logical arm */
597 		arm = get_arm_from_strip(instance, ld, stripe, map);
598 		if (arm != -1U)
599 			arm *= 2;
600 		break;
601 	}
602 
603 	return arm;
604 }
605 
606 
607 /*
608 ******************************************************************************
609 *
610 * This routine calculates the arm, span and block for the specified stripe and
611 * reference in stripe using spanset
612 *
613 * Inputs :
614 *
615 *    ld   - Logical drive number
616 *    stripRow        - Stripe number
617 *    stripRef    - Reference in stripe
618 *
619 * Outputs :
620 *
621 *    span          - Span number
622 *    block         - Absolute Block number in the physical disk
623 */
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32     pd, arRef;
	u8      physArm, span;
	u64     row;
	u8	retval = TRUE;
	u8	do_invader = 0;
	u64	*pdBlock = &io_info->pdBlock;
	u16	*pDevHandle = &io_info->devHandle;
	u32	logArm, rowMod, armQ, arm;

	/* Invader/Fury controllers take a different region-lock path below */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		do_invader = 1;

	/*Get row and span from io_info for Uneven Span IO.*/
	row	    = io_info->start_row;
	span	    = io_info->start_span;


	if (raid->level == 6) {
		/* RAID6: rotate past the Q arm position for this row */
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return FALSE;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		/* handle wrap-around past the end of the span row */
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return FALSE;

	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID)
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		/* arm is missing: mark handle invalid and pick a fallback */
		*pDevHandle = MR_PD_INVALID;
		if ((raid->level >= 5) &&
			(!do_invader  || (do_invader &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* RAID1: try the mirror arm instead */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* absolute block = span start + block offset + ref within stripe */
	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
					physArm;
	return retval;
}
687 
688 /*
689 ******************************************************************************
690 *
691 * This routine calculates the arm, span and block for the specified stripe and
692 * reference in stripe.
693 *
694 * Inputs :
695 *
696 *    ld   - Logical drive number
697 *    stripRow        - Stripe number
698 *    stripRef    - Reference in stripe
699 *
700 * Outputs :
701 *
702 *    span          - Span number
703 *    block         - Absolute Block number in the physical disk
704 */
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32         pd, arRef;
	u8          physArm, span;
	u64         row;
	u8	    retval = TRUE;
	u8          do_invader = 0;
	u64	    *pdBlock = &io_info->pdBlock;
	u16	    *pDevHandle = &io_info->devHandle;

	/* Invader/Fury controllers take a different region-lock path below */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		do_invader = 1;

	row =  mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm =  mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		/* guard the mega_mod64() below against a zero divisor */
		if (raid->rowSize == 0)
			return FALSE;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
		arm = armQ+1+logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else  {
		/* guard the mega_mod64() below against a zero divisor */
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
							  raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		/* single span: the row maps straight to a block offset */
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
		if ((raid->level >= 5) &&
			(!do_invader  || (do_invader &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* absolute block = span start + block offset + ref within stripe */
	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
		physArm;
	return retval;
}
783 
784 /*
785 ******************************************************************************
786 *
787 * MR_BuildRaidContext function
788 *
789 * This function will initiate command processing.  The start/end row and strip
790 * information is calculated then the lock is acquired.
791 * This function will return 0 if region lock was acquired OR return num strips
792 */
793 u8
794 MR_BuildRaidContext(struct megasas_instance *instance,
795 		    struct IO_REQUEST_INFO *io_info,
796 		    struct RAID_CONTEXT *pRAID_Context,
797 		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
798 {
799 	struct MR_LD_RAID  *raid;
800 	u32         ld, stripSize, stripe_mask;
801 	u64         endLba, endStrip, endRow, start_row, start_strip;
802 	u64         regStart;
803 	u32         regSize;
804 	u8          num_strips, numRows;
805 	u16         ref_in_start_stripe, ref_in_end_stripe;
806 	u64         ldStartBlock;
807 	u32         numBlocks, ldTgtId;
808 	u8          isRead;
809 	u8	    retval = 0;
810 	u8	    startlba_span = SPAN_INVALID;
811 	u64 *pdBlock = &io_info->pdBlock;
812 
813 	ldStartBlock = io_info->ldStartBlock;
814 	numBlocks = io_info->numBlocks;
815 	ldTgtId = io_info->ldTgtId;
816 	isRead = io_info->isRead;
817 	io_info->IoforUnevenSpan = 0;
818 	io_info->start_span	= SPAN_INVALID;
819 
820 	ld = MR_TargetIdToLdGet(ldTgtId, map);
821 	raid = MR_LdRaidGet(ld, map);
822 
823 	/*
824 	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
825 	 * return FALSE
826 	 */
827 	if (raid->rowDataSize == 0) {
828 		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
829 			return FALSE;
830 		else if (instance->UnevenSpanSupport) {
831 			io_info->IoforUnevenSpan = 1;
832 		} else {
833 			dev_info(&instance->pdev->dev,
834 				"raid->rowDataSize is 0, but has SPAN[0]"
835 				"rowDataSize = 0x%0x,"
836 				"but there is _NO_ UnevenSpanSupport\n",
837 				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
838 			return FALSE;
839 		}
840 	}
841 
842 	stripSize = 1 << raid->stripeShift;
843 	stripe_mask = stripSize-1;
844 
845 
846 	/*
847 	 * calculate starting row and stripe, and number of strips and rows
848 	 */
849 	start_strip         = ldStartBlock >> raid->stripeShift;
850 	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
851 	endLba              = ldStartBlock + numBlocks - 1;
852 	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
853 	endStrip            = endLba >> raid->stripeShift;
854 	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */
855 
856 	if (io_info->IoforUnevenSpan) {
857 		start_row = get_row_from_strip(instance, ld, start_strip, map);
858 		endRow	  = get_row_from_strip(instance, ld, endStrip, map);
859 		if (start_row == -1ULL || endRow == -1ULL) {
860 			dev_info(&instance->pdev->dev, "return from %s %d."
861 				"Send IO w/o region lock.\n",
862 				__func__, __LINE__);
863 			return FALSE;
864 		}
865 
866 		if (raid->spanDepth == 1) {
867 			startlba_span = 0;
868 			*pdBlock = start_row << raid->stripeShift;
869 		} else
870 			startlba_span = (u8)mr_spanset_get_span_block(instance,
871 						ld, start_row, pdBlock, map);
872 		if (startlba_span == SPAN_INVALID) {
873 			dev_info(&instance->pdev->dev, "return from %s %d"
874 				"for row 0x%llx,start strip %llx"
875 				"endSrip %llx\n", __func__, __LINE__,
876 				(unsigned long long)start_row,
877 				(unsigned long long)start_strip,
878 				(unsigned long long)endStrip);
879 			return FALSE;
880 		}
881 		io_info->start_span	= startlba_span;
882 		io_info->start_row	= start_row;
883 #if SPAN_DEBUG
884 		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
885 			"for row 0x%llx, start strip 0x%llx end strip 0x%llx"
886 			" span 0x%x\n", __func__, __LINE__,
887 			(unsigned long long)start_row,
888 			(unsigned long long)start_strip,
889 			(unsigned long long)endStrip, startlba_span);
890 		dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
891 			"Start span 0x%x\n", (unsigned long long)start_row,
892 			(unsigned long long)endRow, startlba_span);
893 #endif
894 	} else {
895 		start_row = mega_div64_32(start_strip, raid->rowDataSize);
896 		endRow    = mega_div64_32(endStrip, raid->rowDataSize);
897 	}
898 	numRows = (u8)(endRow - start_row + 1);
899 
900 	/*
901 	 * calculate region info.
902 	 */
903 
904 	/* assume region is at the start of the first row */
905 	regStart            = start_row << raid->stripeShift;
906 	/* assume this IO needs the full row - we'll adjust if not true */
907 	regSize             = stripSize;
908 
909 	/* Check if we can send this I/O via FastPath */
910 	if (raid->capability.fpCapable) {
911 		if (isRead)
912 			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
913 					      ((num_strips == 1) ||
914 					       raid->capability.
915 					       fpReadAcrossStripe));
916 		else
917 			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
918 					      ((num_strips == 1) ||
919 					       raid->capability.
920 					       fpWriteAcrossStripe));
921 	} else
922 		io_info->fpOkForIo = FALSE;
923 
924 	if (numRows == 1) {
925 		/* single-strip IOs can always lock only the data needed */
926 		if (num_strips == 1) {
927 			regStart += ref_in_start_stripe;
928 			regSize = numBlocks;
929 		}
930 		/* multi-strip IOs always need to full stripe locked */
931 	} else if (io_info->IoforUnevenSpan == 0) {
932 		/*
933 		 * For Even span region lock optimization.
934 		 * If the start strip is the last in the start row
935 		 */
936 		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
937 			regStart += ref_in_start_stripe;
938 			/* initialize count to sectors from startref to end
939 			   of strip */
940 			regSize = stripSize - ref_in_start_stripe;
941 		}
942 
943 		/* add complete rows in the middle of the transfer */
944 		if (numRows > 2)
945 			regSize += (numRows-2) << raid->stripeShift;
946 
947 		/* if IO ends within first strip of last row*/
948 		if (endStrip == endRow*raid->rowDataSize)
949 			regSize += ref_in_end_stripe+1;
950 		else
951 			regSize += stripSize;
952 	} else {
953 		/*
954 		 * For Uneven span region lock optimization.
955 		 * If the start strip is the last in the start row
956 		 */
957 		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
958 				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
959 			regStart += ref_in_start_stripe;
960 			/* initialize count to sectors from
961 			 * startRef to end of strip
962 			 */
963 			regSize = stripSize - ref_in_start_stripe;
964 		}
965 		/* Add complete rows in the middle of the transfer*/
966 
967 		if (numRows > 2)
968 			/* Add complete rows in the middle of the transfer*/
969 			regSize += (numRows-2) << raid->stripeShift;
970 
971 		/* if IO ends within first strip of last row */
972 		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
973 			regSize += ref_in_end_stripe + 1;
974 		else
975 			regSize += stripSize;
976 	}
977 
978 	pRAID_Context->timeoutValue =
979 		cpu_to_le16(raid->fpIoTimeoutForLd ?
980 			    raid->fpIoTimeoutForLd :
981 			    map->raidMap.fpPdIoTimeoutSec);
982 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
983 		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
984 		pRAID_Context->regLockFlags = (isRead) ?
985 			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
986 	else
987 		pRAID_Context->regLockFlags = (isRead) ?
988 			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
989 	pRAID_Context->VirtualDiskTgtId = raid->targetId;
990 	pRAID_Context->regLockRowLBA    = cpu_to_le64(regStart);
991 	pRAID_Context->regLockLength    = cpu_to_le32(regSize);
992 	pRAID_Context->configSeqNum	= raid->seqNum;
993 	/* save pointer to raid->LUN array */
994 	*raidLUN = raid->LUN;
995 
996 
997 	/*Get Phy Params only if FP capable, or else leave it to MR firmware
998 	  to do the calculation.*/
999 	if (io_info->fpOkForIo) {
1000 		retval = io_info->IoforUnevenSpan ?
1001 				mr_spanset_get_phy_params(instance, ld,
1002 					start_strip, ref_in_start_stripe,
1003 					io_info, pRAID_Context, map) :
1004 				MR_GetPhyParams(instance, ld, start_strip,
1005 					ref_in_start_stripe, io_info,
1006 					pRAID_Context, map);
1007 		/* If IO on an invalid Pd, then FP is not possible.*/
1008 		if (io_info->devHandle == MR_PD_INVALID)
1009 			io_info->fpOkForIo = FALSE;
1010 		return retval;
1011 	} else if (isRead) {
1012 		uint stripIdx;
1013 		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
1014 			retval = io_info->IoforUnevenSpan ?
1015 				mr_spanset_get_phy_params(instance, ld,
1016 				    start_strip + stripIdx,
1017 				    ref_in_start_stripe, io_info,
1018 				    pRAID_Context, map) :
1019 				MR_GetPhyParams(instance, ld,
1020 				    start_strip + stripIdx, ref_in_start_stripe,
1021 				    io_info, pRAID_Context, map);
1022 			if (!retval)
1023 				return TRUE;
1024 		}
1025 	}
1026 
1027 #if SPAN_DEBUG
1028 	/* Just for testing what arm we get for strip.*/
1029 	if (io_info->IoforUnevenSpan)
1030 		get_arm_from_strip(instance, ld, start_strip, map);
1031 #endif
1032 	return TRUE;
1033 }
1034 
1035 /*
1036 ******************************************************************************
1037 *
1038 * This routine prepares span set info from a valid RAID map and stores it into
1039 * local copy of ldSpanInfo per instance data structure.
1040 *
1041 * Inputs :
1042 * map    - LD map
1043 * ldSpanInfo - ldSpanInfo per HBA instance
1044 *
1045 */
void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
			PLD_SPAN_INFO ldSpanInfo)
{
	u8   span, count;
	u32  element, span_row_width;
	u64  span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT    *quad;
	int ldCount;
	u16 ld;


	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		/* Skip target IDs that have no backing logical drive */
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		/* Build one span_set entry per quad depth of this LD */
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			/* Find the first span that has a quad at this depth */
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				/*
				 * Sum the row data sizes of every span that
				 * participates at this quad depth, and record
				 * each span's strip offset within the
				 * combined row.
				 */
				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
						printk(KERN_INFO "megasas:"
							"span %x rowDataSize %x\n",
							count, MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize);
					}
				}

				span_set->span_row_data_width = span_row_width;
				/* Number of span rows this quad covers:
				 * (logEnd - logStart + diff) / diff
				 */
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					/* First span set starts at LBA/row/strip 0 */
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					/* Subsequent span sets continue right
					 * after the previous set's end values.
					 */
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				/* Quad found for this depth - next element */
				break;
		}
		/* No span had a quad at this depth - no deeper ones either */
		if (span == raid->spanDepth)
			break;
	    }
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);
#endif

}
1154 
1155 void
1156 mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
1157 			      struct LD_LOAD_BALANCE_INFO *lbInfo)
1158 {
1159 	int ldCount;
1160 	u16 ld;
1161 	struct MR_LD_RAID *raid;
1162 
1163 	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
1164 		ld = MR_TargetIdToLdGet(ldCount, map);
1165 		if (ld >= MAX_LOGICAL_DRIVES) {
1166 			lbInfo[ldCount].loadBalanceFlag = 0;
1167 			continue;
1168 		}
1169 
1170 		raid = MR_LdRaidGet(ld, map);
1171 
1172 		/* Two drive Optimal RAID 1 */
1173 		if ((raid->level == 1)  &&  (raid->rowSize == 2) &&
1174 		    (raid->spanDepth == 1) && raid->ldState ==
1175 		    MR_LD_STATE_OPTIMAL) {
1176 			u32 pd, arRef;
1177 
1178 			lbInfo[ldCount].loadBalanceFlag = 1;
1179 
1180 			/* Get the array on which this span is present */
1181 			arRef = MR_LdSpanArrayGet(ld, 0, map);
1182 
1183 			/* Get the Pd */
1184 			pd = MR_ArPdGet(arRef, 0, map);
1185 			/* Get dev handle from Pd */
1186 			lbInfo[ldCount].raid1DevHandle[0] =
1187 				MR_PdDevHandleGet(pd, map);
1188 			/* Get the Pd */
1189 			pd = MR_ArPdGet(arRef, 1, map);
1190 
1191 			/* Get the dev handle from Pd */
1192 			lbInfo[ldCount].raid1DevHandle[1] =
1193 				MR_PdDevHandleGet(pd, map);
1194 		} else
1195 			lbInfo[ldCount].loadBalanceFlag = 0;
1196 	}
1197 }
1198 
1199 u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
1200 			u32 count)
1201 {
1202 	u16     pend0, pend1;
1203 	u64     diff0, diff1;
1204 	u8      bestArm;
1205 
1206 	/* get the pending cmds for the data and mirror arms */
1207 	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
1208 	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
1209 
1210 	/* Determine the disk whose head is nearer to the req. block */
1211 	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
1212 	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
1213 	bestArm = (diff0 <= diff1 ? 0 : 1);
1214 
1215 	/*Make balance count from 16 to 4 to keep driver in sync with Firmware*/
1216 	if ((bestArm == arm && pend0 > pend1 + 4)  ||
1217 	    (bestArm != arm && pend1 > pend0 + 4))
1218 		bestArm ^= 1;
1219 
1220 	/* Update the last accessed block on the correct pd */
1221 	lbInfo->last_accessed_block[bestArm] = block + count - 1;
1222 
1223 	return bestArm;
1224 }
1225 
1226 u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
1227 			   struct IO_REQUEST_INFO *io_info)
1228 {
1229 	u8 arm, old_arm;
1230 	u16 devHandle;
1231 
1232 	old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
1233 
1234 	/* get best new arm */
1235 	arm  = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
1236 				    io_info->numBlocks);
1237 	devHandle = lbInfo->raid1DevHandle[arm];
1238 	atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
1239 
1240 	return devHandle;
1241 }
1242