// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2009-2013  LSI Corporation
 * Copyright (c) 2013-2016  Avago Technologies
 * Copyright (c) 2016-2018  Broadcom Inc.
 *
 * FILE: megaraid_sas_fp.c
 *
 * Authors: Broadcom Inc.
 *          Sumant Patro
 *          Varad Talamacki
 *          Manoj Jose
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
#include <asm/div64.h>

#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
module_param(lb_pending_cmds, int, 0444);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
	"threshold. Valid Values are 1-128. Default: 4");


#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3

#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID  0xff

/* Prototypes */
static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
	struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
	u64 strip, struct MR_DRV_RAID_MAP_ALL *map);

u32 mega_mod64(u64 dividend, u32 divisor)
{
	u64 d;
	u32 remainder;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
	d = dividend;
	remainder = do_div(d, divisor);
	return remainder;
}

/**
 * mega_div64_32 - Divide a 64-bit value by a 32-bit divisor
 * @dividend:	Dividend
 * @divisor:	Divisor
 *
 * Return: quotient
 **/
u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
	u32 remainder;
	u64 d;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");

	d = dividend;
	remainder = do_div(d, divisor);

	return d;
}

struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].ldRaid;
}

static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
						   struct MR_DRV_RAID_MAP_ALL
						   *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}

u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct
		      MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].interfaceType;
}

u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
					  struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

/*
 * This function will populate the driver RAID map using the firmware RAID map.
 */
static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
	int i, j;
	u16 ld_count;
	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
	struct MR_RAID_MAP_DESC_TABLE *desc_table;


	struct MR_DRV_RAID_MAP_ALL *drv_map =
			fusion->ld_drv_map[(map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

	memset(drv_map, 0, fusion->drv_map_sz);
	memset(pDrvRaidMap->ldTgtIdToLd,
	       0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));

	if (instance->max_raid_mapsize) {
		fw_map_dyn = fusion->ld_map[(map_id & 1)];
		desc_table =
		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
		if (desc_table != fw_map_dyn->raid_map_desc_table)
			dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n",
				desc_table, fw_map_dyn->raid_map_desc_table);

		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec =
			fw_map_dyn->fp_pd_io_timeout_sec;
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));
		/* point to actual data starting point */
		raid_map_data = (void *)fw_map_dyn +
			le32_to_cpu(fw_map_dyn->desc_table_offset) +
			le32_to_cpu(fw_map_dyn->desc_table_size);

		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
				fw_map_dyn->dev_hndl_info =
				(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->devHndlInfo,
				       fw_map_dyn->dev_hndl_info,
				       sizeof(struct MR_DEV_HANDLE_INFO) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			case RAID_MAP_DESC_TYPE_TGTID_INFO:
				fw_map_dyn->ld_tgt_id_to_ld =
					(u16 *)(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
					pDrvRaidMap->ldTgtIdToLd[j] =
						le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
				}
				break;
			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
				fw_map_dyn->ar_map_info =
					(struct MR_ARRAY_INFO *)
					(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->arMapInfo,
				       fw_map_dyn->ar_map_info,
				       sizeof(struct MR_ARRAY_INFO) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			case RAID_MAP_DESC_TYPE_SPAN_INFO:
				fw_map_dyn->ld_span_map =
					(struct MR_LD_SPAN_MAP *)
					(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->ldSpanMap,
				       fw_map_dyn->ld_span_map,
				       sizeof(struct MR_LD_SPAN_MAP) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			default:
				dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n",
					fw_map_dyn->desc_table_num_elements);
			}
			++desc_table;
		}

	} else if (instance->supportmax256vd) {
		fw_map_ext =
			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
			return 1;
		}

		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u16)fw_map_ext->ldTgtIdToLd[i];
		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
		       sizeof(struct MR_LD_SPAN_MAP) * ld_count);
		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
		       sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);

		/* New Raid map will not set totalSize, so keep expected value
		 * for legacy code in ValidateMapInfo
		 */
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
	} else {
		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
			fusion->ld_map[(map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;
		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES) {
			dev_dbg(&instance->pdev->dev,
				"LD count exposed in RAID map is not valid\n");
			return 1;
		}

		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u8)pFwRaidMap->ldTgtIdToLd[i];
		for (i = 0; i < ld_count; i++) {
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
		}
		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
		       sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);
	}

	return 0;
}

/*
 * This function will validate the map info data provided by FW.
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	struct MR_DRV_RAID_MAP *pDrvRaidMap;
	struct LD_LOAD_BALANCE_INFO *lbInfo;
	PLD_SPAN_INFO ldSpanInfo;
	struct MR_LD_RAID *raid;
	u16 num_lds, i;
	u16 ld;
	u32 expected_size;

	if (MR_PopulateDrvRaidMap(instance, map_id))
		return 0;

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;

	lbInfo = fusion->load_balance_info;
	ldSpanInfo = fusion->log_to_span;

	if (instance->max_raid_mapsize)
		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
	else if (instance->supportmax256vd)
		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
	else
		expected_size =
			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
		dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
			le32_to_cpu(pDrvRaidMap->totalSize));
		dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
			(unsigned int)expected_size);
		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
			le32_to_cpu(pDrvRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(drv_map, ldSpanInfo);

	if (lbInfo)
		mr_update_load_balance_params(drv_map, lbInfo);

	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);

	/* Convert RAID capability values to CPU arch */
	for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
		ld = MR_TargetIdToLdGet(i, drv_map);

		/* For non existing VDs, iterate to next VD */
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;

		raid = MR_LdRaidGet(ld, drv_map);
		le32_to_cpus((u32 *)&raid->capability);

		num_lds--;
	}

	return 1;
}

u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		    struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT *quad;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;
			if (le64_to_cpu(quad->logStart) <= row && row <=
				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
				le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64 blk, debugBlk;

					blk = mega_div64_32((row - le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
					debugBlk = blk;

					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}

/*
******************************************************************************
*
* This routine calculates the Span block for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld       - Logical drive number
*    row      - Row number
*    map      - LD map
*
* Outputs :
*
*    span      - Span number
*    block     - Absolute Block number in the physical disk
*    div_error - Divide error code.
*/

u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	u32 span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;

		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					(mega_mod64(row - le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64 blk;

						blk = mega_div64_32
						    ((row - le64_to_cpu(quad->logStart)),
						    le32_to_cpu(quad->diff));
						blk = (blk + le64_to_cpu(quad->offsetInSpan))
							<< raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}

/*
******************************************************************************
*
* This routine calculates the row for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld       - Logical drive number
*    strip    - Strip
*    map      - LD map
*
* Outputs :
*
*    row - row associated with strip
*/

static u64 get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 info, strip_offset, span, span_offset;
	u64 span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
				span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
				span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}

		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	return -1LLU;
}


/*
******************************************************************************
*
* This routine calculates the Start Strip for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld       - Logical drive number
*    row      - Row number
*    map      - LD map
*
* Outputs :
*
*    Strip - Start strip associated with row
*/

static u64 get_strip_from_row(struct megasas_instance *instance,
		u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 span, info;
	u64 strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
						le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev,
		"get_strip_from_row: returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}

/*
******************************************************************************
*
* This routine calculates the Physical Arm for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld       - Logical drive number
*    strip    - Strip
*    map      - LD map
*
* Outputs :
*
*    Phys Arm - Phys Arm associated with strip
*/

static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 info, strip_offset, span, span_offset, retval;

	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}

		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev,
		"get_arm_from_strip: returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);

	return -1;
}

/* This function will return the physical arm */
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
	   struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	/* Need to check correct default value */
	u32 arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(instance, ld, stripe, map);
		if (arm != -1U)
			arm *= 2;
		break;
	}

	return arm;
}


/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe using spanset
*
* Inputs :
*
*    ld       - Logical drive number
*    stripRow - Stripe number
*    stripRef - Reference in stripe
*
* Outputs :
*
*    span  - Span number
*    block - Absolute Block number in the physical disk
*/
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef, r1_alt_pd;
	u8 physArm, span;
	u64 row;
	u8 retval = true;
	u64 *pdBlock = &io_info->pdBlock;
	__le16 *pDevHandle = &io_info->devHandle;
	u8 *pPdInterface = &io_info->pd_interface;
	u32 logArm, rowMod, armQ, arm;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	/* Get row and span from io_info for Uneven Span IO. */
	row = io_info->start_row;
	span = io_info->start_span;


	if (raid->level == 6) {
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return false;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return false;

	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes */
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
				MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
			((instance->adapter_type == THUNDERBOLT_SERIES) ||
			((instance->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead !=
			REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}

/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe.
*
* Inputs :
*
*    ld       - Logical drive number
*    stripRow - Stripe number
*    stripRef - Reference in stripe
*
* Outputs :
*
*    span  - Span number
*    block - Absolute Block number in the physical disk
*/
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef, r1_alt_pd;
	u8 physArm, span;
	u64 row;
	u8 retval = true;
	u64 *pdBlock = &io_info->pdBlock;
	__le16 *pDevHandle = &io_info->devHandle;
	u8 *pPdInterface = &io_info->pd_interface;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		if (raid->rowSize == 0)
			return false;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
		arm = armQ+1+logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else {
		if (raid->modFactor == 0)
			return false;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
							 raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return false;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID) {
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes */
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
					MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
			((instance->adapter_type == THUNDERBOLT_SERIES) ||
			((instance->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}

/*
 * mr_get_phy_params_r56_rmw - Calculate parameters for R56 CTIO write operation
 * @instance:		Adapter soft state
 * @ld:			LD index
 * @stripNo:		Strip Number
 * @io_info:		IO info structure pointer
 * @pRAID_Context:	RAID context pointer
 * @map:		RAID map pointer
 *
 * This routine calculates the logical arm, data arm, row number and parity arm
 * for R56 CTIO write operation.
 */
static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance,
			    u32 ld, u64 stripNo,
			    struct IO_REQUEST_INFO *io_info,
			    struct RAID_CONTEXT_G35 *pRAID_Context,
			    struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u8 span, dataArms, arms, dataArm, logArm;
	s8 rightmostParityArm, PParityArm;
	u64 rowNum;
	u64 *pdBlock = &io_info->pdBlock;

	dataArms = raid->rowDataSize;
	arms = raid->rowSize;

	rowNum = mega_div64_32(stripNo, dataArms);
	/* parity disk arm, first arm is 0 */
	rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms);

	/* logical arm within row */
	logArm = mega_mod64(stripNo, dataArms);
	/* physical arm for data */
	dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms);

	if (raid->spanDepth == 1) {
		span = 0;
	} else {
		span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
		if (span == SPAN_INVALID)
			return;
	}

	if (raid->level == 6) {
		/* P Parity arm; note this can go negative, adjust if negative */
		PParityArm = (arms - 2) - mega_mod64(rowNum, arms);

		if (PParityArm < 0)
			PParityArm += arms;

		/* rightmostParityArm is P-Parity for RAID 5 and Q-Parity for RAID 6 */
		pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm;
		pRAID_Context->flow_specific.r56_arm_map |=
				    (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT);
	} else {
		pRAID_Context->flow_specific.r56_arm_map |=
				   (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT);
	}

	pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum);
	pRAID_Context->flow_specific.r56_arm_map |=
				   (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT);
	cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map);
	pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm;
	pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD <<
				    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
}

/*
******************************************************************************
*
* MR_BuildRaidContext function
*
* This function will initiate command processing.  The start/end row and strip
* information is calculated then the lock is acquired.
* This function will return 0 if region lock was acquired OR return num strips
*/
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct fusion_context *fusion;
	struct MR_LD_RAID *raid;
	u32 stripSize, stripe_mask;
	u64 endLba, endStrip, endRow, start_row, start_strip;
	u64 regStart;
	u32 regSize;
	u8 num_strips, numRows;
	u16 ref_in_start_stripe, ref_in_end_stripe;
	u64 ldStartBlock;
	u32 numBlocks, ldTgtId;
	u8 isRead;
	u8 retval = 0;
	u8 startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;
	u16 ld;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;
	fusion = instance->ctrl_context;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);
	/* check read ahead bit */
	io_info->ra_capable = raid->capability.ra_capable;

	/*
	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
	 * return FALSE
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return false;
		else if (instance->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				"raid->rowDataSize is 0, but SPAN[0] rowDataSize = 0x%0x, and there is _NO_ UnevenSpanSupport\n",
				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return false;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;

	io_info->data_arms = raid->rowDataSize;

	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip         = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba              = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
	endStrip            = endLba >> raid->stripeShift;
	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */

	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow	  = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev,
				"return from %s %d. Send IO w/o region lock.\n",
				__func__, __LINE__);
			return false;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
						ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev,
				"return from %s %d for row 0x%llx, start strip %llx endStrip %llx\n",
				__func__, __LINE__,
				(unsigned long long)start_row,
				(unsigned long long)start_strip,
				(unsigned long long)endStrip);
			return false;
		}
		io_info->start_span = startlba_span;
		io_info->start_row  = start_row;
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow    = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */

	/* assume region is at the start of the first row */
	regStart = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize = stripSize;

	io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = false;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need the full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from startref to end
			   of strip */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow*raid->rowDataSize)
			regSize += ref_in_end_stripe+1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For Uneven span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* Add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	pRAID_Context->timeout_value =
		cpu_to_le16(raid->fpIoTimeoutForLd ?
			    raid->fpIoTimeoutForLd :
			    map->raidMap.fpPdIoTimeoutSec);
	if (instance->adapter_type == INVADER_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else if (instance->adapter_type == THUNDERBOLT_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
	pRAID_Context->reg_lock_row_lba    = cpu_to_le64(regStart);
	pRAID_Context->reg_lock_length     = cpu_to_le32(regSize);
	pRAID_Context->config_seq_num      = raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;

	/* Aero R5/6 Division Offload for WRITE */
	if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) {
		mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info,
				       (struct RAID_CONTEXT_G35 *)pRAID_Context,
				       map);
		return true;
	}

	/* Get Phy Params only if FP capable, or else leave it to MR firmware
	   to do the calculation. */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip, ref_in_start_stripe,
					io_info, pRAID_Context, map) :
				MR_GetPhyParams(instance, ld, start_strip,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
			io_info->fpOkForIo = false;
		return retval;
	} else if (isRead) {
		uint stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip + stripIdx,
					ref_in_start_stripe, io_info,
					pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
					start_strip + stripIdx, ref_in_start_stripe,
					io_info, pRAID_Context, map);
			if (!retval)
				return true;
		}
	}
	return true;
}

/*
******************************************************************************
*
* This routine prepares spanset info from a valid RAID map and stores it into
* the local copy of ldSpanInfo per instance data structure.
*
* Inputs :
* map        - LD map
* ldSpanInfo - ldSpanInfo per HBA instance
*
*/
static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
			       PLD_SPAN_INFO ldSpanInfo)
{
	u8 span, count;
	u32 element, span_row_width;
	u64 span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT *quad;
	int ldCount;
	u16 ld;


	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
						span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
						span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;
		}
	}
}

void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
				   struct LD_LOAD_BALANCE_INFO *lbInfo)
{
	int ldCount;
	u16 ld;
	struct MR_LD_RAID *raid;

	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}

		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
			(raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}

u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
			   struct LD_LOAD_BALANCE_INFO *lbInfo,
			   struct IO_REQUEST_INFO *io_info,
			   struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	struct MR_LD_RAID *raid;
	u16 pd1_dev_handle;
	u16 pend0, pend1, ld;
	u64 diff0, diff1;
	u8 bestArm, pd0, pd1, span, arm;
	u32 arRef, span_row_size;

	u64 block = io_info->ldStartBlock;
	u32 count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
			>> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = instance->UnevenSpanSupport ?
			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
		(arm + 1 - span_row_size) : arm + 1, drv_map);

	/* Get PD1 Dev Handle */

	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);

	if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
		bestArm = arm;
	} else {
		/* get the pending cmds for the data and mirror arms */
		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

		/* Determine the disk whose head is nearer to the req. block */
		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

		/* Make balance count from 16 to 4 to
		 * keep driver in sync with Firmware
		 */
		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
		    (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
			bestArm ^= 1;

		/* Update the last accessed block on the correct pd */
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	}

	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
	return io_info->pd_after_lb;
}

__le16 get_updated_dev_handle(struct megasas_instance *instance,
			      struct LD_LOAD_BALANCE_INFO *lbInfo,
			      struct IO_REQUEST_INFO *io_info,
			      struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	u8 arm_pd;
	__le16 devHandle;

	/* get best new arm (PD ID) */
	arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
	atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

	return devHandle;
}