// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2009-2013  LSI Corporation
 * Copyright (c) 2013-2016  Avago Technologies
 * Copyright (c) 2016-2018  Broadcom Inc.
 *
 * FILE: megaraid_sas_fp.c
 *
 * Authors: Broadcom Inc.
 *          Sumant Patro
 *          Varad Talamacki
 *          Manoj Jose
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
#include <asm/div64.h>

#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
module_param(lb_pending_cmds, int, 0444);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
	"threshold. Valid Values are 1-128. Default: 4");


#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3

#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID  0xff

/* Prototypes */
static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
	struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
	u64 strip, struct MR_DRV_RAID_MAP_ALL *map);

u32 mega_mod64(u64 dividend, u32 divisor)
{
	u64 d;
	u32 remainder;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
	d = dividend;
	remainder = do_div(d, divisor);
	return remainder;
}

/**
 * mega_div64_32 - Divide a 64-bit value by a 32-bit divisor
 * @dividend:	Dividend
 * @divisor:	Divisor
 *
 * Return: quotient
 */
static u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
	u32 remainder;
	u64 d;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");

	d = dividend;
	remainder = do_div(d, divisor);

	return d;
}

struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].ldRaid;
}

static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
						   struct MR_DRV_RAID_MAP_ALL
						   *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}
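
/*
 * Note on the accessor helpers above and below: a fast-path I/O resolves a
 * physical device handle by walking the map in stages, roughly
 * ld -> span -> array reference -> pd index -> device handle.  A minimal
 * sketch of that chain (variable names here are illustrative only):
 *
 *	arRef     = MR_LdSpanArrayGet(ld, span, map);
 *	pd        = MR_ArPdGet(arRef, physArm, map);
 *	devHandle = MR_PdDevHandleGet(pd, map);    // when pd != MR_PD_INVALID
 *
 * mr_spanset_get_phy_params() and MR_GetPhyParams() later in this file follow
 * exactly this sequence.
 */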
u16 MR_LdSpanArrayGet(u32 ld, u32 span,
		      struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].interfaceType;
}

u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
					  struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

/*
 * This function will Populate Driver Map using firmware raid map
 */
static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
	int i, j;
	u16 ld_count;
	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
	struct MR_RAID_MAP_DESC_TABLE *desc_table;


	struct MR_DRV_RAID_MAP_ALL *drv_map =
			fusion->ld_drv_map[(map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

	memset(drv_map, 0, fusion->drv_map_sz);
	memset(pDrvRaidMap->ldTgtIdToLd,
	       0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));

	if (instance->max_raid_mapsize) {
		fw_map_dyn = fusion->ld_map[(map_id & 1)];
		desc_table =
		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
		if (desc_table != fw_map_dyn->raid_map_desc_table)
			dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n",
				desc_table, fw_map_dyn->raid_map_desc_table);

		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec =
			fw_map_dyn->fp_pd_io_timeout_sec;
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));
		/* point to actual data starting point*/
		raid_map_data = (void *)fw_map_dyn +
			le32_to_cpu(fw_map_dyn->desc_table_offset) +
			le32_to_cpu(fw_map_dyn->desc_table_size);

		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
				fw_map_dyn->dev_hndl_info =
				(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->devHndlInfo,
				       fw_map_dyn->dev_hndl_info,
				       sizeof(struct MR_DEV_HANDLE_INFO) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			case RAID_MAP_DESC_TYPE_TGTID_INFO:
				fw_map_dyn->ld_tgt_id_to_ld =
					(u16 *)(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
					pDrvRaidMap->ldTgtIdToLd[j] =
						le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
				}
				break;
			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
				fw_map_dyn->ar_map_info =
					(struct MR_ARRAY_INFO *)
					(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->arMapInfo,
				       fw_map_dyn->ar_map_info,
				       sizeof(struct MR_ARRAY_INFO) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			case RAID_MAP_DESC_TYPE_SPAN_INFO:
				fw_map_dyn->ld_span_map =
					(struct MR_LD_SPAN_MAP *)
					(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->ldSpanMap,
				       fw_map_dyn->ld_span_map,
				       sizeof(struct MR_LD_SPAN_MAP) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			default:
				dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n",
					fw_map_dyn->desc_table_num_elements);
			}
			++desc_table;
		}

	} else if (instance->supportmax256vd) {
		fw_map_ext =
			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
			return 1;
		}

		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u16)fw_map_ext->ldTgtIdToLd[i];
		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
		       sizeof(struct MR_LD_SPAN_MAP) * ld_count);
		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
		       sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);

		/* New Raid map will not set totalSize, so keep expected value
		 * for legacy code in ValidateMapInfo
		 */
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
	} else {
		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
			fusion->ld_map[(map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;
		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES) {
			dev_dbg(&instance->pdev->dev,
				"LD count exposed in RAID map is not valid\n");
			return 1;
		}

		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u8)pFwRaidMap->ldTgtIdToLd[i];
		for (i = 0; i < ld_count; i++) {
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
		}
		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
		       sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);
	}

	return 0;
}

/*
 * This function will validate Map info data provided by FW
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	struct MR_DRV_RAID_MAP *pDrvRaidMap;
	struct LD_LOAD_BALANCE_INFO *lbInfo;
	PLD_SPAN_INFO ldSpanInfo;
	struct MR_LD_RAID *raid;
	u16 num_lds, i;
	u16 ld;
	u32 expected_size;

	if (MR_PopulateDrvRaidMap(instance, map_id))
		return 0;

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;

	lbInfo = fusion->load_balance_info;
	ldSpanInfo = fusion->log_to_span;
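	/*
	 * The size check below compares the firmware-reported totalSize with
	 * what the driver expects for the map format in use.  For the legacy
	 * (pre-256VD) map the firmware sizes the structure as one
	 * MR_FW_RAID_MAP header whose single embedded span-map entry is
	 * replaced by ldCount entries, hence
	 *
	 *	expected_size = sizeof(MR_FW_RAID_MAP)
	 *			- sizeof(MR_LD_SPAN_MAP)
	 *			+ ldCount * sizeof(MR_LD_SPAN_MAP)
	 */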

	if (instance->max_raid_mapsize)
		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
	else if (instance->supportmax256vd)
		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
	else
		expected_size =
			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
		dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
			le32_to_cpu(pDrvRaidMap->totalSize));
		dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
			(unsigned int)expected_size);
		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
			le32_to_cpu(pDrvRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(drv_map, ldSpanInfo);

	if (lbInfo)
		mr_update_load_balance_params(drv_map, lbInfo);

	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);

	/*Convert Raid capability values to CPU arch */
	for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
		ld = MR_TargetIdToLdGet(i, drv_map);

		/* For non existing VDs, iterate to next VD*/
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;

		raid = MR_LdRaidGet(ld, drv_map);
		le32_to_cpus((u32 *)&raid->capability);

		num_lds--;
	}

	return 1;
}

static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT *quad;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;
			if (le64_to_cpu(quad->logStart) <= row && row <=
				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
				le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64 blk;

					blk = mega_div64_32((row - le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}
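
/*
 * Illustrative example of the quad arithmetic used in MR_GetSpanBlock()
 * above, with made-up numbers (not taken from any particular controller):
 * for a quad with logStart = 0, logEnd = 1023, diff = 1 and offsetInSpan = 0
 * on an LD with stripeShift = 7 (128 blocks, i.e. 64 KB strips of 512-byte
 * blocks), row 10 falls in this quad because 0 <= 10 <= 1023 and
 * (10 - 0) % 1 == 0, and the returned span block is
 *
 *	blk = ((10 - 0) / 1 + 0) << 7 = 1280
 *
 * i.e. the starting block of that row inside the span.
 */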

/*
******************************************************************************
*
* This routine calculates the Span block for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld       - Logical drive number
*    row      - Row number
*    map      - LD map
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*    div_error     - Divide error code.
*/

static u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	u32 span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;

		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					(mega_mod64(row - le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64 blk;

						blk = mega_div64_32
							((row - le64_to_cpu(quad->logStart)),
							le32_to_cpu(quad->diff));
						blk = (blk + le64_to_cpu(quad->offsetInSpan))
							<< raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}

/*
******************************************************************************
*
* This routine calculates the row for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld       - Logical drive number
*    Strip    - Strip
*    map      - LD map
*
* Outputs :
*
*    row      - row associated with strip
*/

static u64 get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 info, strip_offset, span, span_offset;
	u64 span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
			span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
			span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}

		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	return -1LLU;
}

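/*
 * Sketch of the strip-to-row mapping in get_row_from_strip() above, using
 * made-up span-set values: with data_strip_start = 0,
 * span_row_data_width = 3 (three data arms across the spans covered by this
 * quad depth), diff = 1 and data_row_start = 0, strip 7 gives
 *
 *	span_set_Strip = 7,  strip_offset = 7 % 3 = 1,
 *	span_set_Row   = (7 / 3) * 1 = 2
 *
 * so the strip lives in row 2 of the span set, plus the span_offset
 * correction derived from strip_offset[] when spans have unequal widths.
 */
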
/*
******************************************************************************
*
* This routine calculates the Start Strip for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld       - Logical drive number
*    row      - Row number
*    map      - LD map
*
* Outputs :
*
*    Strip    - Start strip associated with row
*/

static u64 get_strip_from_row(struct megasas_instance *instance,
	u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 span, info;
	u64 strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
						le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev, "get_strip_from_row "
		"returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}

/*
******************************************************************************
*
* This routine calculates the Physical Arm for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld       - Logical drive number
*    strip    - Strip
*    map      - LD map
*
* Outputs :
*
*    Phys Arm - Phys Arm associated with strip
*/

static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 info, strip_offset, span, span_offset, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}

		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev, "get_arm_from_strip "
		"returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);

	return -1;
}

/* This Function will return Phys arm */
static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

	/* Need to check correct default value */
	u32 arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(instance, ld, stripe, map);
		if (arm != -1U)
			arm *= 2;
		break;
	}

	return arm;
}


/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe using spanset
*
* Inputs :
*
*    ld       - Logical drive number
*    stripRow - Stripe number
*    stripRef - Reference in stripe
*
* Outputs :
*
*    span     - Span number
*    block    - Absolute Block number in the physical disk
*/
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef, r1_alt_pd;
	u8 physArm, span;
	u64 row;
	u8 retval = true;
	u64 *pdBlock = &io_info->pdBlock;
	__le16 *pDevHandle = &io_info->devHandle;
	u8 *pPdInterface = &io_info->pd_interface;
	u32 logArm, rowMod, armQ, arm;

	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	/*Get row and span from io_info for Uneven Span IO.*/
	row = io_info->start_row;
	span = io_info->start_span;


	if (raid->level == 6) {
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return false;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return false;

	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes*/
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
					MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
		    ((instance->adapter_type == THUNDERBOLT_SERIES) ||
		     ((instance->adapter_type == INVADER_SERIES) &&
		      (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags =
				REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}

/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe.
*
* Inputs :
*
*    ld       - Logical drive number
*    stripRow - Stripe number
*    stripRef - Reference in stripe
*
* Outputs :
*
*    span     - Span number
*    block    - Absolute Block number in the physical disk
*/
static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef, r1_alt_pd;
	u8 physArm, span;
	u64 row;
	u8 retval = true;
	u64 *pdBlock = &io_info->pdBlock;
	__le16 *pDevHandle = &io_info->devHandle;
	u8 *pPdInterface = &io_info->pd_interface;

	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		if (raid->rowSize == 0)
			return false;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
		arm = armQ+1+logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else {
		if (raid->modFactor == 0)
			return false;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
							 raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return false;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID) {
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes*/
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
					MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
		    ((instance->adapter_type == THUNDERBOLT_SERIES) ||
		     ((instance->adapter_type == INVADER_SERIES) &&
		      (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}
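
/*
 * Worked example of the RAID-6 arm rotation used in MR_GetPhyParams() above
 * (numbers chosen purely for illustration): with rowSize = 4 arms and
 * rowDataSize = 2 data arms, strip 5 gives row = 5 / 2 = 2 and
 * logArm = 5 % 2 = 1.  Then
 *
 *	rowMod = 2 % 4 = 2
 *	armQ   = 4 - 1 - 2 = 1       (index of the Q drive for this row)
 *	arm    = 1 + 1 + 1 = 3       (data logically follows Q, no wrap)
 *
 * so the strip lands on physical arm 3, and the parity arms rotate by one
 * position from row to row.
 */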

/*
 * mr_get_phy_params_r56_rmw - Calculate parameters for R56 CTIO write operation
 * @instance:		Adapter soft state
 * @ld:			LD index
 * @stripNo:		Strip Number
 * @io_info:		IO info structure pointer
 * @pRAID_Context:	RAID context pointer
 * @map:		RAID map pointer
 *
 * This routine calculates the logical arm, data arm, row number and parity arm
 * for R56 CTIO write operation.
 */
static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance,
			    u32 ld, u64 stripNo,
			    struct IO_REQUEST_INFO *io_info,
			    struct RAID_CONTEXT_G35 *pRAID_Context,
			    struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u8 span, dataArms, arms, dataArm, logArm;
	s8 rightmostParityArm, PParityArm;
	u64 rowNum;
	u64 *pdBlock = &io_info->pdBlock;

	dataArms = raid->rowDataSize;
	arms = raid->rowSize;

	rowNum = mega_div64_32(stripNo, dataArms);
	/* parity disk arm, first arm is 0 */
	rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms);

	/* logical arm within row */
	logArm = mega_mod64(stripNo, dataArms);
	/* physical arm for data */
	dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms);

	if (raid->spanDepth == 1) {
		span = 0;
	} else {
		span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
		if (span == SPAN_INVALID)
			return;
	}

	if (raid->level == 6) {
		/* P Parity arm, note this can go negative adjust if negative */
		PParityArm = (arms - 2) - mega_mod64(rowNum, arms);

		if (PParityArm < 0)
			PParityArm += arms;

		/* rightmostParityArm is P-Parity for RAID 5 and Q-Parity for RAID 6 */
		pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm;
		pRAID_Context->flow_specific.r56_arm_map |=
				    (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT);
	} else {
		pRAID_Context->flow_specific.r56_arm_map |=
				    (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT);
	}

	pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum);
	pRAID_Context->flow_specific.r56_arm_map |=
				   (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT);
	cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map);
	pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm;
	pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD <<
				     MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);

	return;
}
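
/*
 * Layout of the r56_arm_map word assembled above, exactly as the code builds
 * it (bit positions come from the RAID_CTX_R56_*_SHIFT constants):
 *
 *	RAID 6:  map  = rightmostParityArm (Q parity, low bits)
 *	              | PParityArm << RAID_CTX_R56_P_ARM_SHIFT
 *	RAID 5:  map |= rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT
 *
 * In both cases logArm is then OR-ed in at RAID_CTX_R56_LOG_ARM_SHIFT and the
 * whole field is byte-swapped to little endian before being handed to the
 * firmware's division-offload path.
 */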

/*
******************************************************************************
*
* MR_BuildRaidContext function
*
* This function will initiate command processing.  The start/end row and strip
* information is calculated then the region lock is acquired.  It returns
* true (1) if the RAID context was built successfully and false (0) otherwise.
*/
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct fusion_context *fusion;
	struct MR_LD_RAID *raid;
	u32 stripSize, stripe_mask;
	u64 endLba, endStrip, endRow, start_row, start_strip;
	u64 regStart;
	u32 regSize;
	u8 num_strips, numRows;
	u16 ref_in_start_stripe, ref_in_end_stripe;
	u64 ldStartBlock;
	u32 numBlocks, ldTgtId;
	u8 isRead;
	u8 retval = 0;
	u8 startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;
	u16 ld;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;
	fusion = instance->ctrl_context;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);
	/*check read ahead bit*/
	io_info->ra_capable = raid->capability.ra_capable;

	/*
	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
	 * return FALSE
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return false;
		else if (instance->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				 "raid->rowDataSize is 0, but SPAN[0] "
				 "rowDataSize = 0x%0x, "
				 "and there is _NO_ UnevenSpanSupport\n",
				 MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return false;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;

	io_info->data_arms = raid->rowDataSize;

	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u8)(endStrip - start_strip + 1); /* End strip */

	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev, "return from %s %d. "
				 "Send IO w/o region lock.\n",
				 __func__, __LINE__);
			return false;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
						ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev, "return from %s %d "
				 "for row 0x%llx, start strip %llx "
				 "endStrip %llx\n", __func__, __LINE__,
				 (unsigned long long)start_row,
				 (unsigned long long)start_strip,
				 (unsigned long long)endStrip);
			return false;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);
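
	/*
	 * Example of the strip/row arithmetic above, with illustrative
	 * numbers: stripeShift = 7 (64 KB strips), rowDataSize = 3 data arms,
	 * ldStartBlock = 300, numBlocks = 100.  Then
	 *
	 *	start_strip         = 300 >> 7      = 2
	 *	ref_in_start_stripe = 300 & 127     = 44
	 *	endLba              = 300 + 100 - 1 = 399
	 *	endStrip            = 399 >> 7      = 3
	 *	num_strips          = 3 - 2 + 1     = 2
	 *	start_row           = 2 / 3         = 0
	 *	endRow              = 3 / 3         = 1
	 *	numRows             = 1 - 0 + 1     = 2
	 *
	 * so this I/O touches two strips that straddle a row boundary.
	 */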

	/*
	 * calculate region info.
	 */

	/* assume region is at the start of the first row */
	regStart = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize = stripSize;

	io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = false;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need the full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from startref to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row*/
		if (endStrip == endRow*raid->rowDataSize)
			regSize += ref_in_end_stripe+1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For Uneven span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* Add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}
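
	/*
	 * Continuing the illustrative numbers from the example above
	 * (stripeShift = 7, rowDataSize = 3, numRows = 2, numBlocks = 100,
	 * ref_in_start_stripe = 44, ref_in_end_stripe = 399 & 127 = 15,
	 * start_strip = 2, endStrip = 3, endRow = 1):
	 *
	 *	regStart = 0 << 7 = 0; start_strip == (0+1)*3 - 1 == 2, so
	 *	regStart += 44 and regSize = 128 - 44 = 84;
	 *	numRows is not > 2, so no middle rows are added;
	 *	endStrip (3) == endRow * rowDataSize (3), so
	 *	regSize += 15 + 1, giving a 100-block region lock starting at
	 *	row-relative block 44 - exactly the span of blocks this I/O
	 *	covers.
	 */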

	pRAID_Context->timeout_value =
		cpu_to_le16(raid->fpIoTimeoutForLd ?
			    raid->fpIoTimeoutForLd :
			    map->raidMap.fpPdIoTimeoutSec);
	if (instance->adapter_type == INVADER_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else if (instance->adapter_type == THUNDERBOLT_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
	pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart);
	pRAID_Context->reg_lock_length = cpu_to_le32(regSize);
	pRAID_Context->config_seq_num = raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;

	/* Aero R5/6 Division Offload for WRITE */
	if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) {
		mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info,
					  (struct RAID_CONTEXT_G35 *)pRAID_Context,
					  map);
		return true;
	}

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip, ref_in_start_stripe,
					io_info, pRAID_Context, map) :
				MR_GetPhyParams(instance, ld, start_strip,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible.*/
		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
			io_info->fpOkForIo = false;
		return retval;
	} else if (isRead) {
		uint stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip + stripIdx,
					ref_in_start_stripe, io_info,
					pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
					start_strip + stripIdx, ref_in_start_stripe,
					io_info, pRAID_Context, map);
			if (!retval)
				return true;
		}
	}
	return true;
}

/*
******************************************************************************
*
* This routine prepares the spanset info from the valid RAID map and stores it
* in the per-instance local copy of ldSpanInfo.
*
* Inputs :
*    map        - LD map
*    ldSpanInfo - ldSpanInfo per HBA instance
*
*/
void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
			PLD_SPAN_INFO ldSpanInfo)
{
	u8 span, count;
	u32 element, span_row_width;
	u64 span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT *quad;
	int ldCount;
	u16 ld;


	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
						span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
						span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;
		}
	}
}

void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
				   struct LD_LOAD_BALANCE_INFO *lbInfo)
{
	int ldCount;
	u16 ld;
	struct MR_LD_RAID *raid;

	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}

		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
		    (raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}
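
/*
 * Sketch of the RAID-1 read load-balancing decision implemented in
 * megasas_get_best_arm_pd() below, with made-up numbers: suppose the last
 * block accessed on pd0 is 1000 and on pd1 is 9000, the new request starts at
 * block 1200, pd0 has 6 pending commands, pd1 has 1, and lb_pending_cmds is
 * the default 4.  Seek distance favours pd0 (diff0 = 200 < diff1 = 7800), so
 * bestArm starts as the data arm, but because pend0 (6) > pend1 (1) + 4 the
 * choice flips to the mirror arm.  The queue-depth check therefore overrides
 * the locality heuristic only when the preferred arm is backed up by more
 * than lb_pending_cmds outstanding commands.
 */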
static u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
				  struct LD_LOAD_BALANCE_INFO *lbInfo,
				  struct IO_REQUEST_INFO *io_info,
				  struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	struct MR_LD_RAID *raid;
	u16 pd1_dev_handle;
	u16 pend0, pend1, ld;
	u64 diff0, diff1;
	u8 bestArm, pd0, pd1, span, arm;
	u32 arRef, span_row_size;

	u64 block = io_info->ldStartBlock;
	u32 count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
			>> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = instance->UnevenSpanSupport ?
			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
			 (arm + 1 - span_row_size) : arm + 1, drv_map);

	/* Get PD1 Dev Handle */

	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);

	if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
		bestArm = arm;
	} else {
		/* get the pending cmds for the data and mirror arms */
		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

		/* Determine the disk whose head is nearer to the req. block */
		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

		/* Make balance count from 16 to 4 to
		 * keep driver in sync with Firmware
		 */
		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
		    (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
			bestArm ^= 1;

		/* Update the last accessed block on the correct pd */
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	}

	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
	return io_info->pd_after_lb;
}

__le16 get_updated_dev_handle(struct megasas_instance *instance,
			      struct LD_LOAD_BALANCE_INFO *lbInfo,
			      struct IO_REQUEST_INFO *io_info,
			      struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	u8 arm_pd;
	__le16 devHandle;

	/* get best new arm (PD ID) */
	arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
	atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

	return devHandle;
}
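
/*
 * Usage note: get_updated_dev_handle() is the entry point used for LDs where
 * mr_update_load_balance_params() set loadBalanceFlag - it picks the better
 * mirror arm via megasas_get_best_arm_pd() and charges that arm by
 * incrementing scsi_pending_cmds[].  The matching decrement is assumed to be
 * issued by the completion path elsewhere in the driver, so the counters read
 * here stay balanced.
 */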