/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

#define bfa_ioc_ct_sync_pos(__ioc)	\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);

static struct bfa_ioc_hwif_s hwif_ct;

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;

	ioc->ioc_hwif = &hwif_ct;
}
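/*
 * Firmware use-count protocol: on a CNA both PCI functions can drive the
 * same firmware image, so ioc_usage_reg counts how many functions hold a
 * reference, and the HOST_SEM1 semaphore (ioc_usage_sem_reg) serializes
 * updates to it. For example, the first function to come up sets the
 * count to 1; the second, after verifying that it carries a matching
 * firmware version, bumps it to 2.
 */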
/*
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/*
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return BFA_TRUE;

	/*
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * The use count cannot be non-zero while the chip is in the
	 * uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/*
	 * Firmware lock is relevant only for CNA.
	 */
	if (!ioc->cna)
		return;

	/*
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/*
	 * decrement usage count
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	WARN_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_trc(ioc, usecnt);

	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (ioc->cna) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg: for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
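/*
 * The Catapult personality register (FNC_PERS_REG) carries one 8-bit
 * field per PCI function, starting at bit (fn * 8) -- see
 * FNC_PERS_FN_SHIFT() below. Within that field the __F0_* masks select
 * the port-mapping and interrupt-mode bits. For example, for PCI
 * function 2 the register value is shifted right by 16 before
 * __F0_PORT_MAP_MK is applied.
 */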
/*
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	if (ioc->cna) {
		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_usage_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	}

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
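/*
 * The ioc_fail_sync scratch register coordinates failure recovery across
 * PCI functions: bits [31:16] form the sync_reqd bitmap (one bit per
 * function that has joined recovery) and bits [15:0] the sync_ackd
 * bitmap, per the bfa_ioc_ct_sync_* macros above. For example, with
 * functions 0 and 1 joined but only function 0 acked, the register reads
 * 0x00030001; recovery completes once sync_reqd == sync_ackd.
 */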
/*
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
		ioc->ioc_regs.ioc_fail_sync);
}

static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for the hw semaphore, the sync_ackd
	 * bit for this IOC needs to be set again to allow
	 * reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}

/*
 * Check the firmware state to know if pll_init has been completed already
 */
bfa_boolean_t
bfa_ioc_ct_pll_init_complete(void __iomem *rb)
{
	if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
		(readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
		return BFA_TRUE;

	return BFA_FALSE;
}
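/*
 * PLL initialization sequence: select FC or FCoE operating mode, force
 * both IOC states back to UNINIT, mask and clear all host function
 * interrupts, program the 312 (slow) and 425 (fast) PLLs with logic
 * soft reset asserted, enable them, wait 2ms for the clocks to settle,
 * then release the soft resets. In FCoE mode the 1T memory blocks are
 * pulsed through reset as well; finally LMEM reset is released and one
 * eDRAM memory BIST pass is run.
 */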
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
{
	u32	pll_sclk, pll_fclk, r32;

	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
		__APP_PLL_312_JITLMT0_1(3U) |
		__APP_PLL_312_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
		__APP_PLL_425_JITLMT0_1(3U) |
		__APP_PLL_425_CNTLMT0_1(1U);
	/* Select the FC or FCoE operating mode */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	/* Force both IOCs back to the uninitialized state */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	/* Mask and clear all host function interrupts */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	/* Program both PLLs with logic soft reset asserted, then enable them */
	writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
		rb + APP_PLL_425_CTL_REG);
	writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
		rb + APP_PLL_425_CTL_REG);
	/* Flush PCI writes and give the PLLs time to settle */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	/* Release the PLL logic soft resets */
	writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	/* Release local memory (LMEM) reset */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Run one eDRAM memory BIST pass */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
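/*
 * The CT hwif is selected during IOC attach based on the PCI device id.
 * A minimal sketch of the call site (the exact location varies by
 * driver version; typically in the IOC PCI init path in bfa_ioc.c):
 *
 *	if (bfa_asic_id_ct(ioc->pcidev.device_id))
 *		bfa_ioc_set_ct_hwif(ioc);
 *	else
 *		bfa_ioc_set_cb_hwif(ioc);
 */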