// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "ddr3_init.h"
#include "mv_ddr_common.h"
#include "xor_regs.h"

/* defines */
#ifdef MV_DEBUG
#define DB(x) x
#else
#define DB(x)
#endif

static u32 ui_xor_regs_ctrl_backup;
static u32 ui_xor_regs_base_backup[MAX_CS_NUM + 1];
static u32 ui_xor_regs_mask_backup[MAX_CS_NUM + 1];

void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, uint64_t cs_size, u32 base_delta)
{
	u32 reg, ui, cs_count;
	uint64_t base, size_mask;

	ui_xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0));
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		ui_xor_regs_base_backup[ui] =
			reg_read(XOR_BASE_ADDR_REG(0, ui));
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		ui_xor_regs_mask_backup[ui] =
			reg_read(XOR_SIZE_MASK_REG(0, ui));

	reg = 0;
	for (ui = 0, cs_count = 0;
	     (cs_count < num_of_cs) && (ui < 8);
	     ui++, cs_count++) {
		if (cs_ena & (1 << ui)) {
			/* Enable window x for each CS */
			reg |= (0x1 << (ui));
			/* Set full (read and write) access for each CS window */
			reg |= (0x3 << ((ui * 2) + 16));
		}
	}

	reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg);

	cs_count = 0;
	for (ui = 0, cs_count = 0;
	     (cs_count < num_of_cs) && (ui < 8);
	     ui++, cs_count++) {
		if (cs_ena & (1 << ui)) {
			/*
			 * window x - Base - 0x00000000,
			 * Attribute 0x0e - DRAM
			 */
			base = cs_size * ui + base_delta;
			/* fixed size 2GB for each CS */
			size_mask = 0x7FFF0000;
			switch (ui) {
			case 0:
				base |= 0xe00;
				break;
			case 1:
				base |= 0xd00;
				break;
			case 2:
				base |= 0xb00;
				break;
			case 3:
				base |= 0x700;
				break;
			case 4: /* SRAM */
				base = 0x40000000;
				/* configure as shared transaction */
				base |= 0x1F00;
				size_mask = 0xF0000;
				break;
			}

			reg_write(XOR_BASE_ADDR_REG(0, ui), (u32)base);
			size_mask = (cs_size / _64K) - 1;
			size_mask = (size_mask << XESMRX_SIZE_MASK_OFFS) & XESMRX_SIZE_MASK_MASK;
			/* window x - Size */
			reg_write(XOR_SIZE_MASK_REG(0, ui), (u32)size_mask);
		}
	}

	mv_xor_hal_init(1);
}

void mv_sys_xor_finish(void)
{
	u32 ui;

	reg_write(XOR_WINDOW_CTRL_REG(0, 0), ui_xor_regs_ctrl_backup);
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		reg_write(XOR_BASE_ADDR_REG(0, ui),
			  ui_xor_regs_base_backup[ui]);
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		reg_write(XOR_SIZE_MASK_REG(0, ui),
			  ui_xor_regs_mask_backup[ui]);

	reg_write(XOR_ADDR_OVRD_REG(0, 0), 0);
}

/*
 * mv_xor_hal_init - Initialize XOR engine
 *
 * DESCRIPTION:
 *	This function initializes the XOR unit.
 * INPUT:
 *	xor_chan_num - number of XOR channels to initialize.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	None.
 */
void mv_xor_hal_init(u32 xor_chan_num)
{
	u32 i;

	/* Abort any XOR activity & set default configuration */
	for (i = 0; i < xor_chan_num; i++) {
		mv_xor_command_set(i, MV_STOP);
		mv_xor_ctrl_set(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
				(4 << XEXCR_DST_BURST_LIMIT_OFFS) |
				(4 << XEXCR_SRC_BURST_LIMIT_OFFS));
	}
}
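/*
 * Illustrative sketch (not part of the driver): how a caller might bring up
 * one XOR channel with the defaults programmed above and then override the
 * burst limits through mv_xor_ctrl_set(). The field offsets are the ones
 * already used in this file; the burst-limit value of 2 is only an example.
 */
#if 0
static void xor_bringup_example(void)
{
	u32 ctrl;

	/* abort any activity and program the default configuration */
	mv_xor_hal_init(1);

	/* keep register access protection, use a smaller burst limit */
	ctrl = (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
	       (2 << XEXCR_DST_BURST_LIMIT_OFFS) |
	       (2 << XEXCR_SRC_BURST_LIMIT_OFFS);
	mv_xor_ctrl_set(0, ctrl);	/* operation mode bits are preserved */
}
#endif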
/*
 * mv_xor_ctrl_set - Set XOR channel control register
 *
 * DESCRIPTION:
 *	This function writes the given control value to the XOR Engine
 *	configuration register of the given channel, preserving the
 *	current operation mode.
 *
 * INPUT:
 *	chan - the channel number
 *	xor_ctrl - the control value to write
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_OK.
 * NOTE:
 *	This function does not modify the Operation_mode field of the
 *	control register.
 */
int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
{
	u32 old_value;

	/* update the XOR Engine [0..1] Configuration Registers (XEx_c_r) */
	old_value = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan))) &
		XEXCR_OPERATION_MODE_MASK;
	xor_ctrl &= ~XEXCR_OPERATION_MODE_MASK;
	xor_ctrl |= old_value;
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), xor_ctrl);

	return MV_OK;
}

int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
		    u32 init_val_high, u32 init_val_low)
{
	u32 temp;

	if (block_size == _4G)
		block_size -= 1;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN)
		return MV_BAD_PARAM;

	if (MV_ACTIVE == mv_xor_state_get(chan))
		return MV_BUSY;

	if ((block_size < XEXBSR_BLOCK_SIZE_MIN_VALUE) ||
	    (block_size > XEXBSR_BLOCK_SIZE_MAX_VALUE))
		return MV_BAD_PARAM;

	/* set the operation mode to Memory Init */
	temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	temp &= ~XEXCR_OPERATION_MODE_MASK;
	temp |= XEXCR_OPERATION_MODE_MEM_INIT;
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);

	/*
	 * update the start_ptr field in XOR Engine [0..1] Destination Pointer
	 * Register
	 */
	reg_write(XOR_DST_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)), start_ptr);

	/*
	 * update the Block_size field in the XOR Engine[0..1] Block Size
	 * Registers
	 */
	reg_write(XOR_BLOCK_SIZE_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		  block_size);

	/*
	 * update the field Init_val_l in the XOR Engine Initial Value Register
	 * Low (XEIVRL)
	 */
	reg_write(XOR_INIT_VAL_LOW_REG(XOR_UNIT(chan)), init_val_low);

	/*
	 * update the field Init_val_h in the XOR Engine Initial Value Register
	 * High (XEIVRH)
	 */
	reg_write(XOR_INIT_VAL_HIGH_REG(XOR_UNIT(chan)), init_val_high);

	/* start transfer */
	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		    XEXACTR_XESTART_MASK);

	return MV_OK;
}

/*
 * mv_xor_state_get - Get XOR channel state.
 *
 * DESCRIPTION:
 *	XOR channel activity state can be active, idle or paused.
 *	This function returns the channel activity state.
 *
 * INPUT:
 *	chan - the channel number
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_IDLE - if the engine is idle.
 *	MV_ACTIVE - if the engine is busy.
 *	MV_PAUSED - if the engine is paused.
 *	MV_UNDEFINED_STATE - if the engine state is undefined or there is no
 *			     such engine.
 */
enum mv_state mv_xor_state_get(u32 chan)
{
	u32 state;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_UNDEFINED_STATE;
	}

	/* read the current state */
	state = reg_read(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	state &= XEXACTR_XESTATUS_MASK;

	/* return the state */
	switch (state) {
	case XEXACTR_XESTATUS_IDLE:
		return MV_IDLE;
	case XEXACTR_XESTATUS_ACTIVE:
		return MV_ACTIVE;
	case XEXACTR_XESTATUS_PAUSED:
		return MV_PAUSED;
	}

	return MV_UNDEFINED_STATE;
}
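/*
 * Illustrative sketch (not part of the driver): a bounded wait on
 * mv_xor_state_get(), as an alternative to the open-ended polling loop used
 * in ddr3_new_tip_ecc_scrub() below. The iteration limit is an arbitrary
 * example value, and MV_FAIL is assumed to be an available error code.
 */
#if 0
static int xor_wait_idle_example(u32 chan)
{
	u32 retries = 1000000;	/* arbitrary example bound */

	while (mv_xor_state_get(chan) != MV_IDLE) {
		if (--retries == 0)
			return MV_FAIL;	/* assumed error code */
	}

	return MV_OK;
}
#endif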
/*
 * mv_xor_command_set - Set command of XOR channel
 *
 * DESCRIPTION:
 *	XOR channel can be started, stopped, paused and restarted.
 *	Pause can be set only if the channel is active.
 *	Start can be set only if the channel is idle or paused.
 *	Restart can be set only if the channel is paused.
 *	Stop can be set only if the channel is active.
 *
 * INPUT:
 *	chan - The channel number
 *	command - The command type (start, stop, restart, pause)
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_OK on success, MV_BAD_PARAM on erroneous parameter, MV_ERROR on
 *	undefined XOR engine mode.
 */
int mv_xor_command_set(u32 chan, enum mv_command command)
{
	enum mv_state state;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}

	/* get the current state */
	state = mv_xor_state_get(chan);

	if ((command == MV_START) && (state == MV_IDLE)) {
		/* command is start and current state is idle */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XESTART_MASK);
		return MV_OK;
	} else if ((command == MV_STOP) && (state == MV_ACTIVE)) {
		/* command is stop and current state is active */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XESTOP_MASK);
		return MV_OK;
	} else if (((enum mv_state)command == MV_PAUSED) &&
		   (state == MV_ACTIVE)) {
		/* command is pause and current state is active */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XEPAUSE_MASK);
		return MV_OK;
	} else if ((command == MV_RESTART) && (state == MV_PAUSED)) {
		/* command is restart and current state is paused */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XERESTART_MASK);
		return MV_OK;
	} else if ((command == MV_STOP) && (state == MV_IDLE)) {
		/* command is stop and the channel is already idle */
		return MV_OK;
	}

	/* illegal command */
	DB(printf("%s: ERR. Illegal command\n", __func__));

	return MV_BAD_PARAM;
}

void ddr3_new_tip_ecc_scrub(void)
{
	u32 cs_c, max_cs;
	u32 cs_ena = 0;
	uint64_t total_mem_size, cs_mem_size = 0;

	printf("DDR3 Training Sequence - Start scrubbing\n");
	max_cs = mv_ddr_cs_num_get();
	for (cs_c = 0; cs_c < max_cs; cs_c++)
		cs_ena |= 1 << cs_c;

#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
	/* all chip-selects are of same size */
	ddr3_calc_mem_cs_size(0, &cs_mem_size);
#endif

	mv_sys_xor_init(max_cs, cs_ena, cs_mem_size, 0);
	total_mem_size = max_cs * cs_mem_size;
	mv_xor_mem_init(0, 0, total_mem_size, 0xdeadbeef, 0xdeadbeef);
	/* wait for the scrub transfer to complete */
	while (mv_xor_state_get(0) != MV_IDLE)
		;
	/* restore the XOR unit registers */
	mv_sys_xor_finish();

	printf("DDR3 Training Sequence - End scrubbing\n");
}
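/*
 * Illustrative sketch (not part of the driver): the same init/scrub/finish
 * flow as ddr3_new_tip_ecc_scrub(), limited to a single chip-select and an
 * arbitrary fill pattern. The parameter values are examples only; the
 * window setup assumes one CS of size cs_size starting at address 0.
 */
#if 0
static void ecc_scrub_one_cs_example(uint64_t cs_size, u32 pattern)
{
	/* open XOR address windows for one chip-select at base 0 */
	mv_sys_xor_init(1, 0x1, cs_size, 0);

	/* fill the whole chip-select with the given pattern */
	mv_xor_mem_init(0, 0, cs_size, pattern, pattern);

	/* wait for the memory-init transfer to complete */
	while (mv_xor_state_get(0) != MV_IDLE)
		;

	/* restore the XOR unit registers saved by mv_sys_xor_init() */
	mv_sys_xor_finish();
}
#endif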
/*
 * mv_xor_transfer - Transfer data from source to destination in one of
 *		     three modes: XOR, CRC32 or DMA
 *
 * DESCRIPTION:
 *	This function initiates an XOR channel, according to the function
 *	parameters, in order to perform an XOR, CRC32 or DMA transaction.
 *	To gain maximum performance the user is asked to keep the following
 *	restrictions:
 *	1) The selected engine is available (not busy).
 *	2) This module does not take into consideration CPU MMU issues.
 *	   In order for the XOR engine to access the appropriate source
 *	   and destination, address parameters must be given in system
 *	   physical mode.
 *	3) This API does not take care of cache coherency issues. The source,
 *	   destination and, in case of chain, the descriptor list are assumed
 *	   to be cache coherent.
 *	4) Parameters validity.
 *
 * INPUT:
 *	chan - XOR channel number.
 *	type - One of three: XOR, CRC32 and DMA operations.
 *	xor_chain_ptr - address of chain pointer
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_BAD_PARAM if parameters to function are invalid,
 *	MV_BUSY if the channel is already active, MV_OK otherwise.
 */
int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr)
{
	u32 temp;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}
	if (mv_xor_state_get(chan) == MV_ACTIVE) {
		DB(printf("%s: ERR. Channel is already active\n", __func__));
		return MV_BUSY;
	}
	if (xor_chain_ptr == 0x0) {
		DB(printf("%s: ERR. xor_chain_ptr is NULL pointer\n", __func__));
		return MV_BAD_PARAM;
	}

	/* read configuration register and mask the operation mode field */
	temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	temp &= ~XEXCR_OPERATION_MODE_MASK;

	switch (type) {
	case MV_XOR:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to XOR */
		temp |= XEXCR_OPERATION_MODE_XOR;
		break;
	case MV_DMA:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to DMA */
		temp |= XEXCR_OPERATION_MODE_DMA;
		break;
	case MV_CRC32:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to CRC32 */
		temp |= XEXCR_OPERATION_MODE_CRC;
		break;
	default:
		return MV_BAD_PARAM;
	}

	/* write the operation mode to the register */
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);
	/*
	 * update the NextDescPtr field in the XOR Engine [0..1] Next Descriptor
	 * Pointer Register (XExNDPR)
	 */
	reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		  xor_chain_ptr);

	/* start transfer */
	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		    XEXACTR_XESTART_MASK);

	return MV_OK;
}
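/*
 * Illustrative sketch (not part of the driver): starting a DMA transaction
 * through mv_xor_transfer() with an already-prepared descriptor chain and
 * waiting for completion. Building the descriptor chain itself is outside
 * the scope of this file; dma_chain_phys_addr is a hypothetical physical
 * address that must have bits [4:0] cleared, as checked above.
 */
#if 0
static int xor_dma_example(u32 chan, u32 dma_chain_phys_addr)
{
	int ret;

	/* program the operation mode and descriptor pointer, start the channel */
	ret = mv_xor_transfer(chan, MV_DMA, dma_chain_phys_addr);
	if (ret != MV_OK)
		return ret;

	/* busy-wait until the channel returns to idle */
	while (mv_xor_state_get(chan) != MV_IDLE)
		;

	return MV_OK;
}
#endif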