/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/*
 * dc_helper.c
 *
 *  Created on: Aug 30, 2016
 *      Author: agrodzov
 */

#include <linux/delay.h>
#include <linux/stdarg.h>

#include "dm_services.h"

#include "dc.h"
#include "dc_dmub_srv.h"
#include "reg_helper.h"

static inline void submit_dmub_read_modify_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}

static inline void submit_dmub_burst_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}

static inline void submit_dmub_reg_wait(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;
}

struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};

static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);

	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}
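
/*
 * Worked example (illustrative, not part of the driver flow): for a field
 * with mask 0x0000FF00 and shift 8, packing the field value 0x3C gives
 *
 *	value = (0x3C << 8) & 0x0000FF00 = 0x00003C00
 *	mask |= 0x0000FF00
 *
 * so a later read-modify-write replaces only bits 8..15 of the register and
 * leaves the remaining bits untouched.
 */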

static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather the value/mask pairs for all bits being updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
		i++;
	}
}

static void dmub_flush_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
}

static void dmub_flush_burst_write_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
}

static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}

static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}
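
/*
 * Note on the burst-write promotion above: submit_dmub_read_modify_write()
 * sets should_burst_write when a full sequence consisted of writes to the
 * same address (same_addr_count reached DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1).
 * Subsequent packs then try the denser burst-write encoding and fall back to
 * read-modify-write as soon as a different address shows up.
 */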

static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}

uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}
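
/*
 * Illustrative call of the update helper above, with hypothetical register
 * and field names (real callers use the REG_SET/REG_UPDATE macros from
 * reg_helper.h, which expand to these generic_* functions):
 *
 *	generic_reg_update_ex(ctx, REG_ADDR, 2,
 *			FIELD_A__SHIFT, FIELD_A_MASK, a_value,
 *			FIELD_B__SHIFT, FIELD_B_MASK, b_value);
 *
 * packs both fields into a single read-modify-write of REG_ADDR.
 */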

uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}

uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}

/* note: a va_arg version of this is a pretty bad idea: the output parameters
 * are passed by pointer, so the compiler cannot check that their sizes match,
 * which makes the call prone to stack-corruption type bugs

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val;
	int i = 0;

	reg_val = dm_read_reg(ctx, addr);

	va_list ap;
	va_start(ap, n);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}

	va_end(ap);

	return reg_val;
}
*/

void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if the time out is > 3000ms.
	 * 3000ms is the maximum time needed for the SMU to pass values back;
	 * this value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000)
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	BREAK_TO_DEBUGGER();
}
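
/*
 * Timing example for generic_reg_wait() (illustrative numbers): with
 * delay_between_poll_us = 100 and time_out_num_tries = 1000, the register is
 * polled for at most ~100 ms before the timeout warning fires, comfortably
 * below the 3,000,000 us bound asserted above.
 */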

void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}

uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	/* no offload should be in progress when reading a register */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);

	return value;
}

uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}
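
/*
 * Offload lifecycle, summarizing the three entry points below: a caller
 * brackets a register sequence with reg_sequence_start_gather() and
 * reg_sequence_start_execute(). While gathering, REG_SET/REG_UPDATE/REG_WAIT
 * pack into the DMUB command buffer instead of issuing MMIO; execute flushes
 * whichever command type was gathered, and reg_sequence_wait_done() lets the
 * DM poll until the last submission completes. A hypothetical caller:
 *
 *	reg_sequence_start_gather(ctx);
 *	REG_UPDATE(SOME_REG, SOME_FIELD, val);	// packed, no MMIO
 *	reg_sequence_start_execute(ctx);	// submit to DMUB
 *	reg_sequence_wait_done(ctx);		// wait for completion
 */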
634 */ 635 636 if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) { 637 struct dc_reg_helper_state *offload = 638 &ctx->dmub_srv->reg_helper_offload; 639 640 /* caller sequence mismatch. need to debug caller. offload will not work!!! */ 641 ASSERT(!offload->gather_in_progress); 642 643 offload->gather_in_progress = true; 644 } 645 } 646 647 void reg_sequence_start_execute(const struct dc_context *ctx) 648 { 649 struct dc_reg_helper_state *offload; 650 651 if (!ctx->dmub_srv) 652 return; 653 654 offload = &ctx->dmub_srv->reg_helper_offload; 655 656 if (offload && offload->gather_in_progress) { 657 offload->gather_in_progress = false; 658 offload->should_burst_write = false; 659 switch (offload->cmd_data.cmd_common.header.type) { 660 case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE: 661 submit_dmub_read_modify_write(offload, ctx); 662 break; 663 case DMUB_CMD__REG_REG_WAIT: 664 submit_dmub_reg_wait(offload, ctx); 665 break; 666 case DMUB_CMD__REG_SEQ_BURST_WRITE: 667 submit_dmub_burst_write(offload, ctx); 668 break; 669 default: 670 return; 671 } 672 } 673 } 674 675 void reg_sequence_wait_done(const struct dc_context *ctx) 676 { 677 /* callback to DM to poll for last submission done*/ 678 struct dc_reg_helper_state *offload; 679 680 if (!ctx->dmub_srv) 681 return; 682 683 offload = &ctx->dmub_srv->reg_helper_offload; 684 685 if (offload && 686 ctx->dc->debug.dmub_offload_enabled && 687 !ctx->dc->debug.dmcub_emulation) { 688 dc_dmub_srv_wait_idle(ctx->dmub_srv); 689 } 690 } 691 692 char *dce_version_to_string(const int version) 693 { 694 switch (version) { 695 case DCE_VERSION_8_0: 696 return "DCE 8.0"; 697 case DCE_VERSION_8_1: 698 return "DCE 8.1"; 699 case DCE_VERSION_8_3: 700 return "DCE 8.3"; 701 case DCE_VERSION_10_0: 702 return "DCE 10.0"; 703 case DCE_VERSION_11_0: 704 return "DCE 11.0"; 705 case DCE_VERSION_11_2: 706 return "DCE 11.2"; 707 case DCE_VERSION_11_22: 708 return "DCE 11.22"; 709 case DCE_VERSION_12_0: 710 return "DCE 12.0"; 711 case DCE_VERSION_12_1: 712 return "DCE 12.1"; 713 case DCN_VERSION_1_0: 714 return "DCN 1.0"; 715 case DCN_VERSION_1_01: 716 return "DCN 1.0.1"; 717 case DCN_VERSION_2_0: 718 return "DCN 2.0"; 719 case DCN_VERSION_2_1: 720 return "DCN 2.1"; 721 case DCN_VERSION_2_01: 722 return "DCN 2.0.1"; 723 case DCN_VERSION_3_0: 724 return "DCN 3.0"; 725 case DCN_VERSION_3_01: 726 return "DCN 3.0.1"; 727 case DCN_VERSION_3_02: 728 return "DCN 3.0.2"; 729 case DCN_VERSION_3_03: 730 return "DCN 3.0.3"; 731 case DCN_VERSION_3_1: 732 return "DCN 3.1"; 733 case DCN_VERSION_3_14: 734 return "DCN 3.1.4"; 735 case DCN_VERSION_3_15: 736 return "DCN 3.1.5"; 737 case DCN_VERSION_3_16: 738 return "DCN 3.1.6"; 739 case DCN_VERSION_3_2: 740 return "DCN 3.2"; 741 case DCN_VERSION_3_21: 742 return "DCN 3.2.1"; 743 default: 744 return "Unknown"; 745 } 746 } 747