/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * dc_helper.c
 *
 * Created on: Aug 30, 2016
 *     Author: agrodzov
 */

#include <linux/delay.h>
#include <linux/stdarg.h>

#include "dm_services.h"

#include "dc.h"
#include "dc_dmub_srv.h"
#include "reg_helper.h"

static inline void submit_dmub_read_modify_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}

static inline void submit_dmub_burst_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}

static inline void submit_dmub_reg_wait(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;
}

struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};

static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);

	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}

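/*
 * Gather the combined value/mask of every field updated in one register:
 * the first (shift, mask, value) triple is passed explicitly and the
 * remaining n - 1 triples are consumed from the va_list.
 */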
static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather all bits value/mask getting updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
		i++;
	}
}

static void dmub_flush_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
}

static void dmub_flush_burst_write_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
}

static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}

static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}

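/*
 * Pack a DMUB reg-wait command: wait for the masked field at addr to match
 * condition_value, with time_out_us as the overall timeout. Used while a
 * gather is in progress so the wait executes as part of the offloaded
 * sequence instead of an MMIO polling loop in the driver.
 */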
static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}

uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}

uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

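/*
 * generic_reg_get4() through generic_reg_get8() below follow the same
 * pattern as the variants above: a single MMIO read, then each
 * (shift, mask) pair decoded into its output parameter.
 */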
uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}

/* note: a va_args version of this is a bad idea: the output parameters are
 * passed by pointer, so the compiler cannot check that the argument count
 * matches, and the code is prone to stack-corruption bugs.

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val;
	int i = 0;

	reg_val = dm_read_reg(ctx, addr);

	va_list ap;
	va_start(ap, n);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}

	va_end(ap);

	return reg_val;
}
*/

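/*
 * Poll the masked field at addr until it equals condition_value, waiting
 * delay_between_poll_us between attempts for at most time_out_num_tries
 * tries. While a DMUB gather is in progress the wait is packed as a
 * reg-wait command instead of being polled from the driver.
 */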
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if the timeout is > 3000ms.
	 * 3000ms is the maximum time needed for SMU to pass values back;
	 * this value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000 &&
			    !IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}

void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}

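/*
 * Indirect register access: write the register's index to addr_index,
 * then read the value back through addr_data.
 */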
uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	/* there should be no offload in progress during a register read */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);

	return value;
}

uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

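/*
 * reg_sequence_* helpers: gather register programming into a DMUB command
 * buffer, kick off offloaded execution, and wait for it to complete.
 */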
void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* if reg sequence is supported and enabled, set flag to
	 * indicate we want to have REG_SET, REG_UPDATE macro build
	 * reg sequence command buffer rather than MMIO directly.
	 */

	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
		struct dc_reg_helper_state *offload =
				&ctx->dmub_srv->reg_helper_offload;

		/* caller sequence mismatch: a gather is already in progress.
		 * The caller needs to be debugged; offload will not work.
		 */
		ASSERT(!offload->gather_in_progress);

		offload->gather_in_progress = true;
	}
}

void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}
	}
}

void reg_sequence_wait_done(const struct dc_context *ctx)
{
	/* callback to DM to poll for last submission done */
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload &&
	    ctx->dc->debug.dmub_offload_enabled &&
	    !ctx->dc->debug.dmcub_emulation) {
		dc_dmub_srv_wait_idle(ctx->dmub_srv);
	}
}

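/*
 * Typical offload flow, sketched from the helpers above (REG_SET and
 * REG_UPDATE are the reg_helper.h macros referenced in the comments):
 *
 *	reg_sequence_start_gather(ctx);
 *	REG_SET(...);  REG_UPDATE(...);    - packed into the DMUB buffer
 *	reg_sequence_start_execute(ctx);   - submit the gathered commands
 *	reg_sequence_wait_done(ctx);       - poll for completion
 */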