/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/*
 * dc_helper.c
 *
 * Created on: Aug 30, 2016
 *     Author: agrodzov
 */

#include <linux/delay.h>

#include "dm_services.h"
#include <stdarg.h>

#include "dc.h"
#include "dc_dmub_srv.h"

static inline void submit_dmub_read_modify_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	bool gather = false;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}

static inline void submit_dmub_burst_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
	bool gather = false;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}

static inline void submit_dmub_reg_wait(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
	bool gather = false;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}
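/*
 * Note on the submit helpers above: each one saves gather_in_progress,
 * clears it around dc_dmub_srv_cmd_queue(), then restores it. The
 * assumption is that any register access performed while the command is
 * being queued must go out as plain MMIO rather than be gathered back
 * into the very buffer that is being flushed.
 */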
struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};

static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);

	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}

static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather all bit value/mask pairs getting updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
		i++;
	}
}
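/*
 * Illustrative example (FOO__BAR/FOO__BAZ are placeholder field names,
 * not real registers): a two-field update such as
 *
 *	generic_reg_update_ex(ctx, addr, 2,
 *			FOO__BAR__SHIFT, FOO__BAR_MASK, 1,
 *			FOO__BAZ__SHIFT, FOO__BAZ_MASK, 0);
 *
 * feeds the (shift, mask, value) triples through set_reg_field_values()
 * above, folding them into one value/mask pair so the register is read
 * and written only once no matter how many fields change.
 */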
static void dmub_flush_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

static void dmub_flush_burst_write_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}

static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}

static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}

uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}
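/*
 * generic_reg_set_ex() below differs from generic_reg_update_ex() above
 * in that the caller supplies the base reg_val, so no read is needed;
 * under offload it therefore packs a burst write instead of a
 * read-modify-write sequence.
 */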
uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}
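/*
 * Fixed-arity read helpers follow, one per field count up to 8. Each
 * reads the register once and decodes all requested fields from the
 * same snapshot. Illustrative use (placeholder field names):
 *
 *	uint32_t enable, mode;
 *
 *	generic_reg_get2(ctx, addr,
 *			CTL__ENABLE__SHIFT, CTL__ENABLE_MASK, &enable,
 *			CTL__MODE__SHIFT, CTL__MODE_MASK, &mode);
 */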
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}

uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}
/* note: a va_arg version of this is a pretty bad idea; since the output
 * parameters are passed by pointer, the compiler can't check for a size
 * match, and it is prone to stack-corruption type bugs

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val;
	int i = 0;

	reg_val = dm_read_reg(ctx, addr);

	va_list ap;
	va_start(ap, n);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}

	va_end(ap);

	return reg_val;
}
*/
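/*
 * generic_reg_wait() below polls until the field decoded via
 * get_reg_field_value_ex() equals condition_value. The worst-case wall
 * time is roughly delay_between_poll_us * time_out_num_tries; for
 * example, 100us * 1000 tries gives a 100ms budget. The ASSERT in the
 * function caps this budget at 3 seconds.
 */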
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if the time out is > 3000ms.
	 * 3000ms is the maximum time needed for the SMU to pass values back.
	 * This value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000 &&
			    !IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}

void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}

uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	/* register reads cannot be offloaded; there should be no gather in progress */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);

	return value;
}
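/*
 * Indirect (index/data pair) access: a write to addr_index selects
 * which internal register the addr_data window exposes. For example,
 * reading internal register 5 (illustrative) amounts to:
 *
 *	dm_write_reg(ctx, addr_index, 5);
 *	value = dm_read_reg(ctx, addr_data);
 */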
uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}

void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* if reg sequence is supported and enabled, set the flag to
	 * indicate that we want the REG_SET/REG_UPDATE macros to build
	 * a reg sequence command buffer rather than write via MMIO directly.
	 */

	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
		struct dc_reg_helper_state *offload =
				&ctx->dmub_srv->reg_helper_offload;

		/* caller sequence mismatch: need to debug the caller; offload will not work!!! */
		ASSERT(!offload->gather_in_progress);

		offload->gather_in_progress = true;
	}
}

void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}

		dc_dmub_srv_cmd_execute(ctx->dmub_srv);
	}
}

void reg_sequence_wait_done(const struct dc_context *ctx)
{
	/* callback to DM to poll for last submission done */
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload &&
	    ctx->dc->debug.dmub_offload_enabled &&
	    !ctx->dc->debug.dmcub_emulation) {
		dc_dmub_srv_wait_idle(ctx->dmub_srv);
	}
}
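/*
 * Typical offload call sequence (illustrative; requires
 * dc->debug.dmub_offload_enabled):
 *
 *	reg_sequence_start_gather(ctx);		// subsequent REG_SET/REG_UPDATE
 *	REG_UPDATE(...);			// ...pack into the DMUB buffer
 *	reg_sequence_start_execute(ctx);	// submit and kick off execution
 *	reg_sequence_wait_done(ctx);		// wait for DMUB to go idle
 */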