/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "../dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
#include "dmub_cmd.h"
#include "dmub_dcn30.h"
#include "dmub_dcn301.h"
#include "dmub_dcn302.h"
#include "dmub_dcn303.h"
#include "os_types.h"
/*
 * Note: the DMUB service is standalone. No additional headers should be
 * added below or above this line unless they reside within the DMUB
 * folder.
 */

/* Alignment for framebuffer memory. */
#define DMUB_FB_ALIGNMENT (1024 * 1024)

/* Stack size. */
#define DMUB_STACK_SIZE (128 * 1024)

/* Context size. */
#define DMUB_CONTEXT_SIZE (512 * 1024)

/* Mailbox size : Ring buffers are required for both inbox and outbox */
#define DMUB_MAILBOX_SIZE ((2 * DMUB_RB_SIZE))

/* Default state size if meta is absent. */
#define DMUB_FW_STATE_SIZE (64 * 1024)

/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)

/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (256)

/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)

/* Base addresses of the DMCUB cache windows in the firmware address space. */
#define DMUB_CW0_BASE (0x60000000)
#define DMUB_CW1_BASE (0x61000000)
#define DMUB_CW3_BASE (0x63000000)
#define DMUB_CW4_BASE (0x64000000)
#define DMUB_CW5_BASE (0x65000000)
#define DMUB_CW6_BASE (0x66000000)

#define DMUB_REGION5_BASE (0xA0000000)

/* Round @val up to the nearest multiple of @factor. */
static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
{
	return (val + factor - 1) / factor * factor;
}

/*
 * Read back the whole framebuffer region in 64-byte chunks to force any
 * pending CPU writes to be flushed before the DMCUB consumes the memory.
 * The data read is intentionally discarded.
 */
void dmub_flush_buffer_mem(const struct dmub_fb *fb)
{
	const uint8_t *base = (const uint8_t *)fb->cpu_addr;
	uint8_t buf[64];
	uint32_t pos, end;

	/**
	 * Read 64-byte chunks since we don't want to store a
	 * large temporary buffer for this purpose.
	 */
	end = fb->size / sizeof(buf) * sizeof(buf);

	for (pos = 0; pos < end; pos += sizeof(buf))
		dmub_memcpy(buf, base + pos, sizeof(buf));

	/* Read anything leftover into the buffer. */
	if (end < fb->size)
		dmub_memcpy(buf, base + pos, fb->size - end);
}

/*
 * Locate the firmware metadata blob embedded at the tail of the firmware
 * image. Prefers the legacy location at the end of the BSS/data blob;
 * falls back to the end of the combined inst/const blob. Returns NULL if
 * no blob is available, the blob is too small, or the magic doesn't match.
 */
static const struct dmub_fw_meta_info *
dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
{
	const union dmub_fw_meta *meta;
	const uint8_t *blob = NULL;
	uint32_t blob_size = 0;
	uint32_t meta_offset = 0;

	if (params->fw_bss_data && params->bss_data_size) {
		/* Legacy metadata region. */
		blob = params->fw_bss_data;
		blob_size = params->bss_data_size;
		meta_offset = DMUB_FW_META_OFFSET;
	} else if (params->fw_inst_const && params->inst_const_size) {
		/* Combined metadata region. */
		blob = params->fw_inst_const;
		blob_size = params->inst_const_size;
		meta_offset = 0;
	}

	if (!blob || !blob_size)
		return NULL;

	if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
		return NULL;

	/* Metadata lives meta_offset bytes before the end of the blob. */
	meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
					    sizeof(union dmub_fw_meta));

	if (meta->info.magic_value != DMUB_FW_META_MAGIC)
		return NULL;

	return &meta->info;
}

/*
 * Fill in the ASIC-specific register map and hardware function pointers.
 * All supported ASICs start from the DCN20 defaults; newer ASICs then
 * override the registers and any functions that differ. Returns false for
 * unsupported ASICs.
 */
static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;

	switch (asic) {
	case DMUB_ASIC_DCN20:
	case DMUB_ASIC_DCN21:
	case DMUB_ASIC_DCN30:
	case DMUB_ASIC_DCN301:
	case DMUB_ASIC_DCN302:
	case DMUB_ASIC_DCN303:
		dmub->regs = &dmub_srv_dcn20_regs;

		funcs->reset = dmub_dcn20_reset;
		funcs->reset_release = dmub_dcn20_reset_release;
		funcs->backdoor_load = dmub_dcn20_backdoor_load;
		funcs->setup_windows = dmub_dcn20_setup_windows;
		funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
		funcs->is_supported = dmub_dcn20_is_supported;
		funcs->is_hw_init = dmub_dcn20_is_hw_init;
		funcs->set_gpint = dmub_dcn20_set_gpint;
		funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
		funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
		funcs->get_fw_status = dmub_dcn20_get_fw_boot_status;
		funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options;
		funcs->skip_dmub_panel_power_sequence = dmub_dcn20_skip_dmub_panel_power_sequence;
		funcs->get_current_time = dmub_dcn20_get_current_time;

		// Out mailbox register access functions for RN and above
		funcs->setup_out_mailbox = dmub_dcn20_setup_out_mailbox;
		funcs->get_outbox1_wptr = dmub_dcn20_get_outbox1_wptr;
		funcs->set_outbox1_rptr = dmub_dcn20_set_outbox1_rptr;

		// outbox0 call stacks
		funcs->setup_outbox0 = dmub_dcn20_setup_outbox0;
		funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr;
		funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr;

		if (asic == DMUB_ASIC_DCN21) {
			dmub->regs = &dmub_srv_dcn21_regs;

			funcs->is_phy_init = dmub_dcn21_is_phy_init;
		}
		if (asic == DMUB_ASIC_DCN30) {
			dmub->regs = &dmub_srv_dcn30_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN301) {
			dmub->regs = &dmub_srv_dcn301_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN302) {
			dmub->regs = &dmub_srv_dcn302_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN303) {
			dmub->regs = &dmub_srv_dcn303_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		break;

	default:
		return false;
	}

	return true;
}

/*
 * Software-initialize the DMUB service instance: copy creation params,
 * set up ASIC-specific hardware funcs, apply user overrides, and verify
 * the mandatory inbox accessors are present. On failure the instance is
 * zeroed via dmub_srv_destroy(); on success dmub->sw_init is set.
 */
enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
				 const struct dmub_srv_create_params *params)
{
	enum dmub_status status = DMUB_STATUS_OK;

	dmub_memset(dmub, 0, sizeof(*dmub));

	dmub->funcs = params->funcs;
	dmub->user_ctx = params->user_ctx;
	dmub->asic = params->asic;
	dmub->fw_version = params->fw_version;
	dmub->is_virtual = params->is_virtual;

	/* Setup asic dependent hardware funcs. */
	if (!dmub_srv_hw_setup(dmub, params->asic)) {
		status = DMUB_STATUS_INVALID;
		goto cleanup;
	}

	/* Override (some) hardware funcs based on user params. */
	if (params->hw_funcs) {
		if (params->hw_funcs->emul_get_inbox1_rptr)
			dmub->hw_funcs.emul_get_inbox1_rptr =
				params->hw_funcs->emul_get_inbox1_rptr;

		if (params->hw_funcs->emul_set_inbox1_wptr)
			dmub->hw_funcs.emul_set_inbox1_wptr =
				params->hw_funcs->emul_set_inbox1_wptr;

		if (params->hw_funcs->is_supported)
			dmub->hw_funcs.is_supported =
				params->hw_funcs->is_supported;
	}

	/* Sanity checks for required hw func pointers. */
	if (!dmub->hw_funcs.get_inbox1_rptr ||
	    !dmub->hw_funcs.set_inbox1_wptr) {
		status = DMUB_STATUS_INVALID;
		goto cleanup;
	}

cleanup:
	if (status == DMUB_STATUS_OK)
		dmub->sw_init = true;
	else
		dmub_srv_destroy(dmub);

	return status;
}

/* Tear down the service instance; safe to call on a partially created one. */
void dmub_srv_destroy(struct dmub_srv *dmub)
{
	dmub_memset(dmub, 0, sizeof(*dmub));
}

/*
 * Compute the layout of all DMUB framebuffer regions (windows) based on
 * the firmware blob sizes in @params. Region sizes for the firmware state
 * and trace buffer come from the firmware metadata when present, otherwise
 * compile-time defaults are used. Fills @out with per-window base/top
 * offsets and the total fb_size required.
 */
enum dmub_status
dmub_srv_calc_region_info(struct dmub_srv *dmub,
			  const struct dmub_srv_region_params *params,
			  struct dmub_srv_region_info *out)
{
	struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
	struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
	struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
	struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
	struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
	struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
	const struct dmub_fw_meta_info *fw_info;
	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
	uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	memset(out, 0, sizeof(*out));

	out->num_regions = DMUB_NUM_WINDOWS;

	inst->base = 0x0;
	inst->top = inst->base + params->inst_const_size;

	data->base = dmub_align(inst->top, 256);
	data->top = data->base + params->bss_data_size;

	/*
	 * All cache windows below should be aligned to the size
	 * of the DMCUB cache line, 64 bytes.
	 */

	stack->base = dmub_align(data->top, 256);
	stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;

	bios->base = dmub_align(stack->top, 256);
	bios->top = bios->base + params->vbios_size;

	mail->base = dmub_align(bios->top, 256);
	mail->top = mail->base + DMUB_MAILBOX_SIZE;

	fw_info = dmub_get_fw_meta_info(params);

	if (fw_info) {
		fw_state_size = fw_info->fw_region_size;
		trace_buffer_size = fw_info->trace_buffer_size;

		/**
		 * If DM didn't fill in a version, then fill it in based on
		 * the firmware meta now that we have it.
		 *
		 * TODO: Make it easier for driver to extract this out to
		 * pass during creation.
		 */
		if (dmub->fw_version == 0)
			dmub->fw_version = fw_info->fw_version;
	}

	trace_buff->base = dmub_align(mail->top, 256);
	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);

	fw_state->base = dmub_align(trace_buff->top, 256);
	fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);

	scratch_mem->base = dmub_align(fw_state->top, 256);
	scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);

	/* Total allocation is page (4K) aligned. */
	out->fb_size = dmub_align(scratch_mem->top, 4096);

	return DMUB_STATUS_OK;
}

/*
 * Translate the region offsets computed by dmub_srv_calc_region_info()
 * into concrete per-window framebuffers (CPU address, GPU address, size)
 * relative to the caller-provided base addresses in @params.
 */
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
				       const struct dmub_srv_fb_params *params,
				       struct dmub_srv_fb_info *out)
{
	uint8_t *cpu_base;
	uint64_t gpu_base;
	uint32_t i;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	memset(out, 0, sizeof(*out));

	if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
		return DMUB_STATUS_INVALID;

	cpu_base = (uint8_t *)params->cpu_addr;
	gpu_base = params->gpu_addr;

	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
		const struct dmub_region *reg =
			&params->region_info->regions[i];

		out->fb[i].cpu_addr = cpu_base + reg->base;
		out->fb[i].gpu_addr = gpu_base + reg->base;
		out->fb[i].size = reg->top - reg->base;
	}

	out->num_fb = DMUB_NUM_WINDOWS;

	return DMUB_STATUS_OK;
}

/*
 * Query whether the DMUB hardware is supported on this ASIC. Requires
 * sw_init; delegates to the ASIC-specific is_supported hook when present.
 */
enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
					 bool *is_supported)
{
	*is_supported = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.is_supported)
		*is_supported = dmub->hw_funcs.is_supported(dmub);

	return DMUB_STATUS_OK;
}

/*
 * Query whether the DMUB hardware has been initialized. Reports false
 * without touching hardware if this service instance never ran hw_init.
 */
enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
{
	*is_hw_init = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_init)
		return DMUB_STATUS_OK;

	if (dmub->hw_funcs.is_hw_init)
		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);

	return DMUB_STATUS_OK;
}

/*
 * Hardware-initialize the DMCUB: reset the controller, optionally backdoor
 * load the firmware, program the cache windows and mailboxes, initialize
 * the inbox1/outbox1/outbox0 ring buffers, report driver-supported boot
 * options, and release the controller from reset. Ordering of these steps
 * is significant and must not be changed.
 */
enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
				  const struct dmub_srv_hw_params *params)
{
	struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
	struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
	struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
	struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
	struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
	struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];

	struct dmub_rb_init_params rb_params, outbox0_rb_params;
	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
	struct dmub_region inbox1, outbox1, outbox0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
	    !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
		ASSERT(0);
		return DMUB_STATUS_INVALID;
	}

	dmub->fb_base = params->fb_base;
	dmub->fb_offset = params->fb_offset;
	dmub->psp_version = params->psp_version;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	cw0.offset.quad_part = inst_fb->gpu_addr;
	cw0.region.base = DMUB_CW0_BASE;
	cw0.region.top = cw0.region.base + inst_fb->size - 1;

	cw1.offset.quad_part = stack_fb->gpu_addr;
	cw1.region.base = DMUB_CW1_BASE;
	cw1.region.top = cw1.region.base + stack_fb->size - 1;

	if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
		/**
		 * Read back all the instruction memory so we don't hang the
		 * DMCUB when backdoor loading if the write from x86 hasn't been
		 * flushed yet. This only occurs in backdoor loading.
		 */
		dmub_flush_buffer_mem(inst_fb);
		dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
	}

	/* Data window follows directly after the inst/const window. */
	cw2.offset.quad_part = data_fb->gpu_addr;
	cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
	cw2.region.top = cw2.region.base + data_fb->size;

	cw3.offset.quad_part = bios_fb->gpu_addr;
	cw3.region.base = DMUB_CW3_BASE;
	cw3.region.top = cw3.region.base + bios_fb->size;

	cw4.offset.quad_part = mail_fb->gpu_addr;
	cw4.region.base = DMUB_CW4_BASE;
	cw4.region.top = cw4.region.base + mail_fb->size;

	/**
	 * Doubled the mailbox region to accommodate inbox and outbox.
	 * Note: Currently, total mailbox size is 16KB. It is split
	 * equally into 8KB between inbox and outbox. If this config is
	 * changed, then uncached base address configuration of outbox1
	 * has to be updated in funcs->setup_out_mailbox.
	 */
	inbox1.base = cw4.region.base;
	inbox1.top = cw4.region.base + DMUB_RB_SIZE;
	outbox1.base = inbox1.top;
	outbox1.top = cw4.region.top;

	cw5.offset.quad_part = tracebuff_fb->gpu_addr;
	cw5.region.base = DMUB_CW5_BASE;
	cw5.region.top = cw5.region.base + tracebuff_fb->size;

	/* Outbox0 shares the trace buffer window, past the entry header. */
	outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
	outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;

	cw6.offset.quad_part = fw_state_fb->gpu_addr;
	cw6.region.base = DMUB_CW6_BASE;
	cw6.region.top = cw6.region.base + fw_state_fb->size;

	dmub->fw_state = fw_state_fb->cpu_addr;

	dmub->scratch_mem_fb = *scratch_mem_fb;

	if (dmub->hw_funcs.setup_windows)
		dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);

	if (dmub->hw_funcs.setup_outbox0)
		dmub->hw_funcs.setup_outbox0(dmub, &outbox0);

	if (dmub->hw_funcs.setup_mailbox)
		dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
	if (dmub->hw_funcs.setup_out_mailbox)
		dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);

	/* Initialize inbox1 ring buffer (first half of the mailbox fb). */
	dmub_memset(&rb_params, 0, sizeof(rb_params));
	rb_params.ctx = dmub;
	rb_params.base_address = mail_fb->cpu_addr;
	rb_params.capacity = DMUB_RB_SIZE;
	dmub_rb_init(&dmub->inbox1_rb, &rb_params);

	// Initialize outbox1 ring buffer (second half of the mailbox fb)
	rb_params.ctx = dmub;
	rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
	rb_params.capacity = DMUB_RB_SIZE;
	dmub_rb_init(&dmub->outbox1_rb, &rb_params);

	dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
	outbox0_rb_params.ctx = dmub;
	outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
	outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
	dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);

	/* Report to DMUB what features are supported by current driver */
	if (dmub->hw_funcs.enable_dmub_boot_options)
		dmub->hw_funcs.enable_dmub_boot_options(dmub, params);

	if (dmub->hw_funcs.reset_release)
		dmub->hw_funcs.reset_release(dmub);

	dmub->hw_init = true;

	return DMUB_STATUS_OK;
}

/* Put the DMCUB back into reset and mark the service as not hw-initialized. */
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	dmub->hw_init = false;

	return DMUB_STATUS_OK;
}

/*
 * Queue a command into the inbox1 ring buffer. The command is not visible
 * to the firmware until dmub_srv_cmd_execute() updates the write pointer.
 */
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
				    const union dmub_rb_cmd *cmd)
{
	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
		return DMUB_STATUS_OK;

	return DMUB_STATUS_QUEUE_FULL;
}

/*
 * Kick off processing of all queued commands by publishing the inbox1
 * write pointer to the hardware.
 */
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
{
	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	/**
	 * Read back all the queued commands to ensure that they've
	 * been flushed to framebuffer memory. Otherwise DMCUB might
	 * read back stale, fully invalid or partially invalid data.
	 */
	dmub_rb_flush_pending(&dmub->inbox1_rb);

	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
	return DMUB_STATUS_OK;
}

/*
 * Poll (in 100 us steps, up to @timeout_us) until the firmware reports
 * both the DAL firmware running and the mailbox ready in its boot status.
 */
enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
					     uint32_t timeout_us)
{
	uint32_t i;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	for (i = 0; i <= timeout_us; i += 100) {
		union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub);

		if (status.bits.dal_fw && status.bits.mailbox_rdy)
			return DMUB_STATUS_OK;

		udelay(100);
	}

	return DMUB_STATUS_TIMEOUT;
}

/*
 * Poll (in 10 us steps, up to @timeout_us) until the firmware reports PHY
 * init complete. Succeeds immediately on ASICs without an is_phy_init hook.
 */
enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
					    uint32_t timeout_us)
{
	uint32_t i = 0;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.is_phy_init)
		return DMUB_STATUS_OK;

	for (i = 0; i <= timeout_us; i += 10) {
		if (dmub->hw_funcs.is_phy_init(dmub))
			return DMUB_STATUS_OK;

		udelay(10);
	}

	return DMUB_STATUS_TIMEOUT;
}

/*
 * Poll (1 us steps, up to @timeout_us) until the firmware has consumed
 * every queued inbox1 command (read pointer catches up to write pointer).
 */
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
					uint32_t timeout_us)
{
	uint32_t i;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	for (i = 0; i <= timeout_us; ++i) {
		dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
		if (dmub_rb_empty(&dmub->inbox1_rb))
			return DMUB_STATUS_OK;

		udelay(1);
	}

	return DMUB_STATUS_TIMEOUT;
}

/*
 * Send a GPINT (general-purpose interrupt) command to the firmware and
 * poll (1 us steps, up to @timeout_us) for the firmware to acknowledge it.
 * Only requires sw_init since GPINT can be used before full hw init.
 */
enum dmub_status
dmub_srv_send_gpint_command(struct dmub_srv *dmub,
			    enum dmub_gpint_command command_code,
			    uint16_t param, uint32_t timeout_us)
{
	union dmub_gpint_data_register reg;
	uint32_t i;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.set_gpint)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.is_gpint_acked)
		return DMUB_STATUS_INVALID;

	reg.bits.status = 1;
	reg.bits.command_code = command_code;
	reg.bits.param = param;

	dmub->hw_funcs.set_gpint(dmub, reg);

	for (i = 0; i < timeout_us; ++i) {
		udelay(1);

		if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
			return DMUB_STATUS_OK;
	}

	return DMUB_STATUS_TIMEOUT;
}

/*
 * Read back the response value of the most recent GPINT command.
 * @response is zeroed on failure.
 */
enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
					     uint32_t *response)
{
	*response = 0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.get_gpint_response)
		return DMUB_STATUS_INVALID;

	*response = dmub->hw_funcs.get_gpint_response(dmub);

	return DMUB_STATUS_OK;
}

/*
 * Read the firmware boot status register. @status is zeroed first so the
 * caller gets a defined value even when the hook is absent.
 */
enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
					     union dmub_fw_boot_status *status)
{
	status->all = 0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.get_fw_status)
		*status = dmub->hw_funcs.get_fw_status(dmub);

	return DMUB_STATUS_OK;
}

/*
 * Synchronously execute a command: queue it, execute, wait (up to 100 ms)
 * for the firmware to go idle, then copy the firmware's reply data back
 * into @cmd. Returns the first failing status, if any.
 */
enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
					      union dmub_rb_cmd *cmd)
{
	enum dmub_status status = DMUB_STATUS_OK;

	// Queue command
	status = dmub_srv_cmd_queue(dmub, cmd);

	if (status != DMUB_STATUS_OK)
		return status;

	// Execute command
	status = dmub_srv_cmd_execute(dmub);

	if (status != DMUB_STATUS_OK)
		return status;

	// Wait for DMUB to process command
	status = dmub_srv_wait_for_idle(dmub, 100000);

	if (status != DMUB_STATUS_OK)
		return status;

	// Copy data back from ring buffer into command
	dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);

	return status;
}

/*
 * Pop one trace-buffer entry from the front of @rb into @entry, copying
 * in 64-bit words. Returns false when the ring buffer is empty. The read
 * pointer wraps at the buffer capacity.
 */
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
						  void *entry)
{
	const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
	uint64_t *dst = (uint64_t *)entry;
	uint8_t i;
	uint8_t loop_count;

	if (rb->rptr == rb->wrpt)
		return false;

	loop_count = sizeof(struct dmcub_trace_buf_entry) / sizeof(uint64_t);
	// copying data
	for (i = 0; i < loop_count; i++)
		*dst++ = *src++;

	rb->rptr += sizeof(struct dmcub_trace_buf_entry);

	rb->rptr %= rb->capacity;

	return true;
}

/*
 * Fetch the next outbox0 (trace buffer) message from the firmware, first
 * refreshing the write pointer from hardware. Returns false if no message
 * is pending.
 */
bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry)
{
	dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub);

	return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}