/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "../inc/dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
#include "dmub_fw_meta.h"
#include "os_types.h"
/*
 * Note: the DMUB service is standalone. No additional headers should be
 * added below or above this line unless they reside within the DMUB
 * folder.
 */

/* Alignment for framebuffer memory. */
#define DMUB_FB_ALIGNMENT (1024 * 1024)

/* Stack size. */
#define DMUB_STACK_SIZE (128 * 1024)

/* Context size. */
#define DMUB_CONTEXT_SIZE (512 * 1024)

/* Mailbox size. */
#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE)

/* Default state size if meta is absent. */
#define DMUB_FW_STATE_SIZE (1024)

/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (1024)

/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (256)

/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)

/* Base addresses. */
#define DMUB_CW0_BASE (0x60000000)
#define DMUB_CW1_BASE (0x61000000)
#define DMUB_CW3_BASE (0x63000000)
#define DMUB_CW5_BASE (0x65000000)
#define DMUB_CW6_BASE (0x66000000)

static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
{
	return (val + factor - 1) / factor * factor;
}
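
/*
 * A worked example of dmub_align() (illustrative only): it rounds val up
 * to the next multiple of factor using integer division:
 *
 *	dmub_align(1000, 256) == (1000 + 255) / 256 * 256 == 1024
 *	dmub_align(1024, 256) == 1024
 *	dmub_align(100, 64)   == 128
 *
 * Every region base computed in dmub_srv_calc_region_info() below is
 * aligned this way, so window starts land on 256-byte boundaries and the
 * total framebuffer size on a 4096-byte boundary.
 */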

static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
{
	const uint8_t *base = (const uint8_t *)fb->cpu_addr;
	uint8_t buf[64];
	uint32_t pos, end;

	/**
	 * Read 64-byte chunks since we don't want to store a
	 * large temporary buffer for this purpose.
	 */
	end = fb->size / sizeof(buf) * sizeof(buf);

	for (pos = 0; pos < end; pos += sizeof(buf))
		dmub_memcpy(buf, base + pos, sizeof(buf));

	/* Read anything leftover into the buffer. */
	if (end < fb->size)
		dmub_memcpy(buf, base + pos, fb->size - end);
}

static const struct dmub_fw_meta_info *
dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
{
	const union dmub_fw_meta *meta;

	if (fw_bss_data == NULL)
		return NULL;

	if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
		return NULL;

	meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
					    DMUB_FW_META_OFFSET -
					    sizeof(union dmub_fw_meta));

	if (meta->info.magic_value != DMUB_FW_META_MAGIC)
		return NULL;

	return &meta->info;
}

static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;

	switch (asic) {
	case DMUB_ASIC_DCN20:
	case DMUB_ASIC_DCN21:
		dmub->regs = &dmub_srv_dcn20_regs;

		funcs->reset = dmub_dcn20_reset;
		funcs->reset_release = dmub_dcn20_reset_release;
		funcs->backdoor_load = dmub_dcn20_backdoor_load;
		funcs->setup_windows = dmub_dcn20_setup_windows;
		funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
		funcs->is_supported = dmub_dcn20_is_supported;
		funcs->is_hw_init = dmub_dcn20_is_hw_init;
		funcs->set_gpint = dmub_dcn20_set_gpint;
		funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
		funcs->get_gpint_response = dmub_dcn20_get_gpint_response;

		if (asic == DMUB_ASIC_DCN21) {
			dmub->regs = &dmub_srv_dcn21_regs;

			funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done;
			funcs->is_phy_init = dmub_dcn21_is_phy_init;
		}
		break;

	default:
		return false;
	}

	return true;
}

enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
				 const struct dmub_srv_create_params *params)
{
	enum dmub_status status = DMUB_STATUS_OK;

	dmub_memset(dmub, 0, sizeof(*dmub));

	dmub->funcs = params->funcs;
	dmub->user_ctx = params->user_ctx;
	dmub->asic = params->asic;
	dmub->is_virtual = params->is_virtual;

	/* Setup asic dependent hardware funcs. */
	if (!dmub_srv_hw_setup(dmub, params->asic)) {
		status = DMUB_STATUS_INVALID;
		goto cleanup;
	}

	/* Override (some) hardware funcs based on user params. */
	if (params->hw_funcs) {
		if (params->hw_funcs->get_inbox1_rptr)
			dmub->hw_funcs.get_inbox1_rptr =
				params->hw_funcs->get_inbox1_rptr;

		if (params->hw_funcs->set_inbox1_wptr)
			dmub->hw_funcs.set_inbox1_wptr =
				params->hw_funcs->set_inbox1_wptr;

		if (params->hw_funcs->is_supported)
			dmub->hw_funcs.is_supported =
				params->hw_funcs->is_supported;
	}

	/* Sanity checks for required hw func pointers. */
	if (!dmub->hw_funcs.get_inbox1_rptr ||
	    !dmub->hw_funcs.set_inbox1_wptr) {
		status = DMUB_STATUS_INVALID;
		goto cleanup;
	}

cleanup:
	if (status == DMUB_STATUS_OK)
		dmub->sw_init = true;
	else
		dmub_srv_destroy(dmub);

	return status;
}
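
/*
 * A minimal creation sketch, assuming a DCN21 part (illustrative only;
 * the base funcs, user_ctx and error handling a real caller supplies are
 * elided):
 *
 *	struct dmub_srv srv;
 *	struct dmub_srv_create_params params = { 0 };
 *
 *	params.asic = DMUB_ASIC_DCN21;
 *
 *	if (dmub_srv_create(&srv, &params) == DMUB_STATUS_OK) {
 *		...
 *		dmub_srv_destroy(&srv);
 *	}
 */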

void dmub_srv_destroy(struct dmub_srv *dmub)
{
	dmub_memset(dmub, 0, sizeof(*dmub));
}

enum dmub_status
dmub_srv_calc_region_info(struct dmub_srv *dmub,
			  const struct dmub_srv_region_params *params,
			  struct dmub_srv_region_info *out)
{
	struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
	struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
	struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
	struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
	struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
	struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
	const struct dmub_fw_meta_info *fw_info;
	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
	uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	memset(out, 0, sizeof(*out));

	out->num_regions = DMUB_NUM_WINDOWS;

	inst->base = 0x0;
	inst->top = inst->base + params->inst_const_size;

	data->base = dmub_align(inst->top, 256);
	data->top = data->base + params->bss_data_size;

	/*
	 * All cache windows below should be aligned to the size
	 * of the DMCUB cache line, 64 bytes.
	 */

	stack->base = dmub_align(data->top, 256);
	stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;

	bios->base = dmub_align(stack->top, 256);
	bios->top = bios->base + params->vbios_size;

	mail->base = dmub_align(bios->top, 256);
	mail->top = mail->base + DMUB_MAILBOX_SIZE;

	fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
					params->bss_data_size);

	if (fw_info) {
		fw_state_size = fw_info->fw_region_size;
		trace_buffer_size = fw_info->trace_buffer_size;
	}

	trace_buff->base = dmub_align(mail->top, 256);
	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);

	fw_state->base = dmub_align(trace_buff->top, 256);
	fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);

	scratch_mem->base = dmub_align(fw_state->top, 256);
	scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);

	out->fb_size = dmub_align(scratch_mem->top, 4096);

	return DMUB_STATUS_OK;
}
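
/*
 * For reference, the layout the calculation above produces, in ascending
 * offsets (note that window 2 (bss/data) is placed before window 1
 * (stack + context) in memory):
 *
 *	[0 inst/const][2 bss/data][1 stack+context][3 vbios]
 *	[4 mailbox][5 tracebuff][6 fw state][7 scratch]
 *
 * Each region base is rounded up to 256 bytes and the total to 4096.
 */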

enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
				       const struct dmub_srv_fb_params *params,
				       struct dmub_srv_fb_info *out)
{
	uint8_t *cpu_base;
	uint64_t gpu_base;
	uint32_t i;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	memset(out, 0, sizeof(*out));

	if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
		return DMUB_STATUS_INVALID;

	cpu_base = (uint8_t *)params->cpu_addr;
	gpu_base = params->gpu_addr;

	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
		const struct dmub_region *reg =
			&params->region_info->regions[i];

		out->fb[i].cpu_addr = cpu_base + reg->base;
		out->fb[i].gpu_addr = gpu_base + reg->base;
		out->fb[i].size = reg->top - reg->base;
	}

	out->num_fb = DMUB_NUM_WINDOWS;

	return DMUB_STATUS_OK;
}
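
/*
 * A sizing-then-placement sketch tying the two calculators together
 * (illustrative; allocate_fb() is a hypothetical caller-supplied
 * allocator, the fw_* variables are placeholders, and error handling is
 * elided):
 *
 *	struct dmub_srv_region_params region_params = { 0 };
 *	struct dmub_srv_region_info region_info;
 *	struct dmub_srv_fb_params fb_params;
 *	struct dmub_srv_fb_info fb_info;
 *
 *	region_params.inst_const_size = fw_inst_size;
 *	region_params.bss_data_size = fw_bss_size;
 *	region_params.fw_bss_data = fw_bss_data;
 *	region_params.vbios_size = vbios_size;
 *
 *	dmub_srv_calc_region_info(dmub, &region_params, &region_info);
 *
 *	allocate_fb(region_info.fb_size, &fb_params.cpu_addr,
 *		    &fb_params.gpu_addr);
 *	fb_params.region_info = &region_info;
 *
 *	dmub_srv_calc_fb_info(dmub, &fb_params, &fb_info);
 */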

enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
					 bool *is_supported)
{
	*is_supported = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.is_supported)
		*is_supported = dmub->hw_funcs.is_supported(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
{
	*is_hw_init = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_init)
		return DMUB_STATUS_OK;

	if (dmub->hw_funcs.is_hw_init)
		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
				  const struct dmub_srv_hw_params *params)
{
	struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
	struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
	struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
	struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
	struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
	struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];

	struct dmub_rb_init_params rb_params;
	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
	struct dmub_region inbox1;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	dmub->fb_base = params->fb_base;
	dmub->fb_offset = params->fb_offset;
	dmub->psp_version = params->psp_version;

	if (inst_fb && data_fb) {
		cw0.offset.quad_part = inst_fb->gpu_addr;
		cw0.region.base = DMUB_CW0_BASE;
		cw0.region.top = cw0.region.base + inst_fb->size - 1;

		cw1.offset.quad_part = stack_fb->gpu_addr;
		cw1.region.base = DMUB_CW1_BASE;
		cw1.region.top = cw1.region.base + stack_fb->size - 1;

		/**
		 * Read back all the instruction memory so we don't hang the
		 * DMCUB when backdoor loading if the write from x86 hasn't
		 * been flushed yet. This only occurs in backdoor loading.
		 */
		dmub_flush_buffer_mem(inst_fb);

		if (params->load_inst_const && dmub->hw_funcs.backdoor_load)
			dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
	}

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
	    fw_state_fb && scratch_mem_fb) {
		cw2.offset.quad_part = data_fb->gpu_addr;
		cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
		cw2.region.top = cw2.region.base + data_fb->size;

		cw3.offset.quad_part = bios_fb->gpu_addr;
		cw3.region.base = DMUB_CW3_BASE;
		cw3.region.top = cw3.region.base + bios_fb->size;

		cw4.offset.quad_part = mail_fb->gpu_addr;
		cw4.region.base = cw3.region.top + 1;
		cw4.region.top = cw4.region.base + mail_fb->size;

		inbox1.base = cw4.region.base;
		inbox1.top = cw4.region.top;

		cw5.offset.quad_part = tracebuff_fb->gpu_addr;
		cw5.region.base = DMUB_CW5_BASE;
		cw5.region.top = cw5.region.base + tracebuff_fb->size;

		cw6.offset.quad_part = fw_state_fb->gpu_addr;
		cw6.region.base = DMUB_CW6_BASE;
		cw6.region.top = cw6.region.base + fw_state_fb->size;

		dmub->fw_state = fw_state_fb->cpu_addr;

		dmub->scratch_mem_fb = *scratch_mem_fb;

		if (dmub->hw_funcs.setup_windows)
			dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
						     &cw5, &cw6);

		if (dmub->hw_funcs.setup_mailbox)
			dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
	}

	if (mail_fb) {
		dmub_memset(&rb_params, 0, sizeof(rb_params));
		rb_params.ctx = dmub;
		rb_params.base_address = mail_fb->cpu_addr;
		rb_params.capacity = DMUB_RB_SIZE;

		dmub_rb_init(&dmub->inbox1_rb, &rb_params);
	}

	if (dmub->hw_funcs.reset_release)
		dmub->hw_funcs.reset_release(dmub);

	dmub->hw_init = true;

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_init)
		return DMUB_STATUS_OK;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	dmub->hw_init = false;

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
				    const struct dmub_cmd_header *cmd)
{
	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
		return DMUB_STATUS_OK;

	return DMUB_STATUS_QUEUE_FULL;
}

enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
{
	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	/**
	 * Read back all the queued commands to ensure that they've
	 * been flushed to framebuffer memory. Otherwise DMCUB might
	 * read back stale, fully invalid or partially invalid data.
	 */
	dmub_rb_flush_pending(&dmub->inbox1_rb);

	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);

	return DMUB_STATUS_OK;
}
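
/*
 * The expected submission flow pairs the two calls above with
 * dmub_srv_wait_for_idle() below. A sketch (illustrative only; `cmd`
 * stands in for a fully populated command and the timeout value is
 * arbitrary):
 *
 *	struct dmub_cmd_header cmd = { 0 };
 *
 *	if (dmub_srv_cmd_queue(dmub, &cmd) == DMUB_STATUS_OK &&
 *	    dmub_srv_cmd_execute(dmub) == DMUB_STATUS_OK)
 *		dmub_srv_wait_for_idle(dmub, 100000);
 */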
472 */ 473 dmub_rb_flush_pending(&dmub->inbox1_rb); 474 475 dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); 476 return DMUB_STATUS_OK; 477 } 478 479 enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, 480 uint32_t timeout_us) 481 { 482 uint32_t i; 483 484 if (!dmub->hw_init) 485 return DMUB_STATUS_INVALID; 486 487 if (!dmub->hw_funcs.is_auto_load_done) 488 return DMUB_STATUS_OK; 489 490 for (i = 0; i <= timeout_us; i += 100) { 491 if (dmub->hw_funcs.is_auto_load_done(dmub)) 492 return DMUB_STATUS_OK; 493 494 udelay(100); 495 } 496 497 return DMUB_STATUS_TIMEOUT; 498 } 499 500 enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, 501 uint32_t timeout_us) 502 { 503 uint32_t i = 0; 504 505 if (!dmub->hw_init) 506 return DMUB_STATUS_INVALID; 507 508 if (!dmub->hw_funcs.is_phy_init) 509 return DMUB_STATUS_OK; 510 511 for (i = 0; i <= timeout_us; i += 10) { 512 if (dmub->hw_funcs.is_phy_init(dmub)) 513 return DMUB_STATUS_OK; 514 515 udelay(10); 516 } 517 518 return DMUB_STATUS_TIMEOUT; 519 } 520 521 enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, 522 uint32_t timeout_us) 523 { 524 uint32_t i; 525 526 if (!dmub->hw_init) 527 return DMUB_STATUS_INVALID; 528 529 for (i = 0; i <= timeout_us; ++i) { 530 dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); 531 if (dmub_rb_empty(&dmub->inbox1_rb)) 532 return DMUB_STATUS_OK; 533 534 udelay(1); 535 } 536 537 return DMUB_STATUS_TIMEOUT; 538 } 539 540 enum dmub_status 541 dmub_srv_send_gpint_command(struct dmub_srv *dmub, 542 enum dmub_gpint_command command_code, 543 uint16_t param, uint32_t timeout_us) 544 { 545 union dmub_gpint_data_register reg; 546 uint32_t i; 547 548 if (!dmub->sw_init) 549 return DMUB_STATUS_INVALID; 550 551 if (!dmub->hw_funcs.set_gpint) 552 return DMUB_STATUS_INVALID; 553 554 if (!dmub->hw_funcs.is_gpint_acked) 555 return DMUB_STATUS_INVALID; 556 557 reg.bits.status = 1; 558 reg.bits.command_code = command_code; 559 reg.bits.param = param; 560 561 dmub->hw_funcs.set_gpint(dmub, reg); 562 563 for (i = 0; i < timeout_us; ++i) { 564 if (dmub->hw_funcs.is_gpint_acked(dmub, reg)) 565 return DMUB_STATUS_OK; 566 } 567 568 return DMUB_STATUS_TIMEOUT; 569 } 570 571 enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub, 572 uint32_t *response) 573 { 574 *response = 0; 575 576 if (!dmub->sw_init) 577 return DMUB_STATUS_INVALID; 578 579 if (!dmub->hw_funcs.get_gpint_response) 580 return DMUB_STATUS_INVALID; 581 582 *response = dmub->hw_funcs.get_gpint_response(dmub); 583 584 return DMUB_STATUS_OK; 585 } 586