1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2016-2019 Intel Corporation 4 */ 5 6 #include <linux/bitfield.h> 7 #include <linux/firmware.h> 8 #include <linux/highmem.h> 9 10 #include <drm/drm_cache.h> 11 #include <drm/drm_print.h> 12 13 #include "gem/i915_gem_lmem.h" 14 #include "intel_uc_fw.h" 15 #include "intel_uc_fw_abi.h" 16 #include "i915_drv.h" 17 #include "i915_reg.h" 18 19 static inline struct intel_gt * 20 ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type) 21 { 22 if (type == INTEL_UC_FW_TYPE_GUC) 23 return container_of(uc_fw, struct intel_gt, uc.guc.fw); 24 25 GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC); 26 return container_of(uc_fw, struct intel_gt, uc.huc.fw); 27 } 28 29 static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw) 30 { 31 GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED); 32 return ____uc_fw_to_gt(uc_fw, uc_fw->type); 33 } 34 35 #ifdef CONFIG_DRM_I915_DEBUG_GUC 36 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, 37 enum intel_uc_fw_status status) 38 { 39 uc_fw->__status = status; 40 drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm, 41 "%s firmware -> %s\n", 42 intel_uc_fw_type_repr(uc_fw->type), 43 status == INTEL_UC_FIRMWARE_SELECTED ? 44 uc_fw->path : intel_uc_fw_status_repr(status)); 45 } 46 #endif 47 48 /* 49 * List of required GuC and HuC binaries per-platform. 50 * Must be ordered based on platform + revid, from newer to older. 51 * 52 * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same 53 * firmware as TGL. 
54 */ 55 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \ 56 fw_def(DG2, 0, guc_def(dg2, 70, 1, 2)) \ 57 fw_def(ALDERLAKE_P, 0, guc_def(adlp, 70, 1, 1)) \ 58 fw_def(ALDERLAKE_S, 0, guc_def(tgl, 70, 1, 1)) \ 59 fw_def(DG1, 0, guc_def(dg1, 70, 1, 1)) \ 60 fw_def(ROCKETLAKE, 0, guc_def(tgl, 70, 1, 1)) \ 61 fw_def(TIGERLAKE, 0, guc_def(tgl, 70, 1, 1)) \ 62 fw_def(JASPERLAKE, 0, guc_def(ehl, 70, 1, 1)) \ 63 fw_def(ELKHARTLAKE, 0, guc_def(ehl, 70, 1, 1)) \ 64 fw_def(ICELAKE, 0, guc_def(icl, 70, 1, 1)) \ 65 fw_def(COMETLAKE, 5, guc_def(cml, 70, 1, 1)) \ 66 fw_def(COMETLAKE, 0, guc_def(kbl, 70, 1, 1)) \ 67 fw_def(COFFEELAKE, 0, guc_def(kbl, 70, 1, 1)) \ 68 fw_def(GEMINILAKE, 0, guc_def(glk, 70, 1, 1)) \ 69 fw_def(KABYLAKE, 0, guc_def(kbl, 70, 1, 1)) \ 70 fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \ 71 fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1)) 72 73 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \ 74 fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \ 75 fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \ 76 fw_def(DG1, 0, huc_def(dg1, 7, 9, 3)) \ 77 fw_def(ROCKETLAKE, 0, huc_def(tgl, 7, 9, 3)) \ 78 fw_def(TIGERLAKE, 0, huc_def(tgl, 7, 9, 3)) \ 79 fw_def(JASPERLAKE, 0, huc_def(ehl, 9, 0, 0)) \ 80 fw_def(ELKHARTLAKE, 0, huc_def(ehl, 9, 0, 0)) \ 81 fw_def(ICELAKE, 0, huc_def(icl, 9, 0, 0)) \ 82 fw_def(COMETLAKE, 5, huc_def(cml, 4, 0, 0)) \ 83 fw_def(COMETLAKE, 0, huc_def(kbl, 4, 0, 0)) \ 84 fw_def(COFFEELAKE, 0, huc_def(kbl, 4, 0, 0)) \ 85 fw_def(GEMINILAKE, 0, huc_def(glk, 4, 0, 0)) \ 86 fw_def(KABYLAKE, 0, huc_def(kbl, 4, 0, 0)) \ 87 fw_def(BROXTON, 0, huc_def(bxt, 2, 0, 0)) \ 88 fw_def(SKYLAKE, 0, huc_def(skl, 2, 0, 0)) 89 90 #define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \ 91 "i915/" \ 92 __stringify(prefix_) name_ \ 93 __stringify(major_) "." \ 94 __stringify(minor_) "." 
\ 95 __stringify(patch_) ".bin" 96 97 #define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \ 98 __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_) 99 100 #define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \ 101 __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_) 102 103 /* All blobs need to be declared via MODULE_FIRMWARE() */ 104 #define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \ 105 MODULE_FIRMWARE(uc_); 106 107 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH) 108 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH) 109 110 /* The below structs and macros are used to iterate across the list of blobs */ 111 struct __packed uc_fw_blob { 112 u8 major; 113 u8 minor; 114 const char *path; 115 }; 116 117 #define UC_FW_BLOB(major_, minor_, path_) \ 118 { .major = major_, .minor = minor_, .path = path_ } 119 120 #define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \ 121 UC_FW_BLOB(major_, minor_, \ 122 MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_)) 123 124 #define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \ 125 UC_FW_BLOB(major_, minor_, \ 126 MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_)) 127 128 struct __packed uc_fw_platform_requirement { 129 enum intel_platform p; 130 u8 rev; /* first platform rev using this FW */ 131 const struct uc_fw_blob blob; 132 }; 133 134 #define MAKE_FW_LIST(platform_, revid_, uc_) \ 135 { \ 136 .p = INTEL_##platform_, \ 137 .rev = revid_, \ 138 .blob = uc_, \ 139 }, 140 141 struct fw_blobs_by_type { 142 const struct uc_fw_platform_requirement *blobs; 143 u32 count; 144 }; 145 146 static void 147 __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) 148 { 149 static const struct uc_fw_platform_requirement blobs_guc[] = { 150 INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB) 151 }; 152 static const struct uc_fw_platform_requirement blobs_huc[] = { 153 INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB) 154 }; 155 static const struct fw_blobs_by_type 
blobs_all[INTEL_UC_FW_NUM_TYPES] = { 156 [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) }, 157 [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) }, 158 }; 159 const struct uc_fw_platform_requirement *fw_blobs; 160 enum intel_platform p = INTEL_INFO(i915)->platform; 161 u32 fw_count; 162 u8 rev = INTEL_REVID(i915); 163 int i; 164 165 /* 166 * The only difference between the ADL GuC FWs is the HWConfig support. 167 * ADL-N does not support HWConfig, so we should use the same binary as 168 * ADL-S, otherwise the GuC might attempt to fetch a config table that 169 * does not exist. 170 */ 171 if (IS_ADLP_N(i915)) 172 p = INTEL_ALDERLAKE_S; 173 174 GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all)); 175 fw_blobs = blobs_all[uc_fw->type].blobs; 176 fw_count = blobs_all[uc_fw->type].count; 177 178 for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) { 179 if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) { 180 const struct uc_fw_blob *blob = &fw_blobs[i].blob; 181 uc_fw->path = blob->path; 182 uc_fw->major_ver_wanted = blob->major; 183 uc_fw->minor_ver_wanted = blob->minor; 184 break; 185 } 186 } 187 188 /* make sure the list is ordered as expected */ 189 if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) { 190 for (i = 1; i < fw_count; i++) { 191 if (fw_blobs[i].p < fw_blobs[i - 1].p) 192 continue; 193 194 if (fw_blobs[i].p == fw_blobs[i - 1].p && 195 fw_blobs[i].rev < fw_blobs[i - 1].rev) 196 continue; 197 198 pr_err("invalid FW blob order: %s r%u comes before %s r%u\n", 199 intel_platform_name(fw_blobs[i - 1].p), 200 fw_blobs[i - 1].rev, 201 intel_platform_name(fw_blobs[i].p), 202 fw_blobs[i].rev); 203 204 uc_fw->path = NULL; 205 } 206 } 207 } 208 209 static const char *__override_guc_firmware_path(struct drm_i915_private *i915) 210 { 211 if (i915->params.enable_guc & ENABLE_GUC_MASK) 212 return i915->params.guc_firmware_path; 213 return ""; 214 } 215 216 static const char *__override_huc_firmware_path(struct drm_i915_private *i915) 217 { 218 if 
(i915->params.enable_guc & ENABLE_GUC_LOAD_HUC) 219 return i915->params.huc_firmware_path; 220 return ""; 221 } 222 223 static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) 224 { 225 const char *path = NULL; 226 227 switch (uc_fw->type) { 228 case INTEL_UC_FW_TYPE_GUC: 229 path = __override_guc_firmware_path(i915); 230 break; 231 case INTEL_UC_FW_TYPE_HUC: 232 path = __override_huc_firmware_path(i915); 233 break; 234 } 235 236 if (unlikely(path)) { 237 uc_fw->path = path; 238 uc_fw->user_overridden = true; 239 } 240 } 241 242 /** 243 * intel_uc_fw_init_early - initialize the uC object and select the firmware 244 * @uc_fw: uC firmware 245 * @type: type of uC 246 * 247 * Initialize the state of our uC object and relevant tracking and select the 248 * firmware to fetch and load. 249 */ 250 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, 251 enum intel_uc_fw_type type) 252 { 253 struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915; 254 255 /* 256 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status 257 * before we're looked at the HW caps to see if we have uc support 258 */ 259 BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); 260 GEM_BUG_ON(uc_fw->status); 261 GEM_BUG_ON(uc_fw->path); 262 263 uc_fw->type = type; 264 265 if (HAS_GT_UC(i915)) { 266 __uc_fw_auto_select(i915, uc_fw); 267 __uc_fw_user_override(i915, uc_fw); 268 } 269 270 intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ? 
271 INTEL_UC_FIRMWARE_SELECTED : 272 INTEL_UC_FIRMWARE_DISABLED : 273 INTEL_UC_FIRMWARE_NOT_SUPPORTED); 274 } 275 276 static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e) 277 { 278 struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915; 279 bool user = e == -EINVAL; 280 281 if (i915_inject_probe_error(i915, e)) { 282 /* non-existing blob */ 283 uc_fw->path = "<invalid>"; 284 uc_fw->user_overridden = user; 285 } else if (i915_inject_probe_error(i915, e)) { 286 /* require next major version */ 287 uc_fw->major_ver_wanted += 1; 288 uc_fw->minor_ver_wanted = 0; 289 uc_fw->user_overridden = user; 290 } else if (i915_inject_probe_error(i915, e)) { 291 /* require next minor version */ 292 uc_fw->minor_ver_wanted += 1; 293 uc_fw->user_overridden = user; 294 } else if (uc_fw->major_ver_wanted && 295 i915_inject_probe_error(i915, e)) { 296 /* require prev major version */ 297 uc_fw->major_ver_wanted -= 1; 298 uc_fw->minor_ver_wanted = 0; 299 uc_fw->user_overridden = user; 300 } else if (uc_fw->minor_ver_wanted && 301 i915_inject_probe_error(i915, e)) { 302 /* require prev minor version - hey, this should work! 
*/ 303 uc_fw->minor_ver_wanted -= 1; 304 uc_fw->user_overridden = user; 305 } else if (user && i915_inject_probe_error(i915, e)) { 306 /* officially unsupported platform */ 307 uc_fw->major_ver_wanted = 0; 308 uc_fw->minor_ver_wanted = 0; 309 uc_fw->user_overridden = true; 310 } 311 } 312 313 static int check_gsc_manifest(const struct firmware *fw, 314 struct intel_uc_fw *uc_fw) 315 { 316 u32 *dw = (u32 *)fw->data; 317 u32 version = dw[HUC_GSC_VERSION_DW]; 318 319 uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version); 320 uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version); 321 322 return 0; 323 } 324 325 static int check_ccs_header(struct drm_i915_private *i915, 326 const struct firmware *fw, 327 struct intel_uc_fw *uc_fw) 328 { 329 struct uc_css_header *css; 330 size_t size; 331 332 /* Check the size of the blob before examining buffer contents */ 333 if (unlikely(fw->size < sizeof(struct uc_css_header))) { 334 drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n", 335 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, 336 fw->size, sizeof(struct uc_css_header)); 337 return -ENODATA; 338 } 339 340 css = (struct uc_css_header *)fw->data; 341 342 /* Check integrity of size values inside CSS header */ 343 size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw - 344 css->exponent_size_dw) * sizeof(u32); 345 if (unlikely(size != sizeof(struct uc_css_header))) { 346 drm_warn(&i915->drm, 347 "%s firmware %s: unexpected header size: %zu != %zu\n", 348 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, 349 fw->size, sizeof(struct uc_css_header)); 350 return -EPROTO; 351 } 352 353 /* uCode size must calculated from other sizes */ 354 uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); 355 356 /* now RSA */ 357 uc_fw->rsa_size = css->key_size_dw * sizeof(u32); 358 359 /* At least, it should have header, uCode and RSA. Size of all three. 
*/ 360 size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size; 361 if (unlikely(fw->size < size)) { 362 drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n", 363 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, 364 fw->size, size); 365 return -ENOEXEC; 366 } 367 368 /* Sanity check whether this fw is not larger than whole WOPCM memory */ 369 size = __intel_uc_fw_get_upload_size(uc_fw); 370 if (unlikely(size >= i915->wopcm.size)) { 371 drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n", 372 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, 373 size, (size_t)i915->wopcm.size); 374 return -E2BIG; 375 } 376 377 /* Get version numbers from the CSS header */ 378 uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, 379 css->sw_version); 380 uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR, 381 css->sw_version); 382 383 if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) 384 uc_fw->private_data_size = css->private_data_size; 385 386 return 0; 387 } 388 389 /** 390 * intel_uc_fw_fetch - fetch uC firmware 391 * @uc_fw: uC firmware 392 * 393 * Fetch uC firmware into GEM obj. 394 * 395 * Return: 0 on success, a negative errno code on failure. 
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	/* optionally sabotage the fetch for selftest fault injection */
	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = request_firmware(&fw, uc_fw->path, dev);
	if (err)
		goto fail;

	/* validate the blob header and extract found version + sizes */
	if (uc_fw->loaded_via_gsc)
		err = check_gsc_manifest(fw, uc_fw);
	else
		err = check_ccs_header(i915, fw, uc_fw);
	if (err)
		goto fail;

	/*
	 * Major version must match exactly; minor may be newer than wanted.
	 * A user-overridden blob is allowed through with just the notice.
	 */
	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	/* copy the blob into a GEM object; prefer local memory when present */
	if (HAS_LMEM(i915)) {
		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
		if (!IS_ERR(obj))
			obj->flags |= I915_BO_ALLOC_PM_EARLY;
	} else {
		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	}

	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
}

/*
 * GGTT offset of the reserved node the fw is bound into for the DMA
 * transfer. Must fit in 32 bits, as asserted below.
 */
static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct drm_mm_node *node = &ggtt->uc_fw;

	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(upper_32_bits(node->start));
	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));

	return lower_32_bits(node->start);
}

/*
 * Temporarily bind the fw object's pages into the reserved GGTT node via a
 * dummy vma resource so the DMA engine can read the blob from GGTT.
 */
static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma_resource *dummy = &uc_fw->dummy;
	u32 pte_flags = 0;

	dummy->start = uc_fw_ggtt_offset(uc_fw);
	dummy->node_size = obj->base.size;
	dummy->bi.pages = obj->mm.pages;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	if (i915_gem_object_has_struct_page(obj))
		drm_clflush_sg(dummy->bi.pages);

	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	/* use the raw (uninstrumented) insert path when available */
	if (ggtt->vm.raw_insert_entries)
		ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
	else
		ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}

/* Undo uc_fw_bind_ggtt() by clearing the GGTT range used for the upload. */
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	u64 start = uc_fw_ggtt_offset(uc_fw);

	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}

/*
 * Program the DMA engine to copy header + uCode from the GGTT-bound blob
 * into WOPCM at @dst_offset, then wait for completion. Runs with forcewake
 * held; returns 0 on success or -ETIMEDOUT if the DMA does not finish.
 */
static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}

/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @dst_offset: destination offset
 * @dma_flags: flags for flags for dma ctrl
 *
 * Loads uC firmware and updates internal flags.
 *
 * Return: 0 on success, non-zero on failure.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	int err;

	/* make sure the status was cleared the last time we reset the uc */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
	if (err)
		return err;

	if (!intel_uc_fw_is_loadable(uc_fw))
		return -ENOEXEC;

	/* Call custom loader: bind into GGTT only for the duration of the DMA */
	uc_fw_bind_ggtt(uc_fw);
	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
	uc_fw_unbind_ggtt(uc_fw);
	if (err)
		goto fail;

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
	return 0;

fail:
	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 err);
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
	return err;
}

static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
{
	/*
	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
	 * while it reads it from the 64 RSA registers if it is smaller.
	 * The HuC RSA is always read from memory.
	 */
	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
}

/*
 * Allocate a separate GuC-accessible vma holding only the RSA signature,
 * for firmwares whose RSA must be authenticated from memory (see
 * uc_fw_need_rsa_in_memory()). No-op when the RSA fits in the registers.
 */
static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct i915_vma *vma;
	size_t copied;
	void *vaddr;
	int err;

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	if (!uc_fw_need_rsa_in_memory(uc_fw))
		return 0;

	/*
	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
	 * authentication from memory, as the RSA offset now falls within the
	 * GuC inaccessible range. We resort to perma-pinning an additional vma
	 * within the accessible range that only contains the RSA signature.
	 * The GuC HW can use this extra pinning to perform the authentication
	 * since its GGTT offset will be GuC accessible.
	 */
	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(gt->i915, vma->obj, true));
	if (IS_ERR(vaddr)) {
		/*
		 * release here zeroes vma, so the second release at unpin_out
		 * is a harmless no-op
		 */
		i915_vma_unpin_and_release(&vma, 0);
		err = PTR_ERR(vaddr);
		goto unpin_out;
	}

	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
	i915_gem_object_unpin_map(vma->obj);

	/* a short copy means the blob's RSA section was truncated */
	if (copied < uc_fw->rsa_size) {
		err = -ENOMEM;
		goto unpin_out;
	}

	uc_fw->rsa_data = vma;

	return 0;

unpin_out:
	i915_vma_unpin_and_release(&vma, 0);
	return err;
}

/* Release the RSA vma created by uc_fw_rsa_data_create() (NULL-safe). */
static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}

/*
 * Pin the fetched firmware pages and set up the RSA data vma in
 * preparation for upload. On failure the status is set to INIT_FAIL.
 */
int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
	int err;

	/* this should happen before the load! */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out;
	}

	err = uc_fw_rsa_data_create(uc_fw);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out_unpin;
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(uc_fw->obj);
out:
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	return err;
}

/* Undo intel_uc_fw_init(): drop the RSA vma and unpin the fw pages. */
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	uc_fw_rsa_data_destroy(uc_fw);

	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}

/**
 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
 * @uc_fw: uC firmware
 *
 * Cleans up uC firmware by releasing the firmware GEM obj.
 */
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return;

	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));

	/* back to SELECTED: the blob can be fetched again */
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}

/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct intel_memory_region *mr = uc_fw->obj->mm.region;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	/* RSA sits right after the CSS header and the uCode in the blob */
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
	struct sgt_iter iter;
	size_t count = 0;
	int idx;

	/* Called during reset handling, must be atomic [no fs_reclaim] */
	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

	/* split the byte offset into a page index and an in-page offset */
	idx = offset >> PAGE_SHIFT;
	offset = offset_in_page(offset);
	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
		struct page *page;

		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void *vaddr;

			/* skip whole pages before the RSA start */
			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = kmap_atomic(page);
			memcpy(dst, vaddr + offset, len);
			kunmap_atomic(vaddr);

			/* only the first copied page has a non-zero offset */
			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	} else {
		/* no struct pages: copy through an atomic WC io mapping */
		dma_addr_t addr;

		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void __iomem *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
							 addr - mr->region.start);
			memcpy_fromio(dst, vaddr + offset, len);
			io_mapping_unmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	}

	return count;
}

/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
821 */ 822 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) 823 { 824 drm_printf(p, "%s firmware: %s\n", 825 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); 826 drm_printf(p, "\tstatus: %s\n", 827 intel_uc_fw_status_repr(uc_fw->status)); 828 drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", 829 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, 830 uc_fw->major_ver_found, uc_fw->minor_ver_found); 831 drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size); 832 drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size); 833 } 834