/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mmu.h>

static struct nvkm_acr_hsf *
nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name)
{
	struct nvkm_acr_hsf *hsf;
	list_for_each_entry(hsf, &acr->hsf, head) {
		if (!strcmp(hsf->name, name))
			return hsf;
	}
	return NULL;
}

int
nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_acr_hsf *hsf;
	int ret;

	hsf = nvkm_acr_hsf_find(acr, name);
	if (!hsf)
		return -EINVAL;

	nvkm_debug(subdev, "executing %s binary\n", hsf->name);
	ret = nvkm_falcon_get(hsf->falcon, subdev);
	if (ret)
		return ret;

	ret = hsf->func->boot(acr, hsf);
	nvkm_falcon_put(hsf->falcon, subdev);
	if (ret) {
		nvkm_error(subdev, "%s binary failed\n", hsf->name);
		return ret;
	}

	nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name);
	return 0;
}

static void
nvkm_acr_unload(struct nvkm_acr *acr)
{
	if (acr->done) {
		nvkm_acr_hsf_boot(acr, "unload");
		acr->done = false;
	}
}

static int
nvkm_acr_load(struct nvkm_acr *acr)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_acr_lsf *lsf;
	u64 start, limit;
	int ret;

	if (list_empty(&acr->lsf)) {
		nvkm_debug(subdev, "No LSF(s) present.\n");
		return 0;
	}

	ret = acr->func->init(acr);
	if (ret)
		return ret;

	acr->func->wpr_check(acr, &start, &limit);

	if (start != acr->wpr_start || limit != acr->wpr_end) {
		nvkm_error(subdev, "WPR not configured as expected: "
				   "%016llx-%016llx vs %016llx-%016llx\n",
			   acr->wpr_start, acr->wpr_end, start, limit);
		return -EIO;
	}

	acr->done = true;

	list_for_each_entry(lsf, &acr->lsf, head) {
		if (lsf->func->boot) {
			ret = lsf->func->boot(lsf->falcon);
			if (ret)
				break;
		}
	}

	return ret;
}

static int
nvkm_acr_reload(struct nvkm_acr *acr)
{
	nvkm_acr_unload(acr);
	return nvkm_acr_load(acr);
}

static struct nvkm_acr_lsf *
nvkm_acr_falcon(struct nvkm_device *device)
{
	struct nvkm_acr *acr = device->acr;
	struct nvkm_acr_lsf *lsf;

	if (acr) {
		list_for_each_entry(lsf, &acr->lsf, head) {
			if (lsf->func->bootstrap_falcon)
				return lsf;
		}
	}

	return NULL;
}
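/* Bootstrap (or re-bootstrap) the LS falcons named in @mask: preferably
 * via an LS falcon whose firmware provides bootstrap support, otherwise
 * by re-running the HS firmware where it can bootstrap them itself.
 */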
int
nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
{
	struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device);
	struct nvkm_acr *acr = device->acr;
	unsigned long id;

	/* No ACR subdev at all - nothing can bootstrap the falcons. */
	if (!acr)
		return -ENOSYS;

	/* If there's no LS FW managing bootstrapping of other LS falcons,
	 * we depend on the HS firmware being able to do it instead.
	 */
	if (!acrflcn) {
		/* Which isn't possible everywhere... */
		if ((mask & acr->func->bootstrap_falcons) == mask) {
			int ret = nvkm_acr_reload(acr);
			if (ret)
				return ret;

			return acr->done ? 0 : -EINVAL;
		}
		return -ENOSYS;
	}

	if ((mask & acrflcn->func->bootstrap_falcons) != mask)
		return -ENOSYS;

	if (acrflcn->func->bootstrap_multiple_falcons) {
		return acrflcn->func->
			bootstrap_multiple_falcons(acrflcn->falcon, mask);
	}

	for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
		int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id);
		if (ret)
			return ret;
	}

	return 0;
}

bool
nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
{
	struct nvkm_acr *acr = device->acr;

	if (acr) {
		if (acr->managed_falcons & BIT_ULL(id))
			return true;
	}

	return false;
}

static int
nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
{
	nvkm_acr_unload(nvkm_acr(subdev));
	return 0;
}

static int
nvkm_acr_init(struct nvkm_subdev *subdev)
{
	if (!nvkm_acr_falcon(subdev->device))
		return 0;

	return nvkm_acr_load(nvkm_acr(subdev));
}

static void
nvkm_acr_cleanup(struct nvkm_acr *acr)
{
	nvkm_acr_lsfw_del_all(acr);
	nvkm_acr_hsfw_del_all(acr);
	nvkm_firmware_put(acr->wpr_fw);
	acr->wpr_fw = NULL;
}
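/* One-time setup: determine the set of LS falcons to manage, build the
 * WPR image describing them, allocate (dGPU) or locate (Tegra) the WPR
 * region itself, and load the HS firmware blobs into ACR's own VMM.
 */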
static int
nvkm_acr_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nvkm_acr *acr = nvkm_acr(subdev);
	struct nvkm_acr_hsfw *hsfw;
	struct nvkm_acr_lsfw *lsfw, *lsft;
	struct nvkm_acr_lsf *lsf;
	u32 wpr_size = 0;
	u64 falcons;
	int ret, i;

	if (list_empty(&acr->hsfw)) {
		nvkm_debug(subdev, "No HSFW(s)\n");
		nvkm_acr_cleanup(acr);
		return 0;
	}

	/* Determine layout/size of WPR image up-front, as we need to know
	 * it to allocate memory before we begin constructing it.
	 */
	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
		/* Cull unknown falcons that are present in WPR image. */
		if (acr->wpr_fw) {
			if (!lsfw->func) {
				nvkm_acr_lsfw_del(lsfw);
				continue;
			}

			wpr_size = acr->wpr_fw->size;
		}

		/* Ensure we've fetched falcon configuration. */
		ret = nvkm_falcon_get(lsfw->falcon, subdev);
		if (ret)
			return ret;

		nvkm_falcon_put(lsfw->falcon, subdev);

		if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL)))
			return -ENOMEM;
		lsf->func = lsfw->func;
		lsf->falcon = lsfw->falcon;
		lsf->id = lsfw->id;
		list_add_tail(&lsf->head, &acr->lsf);
		acr->managed_falcons |= BIT_ULL(lsf->id);
	}

	/* Ensure the falcon that'll provide ACR functions is booted first. */
	lsf = nvkm_acr_falcon(device);
	if (lsf) {
		falcons = lsf->func->bootstrap_falcons;
		list_move(&lsf->head, &acr->lsf);
	} else {
		falcons = acr->func->bootstrap_falcons;
	}

	/* Cull falcons that can't be bootstrapped, or the HSFW can fail to
	 * boot and leave the GPU in a weird state.
	 */
	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
		if (!(falcons & BIT_ULL(lsfw->id))) {
			nvkm_warn(subdev, "%s falcon cannot be bootstrapped\n",
				  nvkm_acr_lsf_id(lsfw->id));
			nvkm_acr_lsfw_del(lsfw);
		}
	}

	if (!acr->wpr_fw || acr->wpr_comp)
		wpr_size = acr->func->wpr_layout(acr);

	/* Allocate/Locate WPR + fill ucode blob pointer.
	 *
	 *  dGPU: allocate WPR + shadow blob
	 * Tegra: locate WPR with regs, ensure size is sufficient,
	 *        allocate ucode blob.
	 */
	ret = acr->func->wpr_alloc(acr, wpr_size);
	if (ret)
		return ret;

	nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
		   acr->wpr_start, acr->wpr_end, acr->shadow_start);

	/* Write WPR to ucode blob. */
	nvkm_kmap(acr->wpr);
	if (acr->wpr_fw && !acr->wpr_comp)
		nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);

	if (!acr->wpr_fw || acr->wpr_comp)
		acr->func->wpr_build(acr, nvkm_acr_falcon(device));
	acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);

	if (acr->wpr_fw && acr->wpr_comp) {
		/* Compare the WPR image we built against the external one,
		 * dumping any differing words.  The mapping taken above is
		 * still held here, and init is failed deliberately since
		 * this is a debug-only path.
		 */
		for (i = 0; i < acr->wpr_fw->size; i += 4) {
			u32 us = nvkm_ro32(acr->wpr, i);
			u32 fw = ((u32 *)acr->wpr_fw->data)[i/4];
			if (fw != us) {
				nvkm_warn(subdev, "%08x: %08x %08x\n",
					  i, us, fw);
			}
		}
		nvkm_done(acr->wpr);
		return -EINVAL;
	}
	nvkm_done(acr->wpr);

	/* Allocate instance block for ACR-related stuff. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
			      &acr->inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm);
	if (ret)
		return ret;

	acr->vmm->debug = acr->subdev.debug;

	ret = nvkm_vmm_join(acr->vmm, acr->inst);
	if (ret)
		return ret;

	/* Load HS firmware blobs into ACR VMM. */
	list_for_each_entry(hsfw, &acr->hsfw, head) {
		nvkm_debug(subdev, "loading %s fw\n", hsfw->name);
		ret = hsfw->func->load(acr, hsfw);
		if (ret)
			return ret;
	}

	/* Free temporary firmware data. */
	nvkm_acr_cleanup(acr);
	return 0;
}

static void *
nvkm_acr_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_acr *acr = nvkm_acr(subdev);
	struct nvkm_acr_hsf *hsf, *hst;
	struct nvkm_acr_lsf *lsf, *lst;

	list_for_each_entry_safe(hsf, hst, &acr->hsf, head) {
		nvkm_vmm_put(acr->vmm, &hsf->vma);
		nvkm_memory_unref(&hsf->ucode);
		kfree(hsf->imem);
		list_del(&hsf->head);
		kfree(hsf);
	}

	nvkm_vmm_part(acr->vmm, acr->inst);
	nvkm_vmm_unref(&acr->vmm);
	nvkm_memory_unref(&acr->inst);

	nvkm_memory_unref(&acr->wpr);

	list_for_each_entry_safe(lsf, lst, &acr->lsf, head) {
		list_del(&lsf->head);
		kfree(lsf);
	}

	nvkm_acr_cleanup(acr);
	return acr;
}

static const struct nvkm_subdev_func
nvkm_acr = {
	.dtor = nvkm_acr_dtor,
	.oneinit = nvkm_acr_oneinit,
	.init = nvkm_acr_init,
	.fini = nvkm_acr_fini,
};
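/* Debug aid: the NvAcrWpr config option loads an external "acr/wpr"
 * firmware image, which is either used in place of the WPR image we'd
 * generate ourselves or, with NvAcrWprCompare set, compared word-by-word
 * against our own.  NvAcrWprPrevAddr supplies the address the external
 * image was generated for, so wpr_patch() can relocate it.
 */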
static int
nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw);
	if (ret < 0)
		return ret;

	/* Pre-add LSFs in the order they appear in the FW WPR image so that
	 * we're able to do a binary comparison with our own generator.
	 */
	ret = acr->func->wpr_parse(acr);
	if (ret)
		return ret;

	acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false);
	acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0);
	return 0;
}

int
nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr)
{
	struct nvkm_acr *acr;
	long wprfw;

	if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_acr, device, type, inst, &acr->subdev);
	INIT_LIST_HEAD(&acr->hsfw);
	INIT_LIST_HEAD(&acr->lsfw);
	INIT_LIST_HEAD(&acr->hsf);
	INIT_LIST_HEAD(&acr->lsf);

	fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr);
	if (IS_ERR(fwif))
		return PTR_ERR(fwif);

	acr->func = fwif->func;

	wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1);
	if (wprfw >= 0) {
		int ret = nvkm_acr_ctor_wpr(acr, wprfw);
		if (ret)
			return ret;
	}

	return 0;
}
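/* Chipset-specific variants (gm200_acr_new(), etc.) wrap the constructor
 * above, each passing an nvkm_acr_fwif table matching firmware to chipset.
 */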