/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <engine/gr.h>

#include <nvif/ifc00d.h>
#include <nvif/unpack.h>

static void
gp100_vmm_pfn_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
		u64 data = (u64)datahi << 32 | datalo;

		if ((data & (3ULL << 1)) != 0) {
			addr = (data >> 8) << 12;
			dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		ptei++;
	}
	nvkm_done(pt->memory);
}

static bool
gp100_vmm_pfn_clear(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	bool dma = false;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
		u64 data = (u64)datahi << 32 | datalo;

		if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
			VMM_WO064(pt, vmm, ptei * 8, data & ~BIT_ULL(0));
			dma = true;
		}
		ptei++;
	}
	nvkm_done(pt->memory);
	return dma;
}

static void
gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6); /* RO. */

		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
			addr = dma_map_page(dev, pfn_to_page(addr), 0,
					    PAGE_SIZE, DMA_BIDIRECTIONAL);
			if (!WARN_ON(dma_mapping_error(dev, addr))) {
				data |= addr >> 4;
				data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
				data |= BIT_ULL(3); /* VOL. */
				data |= BIT_ULL(0); /* VALID. */
			}
		} else {
			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
			data |= BIT_ULL(0); /* VALID. */
		}

		VMM_WO064(pt, vmm, ptei++ * 8, data);
		map->pfn++;
	}
	nvkm_done(pt->memory);
}
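
/*
 * A worked example of the 8-byte PTE encoding written above (a sketch;
 * field positions are inferred from this file's own shifts and comments):
 * a writeable, coherent system-memory page at bus address 0x12345000
 * would be encoded as
 *
 *	data = (0x12345000ULL >> 4)	ADDR, 4KiB-aligned, bits 8 and up
 *	     | (2ULL << 1)		APERTURE: SYSTEM_COHERENT_MEMORY
 *	     | BIT_ULL(3)		VOL
 *	     | BIT_ULL(0);		VALID
 *
 * which is exactly what gp100_vmm_pfn_unmap() inverts with
 * "(data >> 8) << 12" to recover the DMA address.
 */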

static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 data = (addr >> 4) | map->type;

	map->type += ptes * map->ctag;

	while (ptes--) {
		VMM_WO064(pt, vmm, ptei++ * 8, data);
		data += map->next;
	}
}

static void
gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = (*map->dma++ >> 4) | map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
		     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_spt = {
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.mem = gp100_vmm_pgt_mem,
	.dma = gp100_vmm_pgt_dma,
	.sgl = gp100_vmm_pgt_sgl,
	.pfn = gp100_vmm_pgt_pfn,
	.pfn_clear = gp100_vmm_pfn_clear,
	.pfn_unmap = gp100_vmm_pfn_unmap,
};

static void
gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
		      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_lpt = {
	.invalid = gp100_vmm_lpt_invalid,
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.mem = gp100_vmm_pgt_mem,
};

static inline void
gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 data = (addr >> 4) | map->type;

	map->type += ptes * map->ctag;

	while (ptes--) {
		VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
		data += map->next;
	}
}

static void
gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
}

static inline bool
gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
{
	switch (nvkm_memory_target(pt->memory)) {
	case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
	case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
		*data |= BIT_ULL(3); /* VOL. */
		break;
	case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
	default:
		WARN_ON(1);
		return false;
	}
	*data |= pt->addr >> 4;
	return true;
}
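
/*
 * Example PDE encodings produced by gp100_vmm_pde() (a sketch; the page
 * tables described by gp100_vmm_desc_16/_12 below are at least 256-byte
 * aligned, so pt->addr >> 4 leaves the low flag bits clear):
 *
 *	VRAM page table at 0x00200000: (1ULL << 1) | (0x00200000 >> 4)
 *	HOST page table at 0x80001000: (2ULL << 1) | BIT_ULL(3) | (0x80001000 >> 4)
 */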

static void
gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	u64 data[2] = {};

	if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
		return;
	if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
		return;

	nvkm_kmap(pd->memory);
	VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
	nvkm_done(pd->memory);
}

static void
gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
		     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
	/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
	VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
}

static void
gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
	VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
}

static void
gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
			struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
		u64 data = (u64)datahi << 32 | datalo;

		if ((data & (3ULL << 1)) != 0) {
			addr = (data >> 8) << 12;
			dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
		}
		ptei++;
	}
	nvkm_done(pt->memory);
}

static bool
gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
			struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	bool dma = false;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
		u64 data = (u64)datahi << 32 | datalo;

		if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
			VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
			dma = true;
		}
		ptei++;
	}
	nvkm_done(pt->memory);
	return dma;
}

static void
gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6); /* RO. */

		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
			addr = dma_map_page(dev, pfn_to_page(addr), 0,
					    1UL << 21, DMA_BIDIRECTIONAL);
			if (!WARN_ON(dma_mapping_error(dev, addr))) {
				data |= addr >> 4;
				data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
				data |= BIT_ULL(3); /* VOL. */
				data |= BIT_ULL(0); /* VALID. */
			}
		} else {
			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
			data |= BIT_ULL(0); /* VALID. */
		}

		VMM_WO064(pt, vmm, ptei++ * 16, data);
		map->pfn++;
	}
	nvkm_done(pt->memory);
}
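
/*
 * PD0 entries are 128 bits wide: as gp100_vmm_pd0_pde() shows, the low
 * 64 bits (data[0], from pgt->pt[0]) describe the large-page table and
 * the high 64 bits (data[1], from pgt->pt[1]) the small-page table.
 * The 2MiB PFN paths above only ever touch the low half, which is why
 * they use VMM_WO064/nvkm_ro32 with a 16-byte stride.
 */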

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd0 = {
	.unmap = gp100_vmm_pd0_unmap,
	.sparse = gp100_vmm_pd0_sparse,
	.pde = gp100_vmm_pd0_pde,
	.mem = gp100_vmm_pd0_mem,
	.pfn = gp100_vmm_pd0_pfn,
	.pfn_clear = gp100_vmm_pd0_pfn_clear,
	.pfn_unmap = gp100_vmm_pd0_pfn_unmap,
};

static void
gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	u64 data = 0;

	if (!gp100_vmm_pde(pgt->pt[0], &data))
		return;

	nvkm_kmap(pd->memory);
	VMM_WO064(pd, vmm, pdei * 8, data);
	nvkm_done(pd->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd1 = {
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.pde = gp100_vmm_pd1_pde,
};

const struct nvkm_vmm_desc
gp100_vmm_desc_16[] = {
	{ LPT, 5,  8, 0x0100, &gp100_vmm_desc_lpt },
	{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{}
};

const struct nvkm_vmm_desc
gp100_vmm_desc_12[] = {
	{ SPT, 9,  8, 0x1000, &gp100_vmm_desc_spt },
	{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{}
};

int
gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
		struct nvkm_vmm_map *map)
{
	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
	const struct nvkm_vmm_page *page = map->page;
	union {
		struct gp100_vmm_map_vn vn;
		struct gp100_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_memory *memory = map->memory;
	u8 kind, kind_inv, priv, ro, vol;
	int kindn, aper, ret = -ENOSYS;
	const u8 *kindm;

	map->next = (1ULL << page->shift) >> 4;
	map->type = 0;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		vol  = !!args->v0.vol;
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind =   args->v0.kind;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		vol  = target == NVKM_MEM_TARGET_HOST;
		ro   = 0;
		priv = 0;
		kind = 0x00;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	aper = vmm->func->aper(target);
	if (WARN_ON(aper < 0))
		return aper;

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
	if (kind >= kindn || kindm[kind] == kind_inv) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (kindm[kind] != kind) {
		u64 tags = nvkm_memory_size(memory) >> 16;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		ret = nvkm_memory_tags_get(memory, device, tags,
					   nvkm_ltc_tags_clear,
					   &map->tags);
		if (ret) {
			VMM_DEBUG(vmm, "comp %d", ret);
			return ret;
		}

		if (map->tags->mn) {
			tags = map->tags->mn->offset + (map->offset >> 16);
			map->ctag |= ((1ULL << page->shift) >> 16) << 36;
			map->type |= tags << 36;
			map->next |= map->ctag;
		} else {
			kind = kindm[kind];
		}
	}

	map->type |= BIT(0); /* VALID. */
	map->type |= (u64)aper << 1; /* APERTURE. */
	map->type |= (u64) vol << 3; /* VOL. */
	map->type |= (u64)priv << 5; /* PRIV. */
	map->type |= (u64)  ro << 6; /* RO. */
	map->type |= (u64)kind << 56; /* KIND. */
	return 0;
}
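
/*
 * Comptag bookkeeping sketch (derived from the shifts above): one tag
 * covers 64KiB of the mapping, and the tag index sits at bit 36 of the
 * PTE.  A 64KiB page (shift 16) therefore makes map->ctag = 1ULL << 36
 * and a 2MiB page (shift 21) makes it 32ULL << 36; folding ctag into
 * map->next lets gp100_vmm_pgt_pte() advance the tag index along with
 * the address as it walks successive PTEs.
 */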

static int
gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	union {
		struct gp100_vmm_fault_cancel_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	u32 inst, aper;

	if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
		return ret;

	/* Translate MaxwellFaultBufferA instance pointer to the same
	 * format as the NV_GR_FECS_CURRENT_CTX register.
	 */
	aper = (args->v0.inst >> 8) & 3;
	args->v0.inst >>= 12;
	args->v0.inst |= aper << 28;
	args->v0.inst |= 0x80000000;

	if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
		if ((inst = nvkm_gr_ctxsw_inst(device)) == args->v0.inst) {
			gf100_vmm_invalidate(vmm, 0x0000001b
					     /* CANCEL_TARGETED. */ |
					     (args->v0.hub    << 20) |
					     (args->v0.gpc    << 15) |
					     (args->v0.client <<  9));
		}
		WARN_ON(nvkm_gr_ctxsw_resume(device));
	}

	return 0;
}

static int
gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
	union {
		struct gp100_vmm_fault_replay_vn vn;
	} *args = argv;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
	}

	return ret;
}

int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
	       struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
	if (client->super) {
		switch (mthd) {
		case GP100_VMM_VN_FAULT_REPLAY:
			return gp100_vmm_fault_replay(vmm, argv, argc);
		case GP100_VMM_VN_FAULT_CANCEL:
			return gp100_vmm_fault_cancel(vmm, argv, argc);
		default:
			break;
		}
	}
	return -EINVAL;
}

void
gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	nvkm_wr32(device, 0x100cb8, lower_32_bits(addr));
	nvkm_wr32(device, 0x100cec, upper_32_bits(addr));
}

void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
	u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */
	type |= 0x00000001; /* PAGE_ALL */
	gf100_vmm_invalidate(vmm, type);
}

int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
	if (vmm->replay) {
		base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
		base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
	}
	return gf100_vmm_join_(vmm, inst, base);
}

static const struct nvkm_vmm_func
gp100_vmm = {
	.join = gp100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gp100_vmm_valid,
	.flush = gp100_vmm_flush,
	.mthd = gp100_vmm_mthd,
	.invalidate_pdb = gp100_vmm_invalidate_pdb,
	.page = {
		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
		{}
	}
};
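
/*
 * The .page[] shifts above mirror gp100_vmm_desc_16/_12: the 47/38/29-bit
 * entries cover the upper PD levels and are sparse-only (Sxxx), while
 * 2MiB (21) and 64KiB (16) mappings come from the desc_16 walk and 4KiB
 * (12) mappings from desc_12.
 */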

int
gp100_vmm_new_(const struct nvkm_vmm_func *func,
	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	       void *argv, u32 argc, struct lock_class_key *key,
	       const char *name, struct nvkm_vmm **pvmm)
{
	union {
		struct gp100_vmm_vn vn;
		struct gp100_vmm_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	bool replay;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		replay = args->v0.fault_replay != 0;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		replay = false;
	} else
		return ret;

	ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
	if (ret)
		return ret;

	(*pvmm)->replay = replay;
	return 0;
}

int
gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	      void *argv, u32 argc, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm **pvmm)
{
	return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
			      argv, argc, key, name, pvmm);
}