/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <engine/gr.h>

#include <nvif/ifc00d.h>
#include <nvif/unpack.h>

static void
gp100_vmm_pfn_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
		u64 data = (u64)datahi << 32 | datalo;
		if ((data & (3ULL << 1)) != 0) {
			addr = (data >> 8) << 12;
			dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		ptei++;
	}
	nvkm_done(pt->memory);
}

static bool
gp100_vmm_pfn_clear(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	bool dma = false;
	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
		u64 data = (u64)datahi << 32 | datalo;
		if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
			VMM_WO064(pt, vmm, ptei * 8, data & ~BIT_ULL(0));
			dma = true;
		}
		ptei++;
	}
	nvkm_done(pt->memory);
	return dma;
}

static void
gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	for (; ptes; ptes--, map->pfn++) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_V))
			continue;

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6); /* RO. */

		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
			addr = dma_map_page(dev, pfn_to_page(addr), 0,
					    PAGE_SIZE, DMA_BIDIRECTIONAL);
			if (!WARN_ON(dma_mapping_error(dev, addr))) {
				data |= addr >> 4;
				data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
				data |= BIT_ULL(3); /* VOL. */
				data |= BIT_ULL(0); /* VALID. */
			}
		} else {
			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
			data |= BIT_ULL(0); /* VALID. */
		}

		VMM_WO064(pt, vmm, ptei++ * 8, data);
	}
	nvkm_done(pt->memory);
}

static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 data = (addr >> 4) | map->type;

	map->type += ptes * map->ctag;

	while (ptes--) {
		VMM_WO064(pt, vmm, ptei++ * 8, data);
		data += map->next;
	}
}

static void
gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = (*map->dma++ >> 4) | map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
		     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_spt = {
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.mem = gp100_vmm_pgt_mem,
	.dma = gp100_vmm_pgt_dma,
	.sgl = gp100_vmm_pgt_sgl,
	.pfn = gp100_vmm_pgt_pfn,
	.pfn_clear = gp100_vmm_pfn_clear,
	.pfn_unmap = gp100_vmm_pfn_unmap,
};

static void
gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
		      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_lpt = {
	.invalid = gp100_vmm_lpt_invalid,
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.mem = gp100_vmm_pgt_mem,
};

static inline void
gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 data = (addr >> 4) | map->type;

	map->type += ptes * map->ctag;

	while (ptes--) {
		VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
		data += map->next;
	}
}

static void
gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
}

static inline bool
gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
{
	switch (nvkm_memory_target(pt->memory)) {
	case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
	case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
		*data |= BIT_ULL(3); /* VOL. */
		break;
	case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
	default:
		WARN_ON(1);
		return false;
	}
	*data |= pt->addr >> 4;
	return true;
}

static void
gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	u64 data[2] = {};

	if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
		return;
	if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
		return;

	nvkm_kmap(pd->memory);
	VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
	nvkm_done(pd->memory);
}

static void
gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
		     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
	/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
	VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
}

static void
gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
	VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
}

static void
gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
			struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
		u64 data = (u64)datahi << 32 | datalo;

		if ((data & (3ULL << 1)) != 0) {
			addr = (data >> 8) << 12;
			dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
		}
		ptei++;
	}
	nvkm_done(pt->memory);
}

static bool
gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
			struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	bool dma = false;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
		u64 data = (u64)datahi << 32 | datalo;

		if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
			VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
			dma = true;
		}
		ptei++;
	}
	nvkm_done(pt->memory);
	return dma;
}

static void
gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	for (; ptes; ptes--, map->pfn++) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_V))
			continue;

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6); /* RO. */

		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
			addr = dma_map_page(dev, pfn_to_page(addr), 0,
					    1UL << 21, DMA_BIDIRECTIONAL);
			if (!WARN_ON(dma_mapping_error(dev, addr))) {
				data |= addr >> 4;
				data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
				data |= BIT_ULL(3); /* VOL. */
				data |= BIT_ULL(0); /* VALID. */
			}
		} else {
			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
			data |= BIT_ULL(0); /* VALID. */
		}

		VMM_WO064(pt, vmm, ptei++ * 16, data);
	}
	nvkm_done(pt->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd0 = {
	.unmap = gp100_vmm_pd0_unmap,
	.sparse = gp100_vmm_pd0_sparse,
	.pde = gp100_vmm_pd0_pde,
	.mem = gp100_vmm_pd0_mem,
	.pfn = gp100_vmm_pd0_pfn,
	.pfn_clear = gp100_vmm_pd0_pfn_clear,
	.pfn_unmap = gp100_vmm_pd0_pfn_unmap,
};

static void
gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	u64 data = 0;

	if (!gp100_vmm_pde(pgt->pt[0], &data))
		return;

	nvkm_kmap(pd->memory);
	VMM_WO064(pd, vmm, pdei * 8, data);
	nvkm_done(pd->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd1 = {
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.pde = gp100_vmm_pd1_pde,
};

const struct nvkm_vmm_desc
gp100_vmm_desc_16[] = {
	{ LPT, 5,  8, 0x0100, &gp100_vmm_desc_lpt },
	{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{}
};

const struct nvkm_vmm_desc
gp100_vmm_desc_12[] = {
	{ SPT, 9,  8, 0x1000, &gp100_vmm_desc_spt },
	{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{}
};

int
gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
		struct nvkm_vmm_map *map)
{
	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
	const struct nvkm_vmm_page *page = map->page;
	union {
		struct gp100_vmm_map_vn vn;
		struct gp100_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_memory *memory = map->memory;
	u8  kind, kind_inv, priv, ro, vol;
	int kindn, aper, ret = -ENOSYS;
	const u8 *kindm;

	map->next = (1ULL << page->shift) >> 4;
	map->type = 0;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		vol  = !!args->v0.vol;
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind =   args->v0.kind;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		vol  = target == NVKM_MEM_TARGET_HOST;
		ro   = 0;
		priv = 0;
		kind = 0x00;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	aper = vmm->func->aper(target);
	if (WARN_ON(aper < 0))
		return aper;

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
	if (kind >= kindn || kindm[kind] == kind_inv) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (kindm[kind] != kind) {
		u64 tags = nvkm_memory_size(memory) >> 16;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		ret = nvkm_memory_tags_get(memory, device, tags,
					   nvkm_ltc_tags_clear,
					   &map->tags);
		if (ret) {
			VMM_DEBUG(vmm, "comp %d", ret);
			return ret;
		}

		if (map->tags->mn) {
			tags = map->tags->mn->offset + (map->offset >> 16);
			map->ctag |= ((1ULL << page->shift) >> 16) << 36;
			map->type |= tags << 36;
			map->next |= map->ctag;
		} else {
			kind = kindm[kind];
		}
	}

	map->type |= BIT(0);
	map->type |= (u64)aper << 1;
	map->type |= (u64) vol << 3;
	map->type |= (u64)priv << 5;
	map->type |= (u64)  ro << 6;
	map->type |= (u64)kind << 56;
	return 0;
}

static int
gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	union {
		struct gp100_vmm_fault_cancel_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	u32 inst, aper;

	if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
		return ret;

	/* Translate MaxwellFaultBufferA instance pointer to the same
	 * format as the NV_GR_FECS_CURRENT_CTX register.
	 */
	aper = (args->v0.inst >> 8) & 3;
	args->v0.inst >>= 12;
	args->v0.inst |= aper << 28;
	args->v0.inst |= 0x80000000;

	if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
		if ((inst = nvkm_gr_ctxsw_inst(device)) == args->v0.inst) {
			gf100_vmm_invalidate(vmm, 0x0000001b
					     /* CANCEL_TARGETED. */ |
					     (args->v0.hub    << 20) |
					     (args->v0.gpc    << 15) |
					     (args->v0.client << 9));
		}
		WARN_ON(nvkm_gr_ctxsw_resume(device));
	}

	return 0;
}

static int
gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
	union {
		struct gp100_vmm_fault_replay_vn vn;
	} *args = argv;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
	}

	return ret;
}

int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
	       struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
	if (client->super) {
		switch (mthd) {
		case GP100_VMM_VN_FAULT_REPLAY:
			return gp100_vmm_fault_replay(vmm, argv, argc);
		case GP100_VMM_VN_FAULT_CANCEL:
			return gp100_vmm_fault_cancel(vmm, argv, argc);
		default:
			break;
		}
	}
	return -EINVAL;
}

void
gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	nvkm_wr32(device, 0x100cb8, lower_32_bits(addr));
	nvkm_wr32(device, 0x100cec, upper_32_bits(addr));
}

void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
	u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */
	type |= 0x00000001; /* PAGE_ALL */
	gf100_vmm_invalidate(vmm, type);
}

int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
	if (vmm->replay) {
		base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
		base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
	}
	return gf100_vmm_join_(vmm, inst, base);
}

static const struct nvkm_vmm_func
gp100_vmm = {
	.join = gp100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gp100_vmm_valid,
	.flush = gp100_vmm_flush,
	.mthd = gp100_vmm_mthd,
	.invalidate_pdb = gp100_vmm_invalidate_pdb,
	.page = {
		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
		{}
	}
};

int
gp100_vmm_new_(const struct nvkm_vmm_func *func,
	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	       void *argv, u32 argc, struct lock_class_key *key,
	       const char *name, struct nvkm_vmm **pvmm)
{
	union {
		struct gp100_vmm_vn vn;
		struct gp100_vmm_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	bool replay;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		replay = args->v0.fault_replay != 0;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		replay = false;
	} else
		return ret;

	ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
	if (ret)
		return ret;

	(*pvmm)->replay = replay;
	return 0;
}

int
gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	      void *argv, u32 argc, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm **pvmm)
{
	return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
			      argv, argc, key, name, pvmm);
}