/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* GM200 (Maxwell 2) virtual memory manager: page-table descriptor tables
 * and constructors, layered on the GF100 implementation.  GM200 adds the
 * ability to mark PTEs/PDEs "sparse" and to select the big-page size
 * (64KiB or 128KiB) per-VMM.
 */
#include "vmm.h"

#include <nvif/ifb00d.h>
#include <nvif/unpack.h>

/* Mark @ptes page-table entries, starting at index @ptei, as sparse.
 *
 * Each PTE is 8 bytes wide (hence ptei * 8).  The fill pattern leaves
 * VALID clear and sets only the VOL bit (bit 32).
 */
static void
gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
		     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
}

/* Small-page (4KiB) page-table ops: GF100 ops plus GM200 sparse support. */
static const struct nvkm_vmm_desc_func
gm200_vmm_spt = {
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gm200_vmm_pgt_sparse,
	.mem = gf100_vmm_pgt_mem,
	.dma = gf100_vmm_pgt_dma,
	.sgl = gf100_vmm_pgt_sgl,
};

/* Large-page page-table ops; no .dma/.sgl — big pages map contiguous
 * memory only (matches the GK104 large-page handling this builds on).
 */
static const struct nvkm_vmm_desc_func
gm200_vmm_lpt = {
	.invalid = gk104_vmm_lpt_invalid,
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gm200_vmm_pgt_sparse,
	.mem = gf100_vmm_pgt_mem,
};

/* Mark @pdes page-directory entries, starting at index @pdei, as sparse.
 * Same idea as gm200_vmm_pgt_sparse(), but PDEs use bit 35 (VOL_BIG).
 */
static void
gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
		     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
	/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
	VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
}

/* Page-directory ops: GF100 PDE handling plus GM200 sparse PDEs. */
static const struct nvkm_vmm_desc_func
gm200_vmm_pgd = {
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gm200_vmm_pgd_sparse,
	.pde = gf100_vmm_pgd_pde,
};

/* Page-table layout descriptors, one pair per (big-page shift,
 * small-page shift) combination.  NOTE(review): field meaning follows
 * struct nvkm_vmm_desc — {type, index bits, PTE stride, alignment,
 * func table}; confirm against vmm.h.
 */

/* 128KiB big pages + 4KiB small pages. */
const struct nvkm_vmm_desc
gm200_vmm_desc_17_12[] = {
	{ SPT, 15, 8, 0x1000, &gm200_vmm_spt },
	{ PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
	{}
};

/* 128KiB big pages, large-page table only. */
const struct nvkm_vmm_desc
gm200_vmm_desc_17_17[] = {
	{ LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
	{ PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
	{}
};

/* 64KiB big pages + 4KiB small pages. */
const struct nvkm_vmm_desc
gm200_vmm_desc_16_12[] = {
	{ SPT, 14, 8, 0x1000, &gm200_vmm_spt },
	{ PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
	{}
};

/* 64KiB big pages, large-page table only. */
const struct nvkm_vmm_desc
gm200_vmm_desc_16_16[] = {
	{ LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
	{ PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
	{}
};

/* Attach this VMM to an instance block at @base.
 *
 * When the VMM was built with a 64KiB big-page layout (page[1].shift
 * == 16), bit 11 is OR'd into the instance-block base value before
 * handing off to the GF100 join — presumably the HW flag selecting the
 * 64KiB big-page size (NOTE(review): confirm against HW docs).
 */
int
gm200_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
{
	if (vmm->func->page[1].shift == 16)
		base |= BIT_ULL(11);
	return gf100_vmm_join_(vmm, inst, base);
}

/* Default join: no extra base flags beyond the big-page-size bit. */
int
gm200_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	return gm200_vmm_join_(vmm, inst, 0);
}

/* VMM backend for the 128KiB big-page layout.  Page sizes: 128MiB
 * (sparse-only PGD level), 128KiB big, 4KiB small.
 */
static const struct nvkm_vmm_func
gm200_vmm_17 = {
	.join = gm200_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.page = {
		{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
		{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
		{ 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SVHx },
		{}
	}
};

/* VMM backend for the 64KiB big-page layout. */
static const struct nvkm_vmm_func
gm200_vmm_16 = {
	.join = gm200_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.page = {
		{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
		{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
		{ 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SVHx },
		{}
	}
};

/* Construct a GM200-style VMM, selecting between the 64KiB (@func_16)
 * and 128KiB (@func_17) big-page backends based on client arguments.
 *
 * @argv/@argc carry a versioned nvif argument union: the v0 form lets
 * the client request a big-page shift of 16 or 17 (anything else is
 * -EINVAL); the versionless "vn" form defaults to 128KiB big pages.
 * Shared with later chipsets, which pass their own backend pairs.
 */
int
gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
	       const struct nvkm_vmm_func *func_17,
	       struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
	       struct lock_class_key *key, const char *name,
	       struct nvkm_vmm **pvmm)
{
	const struct nvkm_vmm_func *func;
	union {
		struct gm200_vmm_vn vn;
		struct gm200_vmm_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		switch (args->v0.bigpage) {
		case 16: func = func_16; break;
		case 17: func = func_17; break;
		default:
			return -EINVAL;
		}
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		func = func_17;
	} else
		return ret;

	return nvkm_vmm_new_(func, mmu, 0, addr, size, key, name, pvmm);
}

/* Standard GM200 VMM constructor using the local backend pair. */
int
gm200_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
	      struct lock_class_key *key, const char *name,
	      struct nvkm_vmm **pvmm)
{
	return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
			      size, argv, argc, key, name, pvmm);
}

/* "Fixed" variant: routes through the GF100 constructor path instead of
 * gm200_vmm_new_().  NOTE(review): presumably for instances whose
 * layout is fixed by firmware/ABI so client big-page selection does not
 * apply — confirm against the gf100_vmm_new_() contract.
 */
int
gm200_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
		    void *argv, u32 argc, struct lock_class_key *key,
		    const char *name, struct nvkm_vmm **pvmm)
{
	return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
			      size, argv, argc, key, name, pvmm);
}