/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>

struct gf100_bar_vm {
	struct nvkm_memory *mem;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_bar {
	struct nvkm_bar base;
	spinlock_t lock;
	struct gf100_bar_vm bar[2];
};

static struct nvkm_vm *
gf100_bar_kmap(struct nvkm_bar *obj)
{
	struct gf100_bar *bar = container_of(obj, typeof(*bar), base);
	return bar->bar[0].vm;
}

static int
gf100_bar_umap(struct nvkm_bar *obj, u64 size, int type, struct nvkm_vma *vma)
{
	struct gf100_bar *bar = container_of(obj, typeof(*bar), base);
	return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
}

static void
gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
}

static int
gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
		  struct lock_class_key *key, int bar_nr)
{
	struct nvkm_device *device = nv_device(&bar->base);
	struct nvkm_vm *vm;
	resource_size_t bar_len;
	int ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
			      &bar_vm->mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
	if (ret)
		return ret;

	bar_len = nv_device_resource_len(device, bar_nr);

	ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	/*
	 * Bootstrap page table lookup.
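	 * BAR3 is the aperture the driver uses to reach instance memory,
	 * and the page tables backing this VM live in memory that would
	 * normally be reached through BAR3 itself.  Pre-populating a page
	 * table covering the whole aperture breaks that circular
	 * dependency before the BAR is brought online.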
	 */
	if (bar_nr == 3) {
		ret = nvkm_vm_boot(vm, bar_len);
		if (ret)
			return ret;
	}

	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	/* Write the page directory address and VM limit into the BAR's
	 * instance block.
	 */
	nvkm_kmap(bar_vm->mem);
	nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
	nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
	nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
	nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
	nvkm_done(bar_vm->mem);
	return 0;
}

int
gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	static struct lock_class_key bar1_lock;
	static struct lock_class_key bar3_lock;
	struct nvkm_device *device = nv_device(parent);
	struct gf100_bar *bar;
	bool has_bar3 = nv_device_resource_len(device, 3) != 0;
	int ret;

	ret = nvkm_bar_create(parent, engine, oclass, &bar);
	*pobject = nv_object(bar);
	if (ret)
		return ret;

	device->bar = &bar->base;
	bar->base.flush = g84_bar_flush;
	spin_lock_init(&bar->lock);

	/* BAR3 */
	if (has_bar3) {
		ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
		if (ret)
			return ret;
	}

	/* BAR1 */
	ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
	if (ret)
		return ret;

	if (has_bar3)
		bar->base.kmap = gf100_bar_kmap;
	bar->base.umap = gf100_bar_umap;
	bar->base.unmap = gf100_bar_unmap;
	return 0;
}

void
gf100_bar_dtor(struct nvkm_object *object)
{
	struct gf100_bar *bar = (void *)object;

	nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
	nvkm_gpuobj_del(&bar->bar[1].pgd);
	nvkm_memory_del(&bar->bar[1].mem);

	if (bar->bar[0].vm) {
		/* The bootstrapped BAR3 page table is not released by
		 * dropping the VM reference, free it explicitly.
		 */
		nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
		nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
	}
	nvkm_gpuobj_del(&bar->bar[0].pgd);
	nvkm_memory_del(&bar->bar[0].mem);

	nvkm_bar_destroy(&bar->base);
}

int
gf100_bar_init(struct nvkm_object *object)
{
	struct gf100_bar *bar = (void *)object;
	struct nvkm_device *device = bar->base.subdev.device;
	u32 addr;
	int ret;

	ret = nvkm_bar_init(&bar->base);
	if (ret)
		return ret;

	/* Reset the BAR units before pointing them at their instance
	 * blocks.
	 */
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);

	addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
	nvkm_wr32(device, 0x001704, 0x80000000 | addr);

	if (bar->bar[0].mem) {
		addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
		nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
	}

	return 0;
}

struct nvkm_oclass
gf100_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_bar_ctor,
		.dtor = gf100_bar_dtor,
		.init = gf100_bar_init,
		.fini = _nvkm_bar_fini,
	},
};
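
/*
 * Usage sketch (hypothetical caller, assuming the nvkm_bar hooks of this
 * era): the methods installed by gf100_bar_ctor() are reached through the
 * device's BAR subdev.  Mapping an object through BAR1 would look roughly
 * like this, where "size" is a placeholder and 12 selects 4KiB pages:
 *
 *	struct nvkm_bar *bar = device->bar;
 *	struct nvkm_vma vma = {};
 *	int ret = bar->umap(bar, size, 12, &vma);
 *	if (ret == 0) {
 *		... access the object through the BAR1 aperture ...
 *		bar->unmap(bar, &vma);
 *	}
 */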