/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

struct nv50_bar {
	struct nvkm_bar base;
	spinlock_t lock;
	struct nvkm_gpuobj *mem;
	struct nvkm_gpuobj *pad;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *bar1_vm;
	struct nvkm_gpuobj *bar1;
	struct nvkm_vm *bar3_vm;
	struct nvkm_gpuobj *bar3;
};

/* Map an object into BAR3, the aperture the kernel uses to access objects. */
static int
nv50_bar_kmap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	int ret;

	/* mem->size is in pages; nvkm_vm_get() wants bytes */
	ret = nvkm_vm_get(bar->bar3_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}

/* Map an object into BAR1, the aperture exposed to userspace. */
static int
nv50_bar_umap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	int ret;

	ret = nvkm_vm_get(bar->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}

static void
nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
}

/* Trigger a BAR flush, then wait (up to 2ms) for the busy bit to clear. */
static void
nv50_bar_flush(struct nvkm_bar *obj)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nvkm_wr32(device, 0x00330c, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
			break;
	);
	spin_unlock_irqrestore(&bar->lock, flags);
}

/* G84 and later use a different flush register (0x070000). */
void
g84_bar_flush(struct nvkm_bar *obj)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nvkm_wr32(device, 0x070000, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
			break;
	);
	spin_unlock_irqrestore(&bar->lock, flags);
}
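
/*
 * Construction: allocate a 128KiB heap of instance memory, carve the page
 * directory and the two 24-byte BAR1/BAR3 aperture objects out of it, and
 * build a VM spanning each BAR's PCI resource.
 */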
static int
nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nvkm_object *heap;
	struct nvkm_vm *vm;
	struct nv50_bar *bar;
	u64 start, limit;
	int ret;

	ret = nvkm_bar_create(parent, engine, oclass, &bar);
	*pobject = nv_object(bar);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), NULL, 0x20000, 0,
			      NVOBJ_FLAG_HEAP, &bar->mem);
	heap = nv_object(bar->mem);
	if (ret)
		return ret;

	/* alignment padding; the original NV50 needs a larger gap here */
	ret = nvkm_gpuobj_new(nv_object(bar), heap,
			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
			      0, 0, &bar->pad);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 0x4000, 0, 0, &bar->pgd);
	if (ret)
		return ret;

	/* BAR3 */
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nvkm_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	/* one 8-byte PTE per 4KiB page, covering the entire aperture */
	ret = nvkm_gpuobj_new(nv_object(bar), heap,
			      ((limit-- - start) >> 12) * 8, 0x1000,
			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar3);
	if (ret)
		return ret;

	nv_wo32(bar->bar3, 0x00, 0x7fc00000);
	nv_wo32(bar->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(bar->bar3, 0x08, lower_32_bits(start));
	nv_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
				 upper_32_bits(start));
	nv_wo32(bar->bar3, 0x10, 0x00000000);
	nv_wo32(bar->bar3, 0x14, 0x00000000);

	/* BAR1 */
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nvkm_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar1);
	if (ret)
		return ret;

	nv_wo32(bar->bar1, 0x00, 0x7fc00000);
	nv_wo32(bar->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(bar->bar1, 0x08, lower_32_bits(start));
	nv_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
				 upper_32_bits(start));
	nv_wo32(bar->bar1, 0x10, 0x00000000);
	nv_wo32(bar->bar1, 0x14, 0x00000000);

	bar->base.alloc = nvkm_bar_alloc;
	bar->base.kmap = nv50_bar_kmap;
	bar->base.umap = nv50_bar_umap;
	bar->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		bar->base.flush = nv50_bar_flush;
	else
		bar->base.flush = g84_bar_flush;
	spin_lock_init(&bar->lock);
	return 0;
}

static void
nv50_bar_dtor(struct nvkm_object *object)
{
	struct nv50_bar *bar = (void *)object;
	nvkm_gpuobj_ref(NULL, &bar->bar1);
	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
	nvkm_gpuobj_ref(NULL, &bar->bar3);
	if (bar->bar3_vm) {
		nvkm_gpuobj_ref(NULL, &bar->bar3_vm->pgt[0].obj[0]);
		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
	}
	nvkm_gpuobj_ref(NULL, &bar->pgd);
	nvkm_gpuobj_ref(NULL, &bar->pad);
	nvkm_gpuobj_ref(NULL, &bar->mem);
	nvkm_bar_destroy(&bar->base);
}

static int
nv50_bar_init(struct nvkm_object *object)
{
	struct nv50_bar *bar = (void *)object;
	struct nvkm_device *device = bar->base.subdev.device;
	int ret, i;

	ret = nvkm_bar_init(&bar->base);
	if (ret)
		return ret;

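	/*
	 * Pulse bit 8 of the master enable register (0x000200), request
	 * a VM flush via 0x100c80 and wait for it to complete, then point
	 * the hardware at the instance block and the BAR1/BAR3 objects
	 * built in the constructor.
	 */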
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x100c80, 0x00060001);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
			break;
	) < 0)
		return -EBUSY;

	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
	for (i = 0; i < 8; i++)
		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
	return 0;
}

static int
nv50_bar_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_bar *bar = (void *)object;
	return nvkm_bar_fini(&bar->base, suspend);
}

struct nvkm_oclass
nv50_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};