/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

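/* Instance data for the NV50-family BAR subdev.  bar->mem backs a small
 * heap from which the objects below are sub-allocated; BAR1 is the
 * aperture used for userspace mappings, BAR3 the one the kernel itself
 * uses to access instance memory.
 */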
struct nv50_bar {
	struct nvkm_bar base;
	spinlock_t lock;
	struct nvkm_gpuobj *mem;
	struct nvkm_gpuobj *pad;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *bar1_vm;
	struct nvkm_gpuobj *bar1;
	struct nvkm_vm *bar3_vm;
	struct nvkm_gpuobj *bar3;
};

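/* Map @mem into the BAR3 (kernel) aperture; mem->size is in 4KiB pages,
 * hence the shift to get a size in bytes.
 */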
static int
nv50_bar_kmap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	int ret;

	ret = nvkm_vm_get(bar->bar3_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}

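/* Map @mem into the BAR1 (userspace) aperture, mirroring kmap. */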
static int
nv50_bar_umap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	int ret;

	ret = nvkm_vm_get(bar->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}

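/* Undo a kmap/umap mapping and release the VM space it occupied. */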
static void
nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
}

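/* Flush posted BAR writes on the original NV50: poke 0x00330c and wait
 * for bit 1 to clear.  The spinlock serialises concurrent flushes.
 */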
static void
nv50_bar_flush(struct nvkm_bar *obj)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nvkm_wr32(device, 0x00330c, 0x00000001);
	if (!nv_wait(bar, 0x00330c, 0x00000002, 0x00000000))
		nv_warn(bar, "flush timeout\n");
	spin_unlock_irqrestore(&bar->lock, flags);
}

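/* G84 and later chips moved the flush trigger/status register to
 * 0x070000; the procedure is otherwise identical.
 */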
void
g84_bar_flush(struct nvkm_bar *obj)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nvkm_wr32(device, 0x070000, 0x00000001);
	if (!nv_wait(bar, 0x070000, 0x00000002, 0x00000000))
		nv_warn(bar, "flush timeout\n");
	spin_unlock_irqrestore(&bar->lock, flags);
}

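/* Construct the subdev: allocate an instance block (bar->mem) and
 * sub-allocate from it a page directory plus the DMA objects describing
 * the BAR1 and BAR3 apertures, each backed by its own VM.
 */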
static int
nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nvkm_object *heap;
	struct nvkm_vm *vm;
	struct nv50_bar *bar;
	u64 start, limit;
	int ret;

	ret = nvkm_bar_create(parent, engine, oclass, &bar);
	*pobject = nv_object(bar);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), NULL, 0x20000, 0,
			      NVOBJ_FLAG_HEAP, &bar->mem);
	heap = nv_object(bar->mem);
	if (ret)
		return ret;

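	/* The page directory has to sit at a fixed, chipset-dependent offset
	 * within the instance block (0x1400 on NV50, 0x0200 on G84 and
	 * later); bar->pad exists purely to push bar->pgd to that offset.
	 */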
	ret = nvkm_gpuobj_new(nv_object(bar), heap,
			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
			      0, 0, &bar->pad);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 0x4000, 0, 0, &bar->pgd);
	if (ret)
		return ret;

	/* BAR3: the aperture the kernel uses to access instance memory,
	 * placed at 0x0100000000 in the shared page directory.
	 */
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nvkm_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

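	/* Pre-allocate BAR3's page table: one 8-byte PTE per 4KiB page of
	 * aperture.  It's allocated by hand from the heap because the normal
	 * paths need a working BAR3 to write PTEs in the first place.  The
	 * post-decrement also makes "limit" inclusive for the DMA object
	 * written below.
	 */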
	ret = nvkm_gpuobj_new(nv_object(bar), heap,
			      ((limit-- - start) >> 12) * 8, 0x1000,
			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar3);
	if (ret)
		return ret;

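	/* DMA object describing the BAR3 aperture; the limit stored at
	 * 0x04/0x0c is inclusive, hence the earlier decrement.
	 */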
	nv_wo32(bar->bar3, 0x00, 0x7fc00000);
	nv_wo32(bar->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(bar->bar3, 0x08, lower_32_bits(start));
	nv_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(bar->bar3, 0x10, 0x00000000);
	nv_wo32(bar->bar3, 0x14, 0x00000000);

	/* BAR1: the aperture used for userspace mappings, set up the same
	 * way, except that its page tables are allocated on demand as
	 * mappings are created through umap.
	 */
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nvkm_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar1);
	if (ret)
		return ret;

	nv_wo32(bar->bar1, 0x00, 0x7fc00000);
	nv_wo32(bar->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(bar->bar1, 0x08, lower_32_bits(start));
	nv_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(bar->bar1, 0x10, 0x00000000);
	nv_wo32(bar->bar1, 0x14, 0x00000000);

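	/* Hook up the subdev methods; only the flush implementation differs
	 * between the original NV50 and G84 and later chips.
	 */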
	bar->base.alloc = nvkm_bar_alloc;
	bar->base.kmap = nv50_bar_kmap;
	bar->base.umap = nv50_bar_umap;
	bar->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		bar->base.flush = nv50_bar_flush;
	else
		bar->base.flush = g84_bar_flush;
	spin_lock_init(&bar->lock);
	return 0;
}

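/* Teardown: drop the references taken in the constructor, in reverse
 * order.  BAR3's page table was allocated by hand there, so it also has
 * to be released by hand before the VM reference is dropped.
 */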
static void
nv50_bar_dtor(struct nvkm_object *object)
{
	struct nv50_bar *bar = (void *)object;
	nvkm_gpuobj_ref(NULL, &bar->bar1);
	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
	nvkm_gpuobj_ref(NULL, &bar->bar3);
	if (bar->bar3_vm) {
		nvkm_gpuobj_ref(NULL, &bar->bar3_vm->pgt[0].obj[0]);
		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
	}
	nvkm_gpuobj_ref(NULL, &bar->pgd);
	nvkm_gpuobj_ref(NULL, &bar->pad);
	nvkm_gpuobj_ref(NULL, &bar->mem);
	nvkm_bar_destroy(&bar->base);
}

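/* Bring up the hardware: toggle the unit's enable bit in PMC (0x000200)
 * to reset it, flush the VM (0x100c80), then bind the instance block and
 * the BAR objects built in the constructor.
 */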
static int
nv50_bar_init(struct nvkm_object *object)
{
	struct nv50_bar *bar = (void *)object;
	struct nvkm_device *device = bar->base.subdev.device;
	int ret, i;

	ret = nvkm_bar_init(&bar->base);
	if (ret)
		return ret;

	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x100c80, 0x00060001);
	if (!nv_wait(bar, 0x100c80, 0x00000001, 0x00000000)) {
		nv_error(bar, "vm flush timeout\n");
		return -EBUSY;
	}

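	/* Point the chip at the instance block (0x001704, base in 4KiB
	 * units; the second write additionally sets what appears to be a
	 * valid bit), then bind the BAR1/BAR3 DMA objects (bit 31 = valid,
	 * offset in 16-byte units).  The registers at 0x001900 are simply
	 * cleared; their exact purpose isn't documented here.
	 */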
	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
	for (i = 0; i < 8; i++)
		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
	return 0;
}

static int
nv50_bar_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_bar *bar = (void *)object;
	return nvkm_bar_fini(&bar->base, suspend);
}

struct nvkm_oclass
nv50_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};