/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>

#include <core/gpuobj.h>

/* Map from compressed to corresponding uncompressed storage type.
 * The value 0xff represents an invalid storage type.
 */
const u8 gf100_pte_storage_type_map[256] =
{
	0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
	0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
	0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
	0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
	0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
	0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
	0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
	0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
	0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
	0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
	0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
	0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
	0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
	0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};

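/* Write both halves of a page directory entry.  Each GF100 PDE is two
 * 32-bit words: going by the nvkm convention (pgt[0] = small-page table,
 * pgt[1] = large-page table), word 0 points at the large-page (128KiB)
 * page table and word 1 at the small-page (4KiB) one, each encoded as
 * (table address >> 8) | 1 for "present".  A missing table leaves its
 * word zeroed, i.e. not present.
 */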
static void
gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
{
	u32 pde[2] = { 0, 0 };

	if (pgt[0])
		pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
	if (pgt[1])
		pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);

	nvkm_kmap(pgd);
	nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
	nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
	nvkm_done(pgd);
}

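/* Assemble the common part of a PTE.  As used by this code (layout
 * inferred from here, not taken from documentation):
 *   bit  0      - page present
 *   bit  1      - set when the VMA allows NV_MEM_ACCESS_SYS
 *   bits ..31   - physical address >> 8 (page alignment keeps the low
 *                 flag bits clear)
 *   bits 32..35 - target aperture (0 = VRAM here, 5/7 = system memory,
 *                 see gf100_vm_map_sg())
 *   bits 36..43 - storage (memory) type
 *   bits 44+    - compression tag, ORed in later by gf100_vm_map()
 */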
static inline u64
gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys >>= 8;

	phys |= 0x00000001; /* present */
	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= 0x00000002;

	phys |= ((u64)target  << 32);
	phys |= ((u64)memtype << 36);
	return phys;
}

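/* Fill PTEs for a contiguous run of VRAM pages.  "next" is the per-PTE
 * increment of the stored address: the page size implied by the node type,
 * shifted right by 8 to match the shift in gf100_vm_addr().  If the
 * allocation carries compression tags, the starting tag (one per 128KiB of
 * offset into the mapping) is placed in bits 44+ and advanced by one per
 * PTE, and the tags are cleared through the LTC before use.
 */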
static void
gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
	     struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	u64 next = 1 << (vma->node->type - 8);

	phys  = gf100_vm_addr(vma, phys, mem->memtype, 0);
	pte <<= 3;

	if (mem->tag) {
		struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
		u32 tag = mem->tag->offset + (delta >> 17);
		phys |= (u64)tag << (32 + 12);
		next |= (u64)1   << (32 + 12);
		nvkm_ltc_tags_clear(ltc, tag, cnt);
	}

	nvkm_kmap(pgt);
	while (cnt--) {
		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
		phys += next;
		pte  += 8;
	}
	nvkm_done(pgt);
}

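/* Fill PTEs from a list of DMA addresses (system memory pages).  The
 * target aperture is picked from the VMA's access flags: 7 when
 * NV_MEM_ACCESS_NOSNOOP is set, 5 otherwise.  The storage type is run
 * through gf100_pte_storage_type_map[] so that compressed types fall back
 * to their uncompressed equivalents, which system memory requires.
 */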
static void
gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
		struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
	/* compressed storage types are invalid for system memory */
	u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];

	nvkm_kmap(pgt);
	pte <<= 3;
	while (cnt--) {
		u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
	nvkm_done(pgt);
}

static void
gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
	nvkm_kmap(pgt);
	pte <<= 3;
	while (cnt--) {
		nvkm_wo32(pgt, pte + 0, 0x00000000);
		nvkm_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
	nvkm_done(pgt);
}

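/* Flush the TLBs for every page directory bound to this VM.  For each PDB
 * this waits (see the comment below) for what appears to be a free flush
 * slot in 0x100c80, writes the page directory base to 0x100cb8, triggers
 * the flush through 0x100cbc, then waits for 0x100c80 bit 15, which looks
 * like a "flush queued" indication.  The register-level behaviour here is
 * inferred rather than documented.
 */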
static void
gf100_vm_flush(struct nvkm_vm *vm)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_device *device = mmu->subdev.device;
	struct nvkm_vm_pgd *vpgd;
	u32 type;

	type = 0x00000001; /* PAGE_ALL */
	if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */

	mutex_lock(&mmu->subdev.mutex);
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		/* looks like maybe a "free flush slots" counter, the
		 * faster you write to 0x100cbc, the more it decreases
		 */
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
				break;
		);

		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);

		/* wait for flush to be queued? */
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x100c80) & 0x00008000)
				break;
		);
	}
	mutex_unlock(&mmu->subdev.mutex);
}

static int
gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
		struct lock_class_key *key, struct nvkm_vm **pvm)
{
	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
}

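/* MMU parameters for the GF100 (Fermi) family: a 40-bit virtual address
 * space with 40-bit DMA addressing, 4KiB small pages, 128KiB large pages,
 * and page tables that each appear to span 1 << 27 bytes of virtual
 * address space (the "27 - 12" below being that span minus the small-page
 * shift).
 */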
static const struct nvkm_mmu_func
gf100_mmu = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.pgt_bits  = 27 - 12,
	.spg_shift = 12,
	.lpg_shift = 17,
	.create = gf100_vm_create,
	.map_pgt = gf100_vm_map_pgt,
	.map = gf100_vm_map,
	.map_sg = gf100_vm_map_sg,
	.unmap = gf100_vm_unmap,
	.flush = gf100_vm_flush,
};

int
gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
	return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
}