/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/gr.h>

#include <nvif/if500d.h>
#include <nvif/unpack.h>

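/* Write 'ptes' PTEs starting at index 'ptei'.  Entries are emitted in
 * the largest naturally-aligned power-of-two runs available (up to 128
 * PTEs), with log2 of the run length stored in bits 9:7 of each entry;
 * this looks to be the contiguous-block size hint used by the MMU on
 * this generation.
 */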
static inline void
nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 next = addr + map->type, data;
	u32 pten;
	int log2blk;

	map->type += ptes * map->ctag;

	while (ptes) {
		for (log2blk = 7; log2blk >= 0; log2blk--) {
			pten = 1 << log2blk;
			if (ptes >= pten && IS_ALIGNED(ptei, pten))
				break;
		}

		data  = next | (log2blk << 7);
		next += pten * map->next;
		ptes -= pten;

		while (pten--)
			VMM_WO064(pt, vmm, ptei++ * 8, data);
	}
}

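/* The VMM_MAP_ITER_* helpers walk the backing store (scatterlist, DMA
 * address array, or nvkm_memory) and invoke nv50_vmm_pgt_pte() once
 * per physically-contiguous range.
 */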
static void
nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

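/* Fast path: when the mapping page size matches the CPU page size,
 * each DMA address fills exactly one PTE and can be written directly,
 * skipping the range-iteration helper.
 */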
static void
nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = *map->dma++ + map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

static void
nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

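/* Zero-fill the range; a cleared valid bit (bit 0) marks a PTE as
 * unmapped.
 */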
static void
nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}

static const struct nvkm_vmm_desc_func
nv50_vmm_pgt = {
	.unmap = nv50_vmm_pgt_unmap,
	.mem = nv50_vmm_pgt_mem,
	.dma = nv50_vmm_pgt_dma,
	.sgl = nv50_vmm_pgt_sgl,
};

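/* Build the PDE value describing a page table.  64KiB-page tables use
 * type 1; 4KiB-page tables use type 3 plus a size field (bits 6:5)
 * allowing full, 1/4, 1/8 or 1/16-sized tables.  Bits 3:2 encode the
 * aperture the table lives in (VRAM or host memory).  An absent table
 * yields a poison pattern in the (ignored) upper bits, with the type
 * bits clear so the PDE reads as invalid.
 */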
static bool
nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
{
	struct nvkm_mmu_pt *pt;
	u64 data = 0xdeadcafe00000000ULL;
	if (pgt && (pt = pgt->pt[0])) {
		switch (pgt->page) {
		case 16: data = 0x00000001; break;
		case 12: data = 0x00000003;
			switch (nvkm_memory_size(pt->memory)) {
			case 0x100000: data |= 0x00000000; break;
			case 0x040000: data |= 0x00000020; break;
			case 0x020000: data |= 0x00000040; break;
			case 0x010000: data |= 0x00000060; break;
			default:
				WARN_ON(1);
				return false;
			}
			break;
		default:
			WARN_ON(1);
			return false;
		}

		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
		case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
		case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
		default:
			WARN_ON(1);
			return false;
		}

		data |= pt->addr;
	}
	*pdata = data;
	return true;
}

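/* A PDE changed: propagate it to every instance block joined to this
 * VMM, since each one holds its own copy of the page directory.
 */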
static void
nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_join *join;
	u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
	u64 data;

	if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
		return;

	list_for_each_entry(join, &vmm->join, head) {
		nvkm_kmap(join->inst);
		nvkm_wo64(join->inst, pdeo, data);
		nvkm_done(join->inst);
	}
}

static const struct nvkm_vmm_desc_func
nv50_vmm_pgd = {
	.pde = nv50_vmm_pgd_pde,
};

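/* MMU layout descriptors for 4KiB and 64KiB pages: a 2048-entry page
 * directory on top of page tables of 2^17 or 2^13 entries respectively,
 * so each PDE spans 512MiB of the 40-bit virtual address space.
 */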
const struct nvkm_vmm_desc
nv50_vmm_desc_12[] = {
	{ PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
	{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
	{}
};

const struct nvkm_vmm_desc
nv50_vmm_desc_16[] = {
	{ PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
	{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
	{}
};

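/* Invalidate the TLBs of every engine with references to this VMM by
 * writing the engine's VM ID to 0x100c80 and polling for completion.
 */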
void
nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
{
	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
	struct nvkm_device *device = subdev->device;
	int i, id;

	mutex_lock(&vmm->mmu->mutex);
	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
		if (!atomic_read(&vmm->engref[i]))
			continue;

		/* unfortunate hw bug workaround... */
		if (i == NVKM_ENGINE_GR && device->gr) {
			int ret = nvkm_gr_tlb_flush(device->gr);
			if (ret != -ENODEV)
				continue;
		}

		switch (i) {
		case NVKM_ENGINE_GR    : id = 0x00; break;
		case NVKM_ENGINE_VP    :
		case NVKM_ENGINE_MSPDEC: id = 0x01; break;
		case NVKM_SUBDEV_BAR   : id = 0x06; break;
		case NVKM_ENGINE_MSPPP :
		case NVKM_ENGINE_MPEG  : id = 0x08; break;
		case NVKM_ENGINE_BSP   :
		case NVKM_ENGINE_MSVLD : id = 0x09; break;
		case NVKM_ENGINE_CIPHER:
		case NVKM_ENGINE_SEC   : id = 0x0a; break;
		case NVKM_ENGINE_CE    : id = 0x0d; break;
		default:
			continue;
		}

		nvkm_wr32(device, 0x100c80, (id << 16) | 1);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
				break;
		) < 0)
			nvkm_error(subdev, "%s mmu invalidate timeout\n", nvkm_subdev_type[i]);
	}
	mutex_unlock(&vmm->mmu->mutex);
}

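/* Validate map arguments and construct the PTE template in map->type:
 * valid/read-only/privilege bits, target aperture, storage kind, and,
 * if requested and supported, compression tags.
 */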
int
nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
	       struct nvkm_vmm_map *map)
{
	const struct nvkm_vmm_page *page = map->page;
	union {
		struct nv50_vmm_map_vn vn;
		struct nv50_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_ram *ram = device->fb->ram;
	struct nvkm_memory *memory = map->memory;
	u8  aper, kind, kind_inv, comp, priv, ro;
	int kindn, ret = -ENOSYS;
	const u8 *kindm;

	map->type = map->ctag = 0;
	map->next = 1 << page->shift;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind = args->v0.kind & 0x7f;
		comp = args->v0.comp & 0x03;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		ro   = 0;
		priv = 0;
		kind = 0x00;
		comp = 0;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	switch (nvkm_memory_target(memory)) {
	case NVKM_MEM_TARGET_VRAM:
		if (ram->stolen) {
			map->type |= ram->stolen;
			aper = 3;
		} else {
			aper = 0;
		}
		break;
	case NVKM_MEM_TARGET_HOST:
		aper = 2;
		break;
	case NVKM_MEM_TARGET_NCOH:
		aper = 3;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
	if (kind >= kindn || kindm[kind] == kind_inv) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (map->mem && map->mem->type != kindm[kind]) {
		VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
			  kindm[kind], map->mem->type);
		return -EINVAL;
	}

	if (comp) {
		u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		if (!map->no_comp) {
			ret = nvkm_memory_tags_get(memory, device, tags, NULL,
						   &map->tags);
			if (ret) {
				VMM_DEBUG(vmm, "comp %d", ret);
				return ret;
			}

			if (map->tags->mn) {
				u32 tags = map->tags->mn->offset +
					   (map->offset >> 16);
				map->ctag |= (u64)comp << 49;
				map->type |= (u64)comp << 47;
				map->type |= (u64)tags << 49;
				map->next |= map->ctag;
			}
		}
	}

	map->type |= BIT(0); /* Valid. */
	map->type |= (u64)ro << 3;
	map->type |= (u64)aper << 4;
	map->type |= (u64)priv << 6;
	map->type |= (u64)kind << 40;
	return 0;
}

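/* Detach an instance block from this VMM; it will no longer receive
 * page directory updates.
 */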
void
nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	struct nvkm_vmm_join *join;

	list_for_each_entry(join, &vmm->join, head) {
		if (join->inst == inst) {
			list_del(&join->head);
			kfree(join);
			break;
		}
	}
}

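/* Attach an instance block to this VMM and seed its copy of the page
 * directory from the VMM's current page table state.
 */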
int
nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
	struct nvkm_vmm_join *join;
	int ret = 0;
	u64 data;
	u32 pdei;

	if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
		return -ENOMEM;
	join->inst = inst;
	list_add_tail(&join->head, &vmm->join);

	nvkm_kmap(join->inst);
	for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
		if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
			ret = -EINVAL;
			break;
		}
		nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
	}
	nvkm_done(join->inst);
	return ret;
}

static const struct nvkm_vmm_func
nv50_vmm = {
	.join = nv50_vmm_join,
	.part = nv50_vmm_part,
	.valid = nv50_vmm_valid,
	.flush = nv50_vmm_flush,
	.page_block = 1 << 29,
	.page = {
		{ 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};

int
nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{
	return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
			     argv, argc, key, name, pvmm);
}