xref: /openbmc/linux/drivers/gpu/drm/v3d/v3d_mmu.c (revision 12e24d8a)
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

/**
 * DOC: Broadcom V3D MMU
 *
 * The V3D 3.x hardware (compared to VC4) now includes an MMU.  It has
 * a single level of page tables for the V3D's 4GB address space to
 * map to AXI bus addresses, thus it could need up to 4MB of
 * physically contiguous memory to store the PTEs.
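 * (The 4GB space is 2^20 pages of 4KB; at 4 bytes per 32-bit PTE that
 * is the 4MB worst case.)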
 *
 * Because the 4MB of contiguous memory for page tables is precious,
 * and switching between them is expensive, we load all BOs into the
 * same 4GB address space.
 *
 * To protect clients from each other, we should use the GMP to
 * quickly mask out (at 128KB granularity) which pages are available to
 * each client.  This is not yet implemented.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

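/* The V3D MMU maps memory in 4KB pages, independent of the CPU's
 * PAGE_SIZE; v3d_mmu_insert_ptes() below writes one PTE per 4KB chunk
 * of each CPU page.
 */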
#define V3D_MMU_PAGE_SHIFT 12

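/* Each PTE is a 32-bit word: the low bits hold the bus address of the
 * backing 4KB page in units of pages (v3d_mmu_insert_ptes() keeps this
 * below 2^24, i.e. a 36-bit bus address), and the V3D_PTE_* flags below
 * are ORed on top.  As an illustration, a writeable page backed by bus
 * address 0x12345000 would be mapped by the PTE
 *
 *   V3D_PTE_VALID | V3D_PTE_WRITEABLE | (0x12345000 >> V3D_MMU_PAGE_SHIFT)
 *
 * which is exactly what v3d_mmu_insert_ptes() builds for each page.
 */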
/* Note: All PTEs for the 1MB superpage must be filled with the
 * superpage bit set.
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)

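/* Flush the MMU's cached view of the page table: flush the MMU cache
 * (MMUC) and then clear the TLB, polling (with a timeout) until each
 * operation completes.  The callers below run this after every page
 * table update so the hardware observes the new PTEs.
 */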
static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
	int ret;

	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_FLUSH |
		  V3D_MMUC_CONTROL_ENABLE);

	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
			 V3D_MMUC_CONTROL_FLUSHING), 100);
	if (ret) {
		dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
		return ret;
	}

	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
		  V3D_MMU_CTL_TLB_CLEAR);

	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "MMU TLB clear wait idle failed\n");

	return ret;
}

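/* Point the hardware at the single shared page table and turn
 * translation on: program the physical base address of the PTE array,
 * enable the MMU with aborts and interrupts for invalid-PTE,
 * write-violation and cap-exceeded faults, point the illegal-address
 * register at the dedicated scratch page, enable the MMU cache, and
 * flush so the new configuration takes effect.
 */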
int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
	V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
	V3D_WRITE(V3D_MMU_CTL,
		  V3D_MMU_CTL_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ABORT |
		  V3D_MMU_CTL_PT_INVALID_INT |
		  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
		  V3D_MMU_CTL_WRITE_VIOLATION_INT |
		  V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
		  V3D_MMU_CTL_CAP_EXCEEDED_INT);
	V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
		  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
		  V3D_MMU_ILLEGAL_ADDR_ENABLE);
	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

	return v3d_mmu_flush_all(v3d);
}

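/* Map a BO into the shared address space: walk the BO's DMA-mapped sg
 * table and write one PTE per 4KB page into the range reserved for the
 * BO (starting at bo->node.start), then flush so the MMU picks up the
 * new entries.
 */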
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
	struct drm_gem_shmem_object *shmem_obj = &bo->base;
	struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
	u32 page = bo->node.start;
	u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
	struct sg_dma_page_iter dma_iter;

	for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
		dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
		u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
		u32 pte = page_prot | page_address;
		u32 i;

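		/* The bus page numbers written below must fit in 24 bits,
		 * i.e. the backing pages have to sit below a 36-bit (64GB)
		 * bus address.
		 */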
		BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
		       BIT(24));
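		/* A CPU page may span several 4KB MMU pages when PAGE_SIZE is
		 * larger than 4KB; emit one consecutive PTE per 4KB chunk.
		 */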
		for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
			v3d->pt[page++] = pte + i;
	}

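	/* We should have written exactly one PTE per 4KB of the BO. */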
	WARN_ON_ONCE(page - bo->node.start !=
		     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}

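/* Unmap a BO: zero every PTE covering its range in the shared page
 * table, then flush so the MMU drops any cached translations for it.
 */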
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
	struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
	u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
	u32 page;

	for (page = bo->node.start; page < bo->node.start + npages; page++)
		v3d->pt[page] = 0;

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}