// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

#define GMP_GRANULARITY (128 * 1024)

/* Enum for each of the V3D queues. We maintain various queue
 * tracking as an array because at some point we'll want to support
 * the TFU (texture formatting unit) as another queue.
 */
enum v3d_queue {
	V3D_BIN,
	V3D_RENDER,
};

#define V3D_MAX_QUEUES (V3D_RENDER + 1)

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
	u64 finished_seqno;
};

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version
	 * and revision.
	 */
	int ver;

	struct device *dev;
	struct platform_device *pdev;
	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;
	struct clk *clk;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page. When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space. All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_exec_info *bin_job;
	struct v3d_exec_info *render_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.
	 */
	spinlock_t job_lock;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;
};

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return (struct v3d_dev *)dev->dev_private;
}

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};

/* Tracks a mapping of a BO into a per-fd address space */
struct v3d_vma {
	struct v3d_page_table *pt;
	struct list_head list; /* entry in v3d_bo.vmas */
};

struct v3d_bo {
	struct drm_gem_object base;

	struct mutex lock;

	struct drm_mm_node node;

	u32 pages_refcount;
	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas; /* list of v3d_vma */

	/* List entry for the BO's position in
	 * v3d_exec_info->unref_list
	 */
	struct list_head unref_head;

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;
};

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
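
/* Note: the register helpers above are not hygienic macros; they expand to
 * readl()/writel() on a local variable named v3d (a struct v3d_dev *), which
 * must already be in scope at the call site. A minimal sketch of a caller,
 * with SOME_REG standing in as a placeholder for a real offset from
 * v3d_regs.h:
 *
 *	struct v3d_dev *v3d = to_v3d_dev(dev);
 *	u32 status = V3D_READ(SOME_REG);
 *
 *	V3D_CORE_WRITE(0, SOME_REG, status);
 */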

struct v3d_job {
	struct drm_sched_job base;

	struct v3d_exec_info *exec;

	/* An optional fence userspace can pass in for the job to depend on. */
	struct dma_fence *in_fence;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *done_fence;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;
};

struct v3d_exec_info {
	struct v3d_dev *v3d;

	struct v3d_job bin, render;

	/* Fence for when the scheduler considers the binner to be
	 * done, for render to depend on.
	 */
	struct dma_fence *bin_done_fence;

	struct kref refcount;

	/* This is the array of BOs that were looked up at the start of exec. */
	struct v3d_bo **bo;
	u32 bo_count;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			if (!(COND)) \
				ret__ = -ETIMEDOUT; \
			break; \
		} \
		msleep(1); \
	} \
	ret__; \
})

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

/* v3d_bo.c */
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int v3d_gem_fault(struct vm_fault *vmf);
int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);

/* v3d_debugfs.c */
int v3d_debugfs_init(struct drm_minor *minor);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void v3d_exec_put(struct v3d_exec_info *exec);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_flush_caches(struct v3d_dev *v3d);

/* v3d_irq.c */
void v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
		       u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
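
/* A minimal sketch of how the timeout helpers above are meant to be used
 * (SOME_STATUS_REG and SOME_BUSY_BIT are placeholders, not real defines from
 * v3d_regs.h): wait_for() polls its condition roughly once per millisecond
 * and evaluates to 0 on success or -ETIMEDOUT if the condition is still false
 * after MS milliseconds, e.g.
 *
 *	ret = wait_for(!(V3D_READ(SOME_STATUS_REG) & SOME_BUSY_BIT), 100);
 *	if (ret)
 *		DRM_ERROR("timed out waiting for the hardware to go idle\n");
 *
 * nsecs_to_jiffies_timeout() clamps a u64 nanosecond timeout (for example one
 * supplied by userspace through a wait ioctl) to at most MAX_JIFFY_OFFSET
 * jiffies so it can be passed safely to a schedule_timeout()-style wait.
 */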