/*
 * Copyright (C) 2014 Etnaviv Project
 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command buffer helpers:
 */

/* Append one 32-bit word to the command buffer. */
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;

	BUG_ON(buffer->user_size >= buffer->size);

	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write a register via cmd stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* the low bits of the WAIT header hold the delay, here 200 cycles */
	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}

static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}
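/*
 * A note on the SEM/STALL pairing used throughout this file: arming
 * the semaphore token on its own does not block the front end, so a
 * flush that must complete before execution continues is emitted as
 * the three-command sequence
 *
 *	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
 *	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 *	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 *
 * as in etnaviv_cmd_select_pipe() below: the STALL parks the FE on the
 * token until the PE has signalled it, so no later command is fetched
 * before the flush has drained through the pixel engine.
 */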
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa.  Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

/* GPU address of a command buffer, as seen by the front end. */
static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
{
	return buf->paddr - gpu->memory_base;
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	/* the argument must be in place before the opcode makes it live */
	lw[1] = arg;
	mb();
	lw[0] = cmd;
	mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	return gpu_va(gpu, buffer) + buffer->user_size;
}

u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;

	/* initialize buffer */
	buffer->user_size = 0;

	/* a WAIT followed by a LINK back to that WAIT: the FE idle loop */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);

	/* return the prefetch size in 64-bit command words */
	return buffer->user_size / 8;
}

void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;

	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		unsigned int dwords = 7;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D)
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_END(buffer);

		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}
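/*
 * Ring buffer operation, as a sketch: etnaviv_buffer_init() above
 * leaves the front end spinning in a two-slot idle loop,
 *
 *	loop:	WAIT (200 cycles)
 *		LINK, prefetch 2 -> loop
 *
 * and both etnaviv_buffer_end() above and etnaviv_buffer_queue() below
 * break out of it by rewriting the WAIT in place via
 * etnaviv_buffer_replace_wait() - argument word first, then the opcode
 * word, so the FE can never fetch a LINK with a stale target.
 */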
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	link_target = gpu_va(gpu, cmdbuf);
	link_dwords = cmdbuf->size / 8;

	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append any MMU flush load state and/or pipe switch
	 * commands, followed by a new link to this buffer - at most six
	 * additional 64-bit command words.
	 */
	if (gpu->mmu->need_flush || gpu->switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (gpu->mmu->need_flush)
			extra_dwords += 1;

		/* pipe switch commands */
		if (gpu->switch_context)
			extra_dwords += 4;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

		if (gpu->mmu->need_flush) {
			/* Add the MMU flush */
			CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
				       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
				       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);

			gpu->mmu->need_flush = false;
		}

		if (gpu->switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
			gpu->exec_state = cmdbuf->exec_state;
			gpu->switch_context = false;
		}

		/* And the link to the submitted buffer */
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer.  return_target is the ring target address.
	 * We need three dwords: event, wait, link.
	 */
	return_dwords = 3;
	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append event, wait and link pointing back to the wait
	 * command to the ring buffer.
	 */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, return_target + 8);

	if (drm_debug & DRM_UT_DRIVER)
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);

	if (drm_debug & DRM_UT_DRIVER) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
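/*
 * Resulting command flow after etnaviv_buffer_queue(), as a sketch
 * (the maintenance block only exists when an MMU flush or a pipe
 * switch was needed):
 *
 *	old ring WAIT, rewritten to a LINK
 *	  -> [ MMU flush ][ pipe switch ][ LINK ]    (maintenance, ring)
 *	  -> [ user commands ... ][ LINK ]           (cmdbuf)
 *	  -> [ EVENT ][ WAIT ][ LINK -> the WAIT ]   (ring, new idle loop)
 *
 * The EVENT is signalled from the PE so the kernel can retire 'event',
 * while the FE has already moved on to spinning in the new WAIT/LINK
 * loop, ready for the next submit.
 */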