/*
 * Copyright (C) 2014 Etnaviv Project
 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */

static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;

	BUG_ON(buffer->user_size >= buffer->size);

	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write a register via cmd stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}

static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}
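
/*
 * Note: throughout this file, engine synchronisation is emitted as a
 * semaphore/stall pair, roughly:
 *
 *	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 *	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 *
 * which holds the front end (FE) at the STALL until the pixel engine (PE)
 * has passed the semaphore token, so preceding flushes and state loads
 * have taken effect before the FE fetches further commands.
 */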

static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa. Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else. 'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	lw[1] = arg;
	mb();
	lw[0] = cmd;
	mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
}

u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;

	/* initialize buffer */
	buffer->user_size = 0;

	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
		 buffer->user_size - 4);

	return buffer->user_size / 8;
}

u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;

	buffer->user_size = 0;

	if (gpu->identity.features & chipFeatures_PIPE_3D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	if (gpu->identity.features & chipFeatures_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}
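
/*
 * Note: after etnaviv_buffer_init() the kernel ring buffer ends in a small
 * busy loop which the FE spins on until new work is queued:
 *
 *	WAIT (~200 cycles) <--+
 *	LINK -----------------+
 *
 * etnaviv_buffer_end(), etnaviv_sync_point_queue() and etnaviv_buffer_queue()
 * below all append their commands behind this loop and then patch the WAIT
 * via etnaviv_buffer_replace_wait(), so the FE only ever branches to fully
 * written command words.
 */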

void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;

	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		unsigned int dwords = 7;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D)
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_END(buffer);

		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 dwords, target;

	/*
	 * We need at most 4 dwords in the return target:
	 * 1 event + 1 end + 1 wait + 1 link.
	 */
	dwords = 4;
	target = etnaviv_buffer_reserve(gpu, buffer, dwords);

	/* Signal sync point event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* Stop the FE to 'pause' the GPU */
	CMD_END(buffer);

	/* Append waitlink */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
			    buffer->user_size - 4);

	/*
	 * Kick off the 'sync point' command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
}
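
/*
 * Rough sketch of the ring buffer after etnaviv_buffer_queue() below has
 * appended a submit; the bracketed part is only emitted when an MMU flush
 * or a pipe switch is required:
 *
 *	ring buffer				user command buffer
 *	[ MMU flush / pipe switch ]
 *	LINK -------------------------------->	user commands ...
 *	cache flush, SEM/STALL <--------------	LINK back to ring
 *	EVENT
 *	WAIT (~200 cycles) <--+
 *	LINK -----------------+
 *
 * The WAIT/LINK pair left over from the previous submit is then replaced
 * with a LINK to the top of this block, handing the new work to the FE.
 */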

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	link_target = etnaviv_cmdbuf_get_va(cmdbuf);
	link_dwords = cmdbuf->size / 8;

	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append an MMU flush load state and/or a pipe switch,
	 * followed by a new link to this buffer - up to eight additional
	 * dwords in total.
	 */
	if (gpu->mmu->need_flush || gpu->switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (gpu->mmu->need_flush) {
			if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (gpu->switch_context)
			extra_dwords += 4;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

		if (gpu->mmu->need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			} else {
				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
					       VIVS_MMUv2_CONFIGURATION_MODE_MASK |
					       VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
					       VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
					  SYNC_RECIPIENT_PE);
			}

			gpu->mmu->need_flush = false;
		}

		if (gpu->switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
			gpu->exec_state = cmdbuf->exec_state;
			gpu->switch_context = false;
		}

		/* And the link to the submitted buffer */
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer. return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;
	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_PE2D);
	} else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_DEPTH |
			       VIVS_GL_FLUSH_CACHE_COLOR);
		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
			       VIVS_TS_FLUSH_CACHE_FLUSH);
	}
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
			    buffer->user_size - 4);

	if (drm_debug & DRM_UT_DRIVER)
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target, etnaviv_cmdbuf_get_va(cmdbuf),
			cmdbuf->vaddr);

	if (drm_debug & DRM_UT_DRIVER) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}