/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_DRM_H__
#define __ETNAVIV_DRM_H__

#include "drm.h"

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints:
 *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
 *     user/kernel compatibility
 *  2) Keep fields aligned to their size
 *  3) Because of how drm_ioctl() works, we can add new fields at
 *     the end of an ioctl if some care is taken: drm_ioctl() will
 *     zero out the new fields at the tail of the ioctl, so a zero
 *     value should have a backwards compatible meaning. And for
 *     output params, userspace won't see the newly added output
 *     fields.. so that has to be somehow ok.
 */

/* timeouts are specified in clock-monotonic absolute times (to simplify
 * restarting interrupted ioctls). The following struct is logically the
 * same as 'struct timespec' but 32/64b ABI safe.
 */
struct drm_etnaviv_timespec {
	__s64 tv_sec;		/* seconds */
	__s64 tv_nsec;		/* nanoseconds */
};

#define ETNAVIV_PARAM_GPU_MODEL                     0x01
#define ETNAVIV_PARAM_GPU_REVISION                  0x02
#define ETNAVIV_PARAM_GPU_FEATURES_0                0x03
#define ETNAVIV_PARAM_GPU_FEATURES_1                0x04
#define ETNAVIV_PARAM_GPU_FEATURES_2                0x05
#define ETNAVIV_PARAM_GPU_FEATURES_3                0x06
#define ETNAVIV_PARAM_GPU_FEATURES_4                0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5                0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6                0x09

#define ETNAVIV_PARAM_GPU_STREAM_COUNT              0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX              0x11
#define ETNAVIV_PARAM_GPU_THREAD_COUNT              0x12
#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE         0x13
#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT         0x14
#define ETNAVIV_PARAM_GPU_PIXEL_PIPES               0x15
#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
#define ETNAVIV_PARAM_GPU_BUFFER_SIZE               0x17
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT         0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS             0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS              0x1a

#define ETNA_MAX_PIPES 4

struct drm_etnaviv_param {
	__u32 pipe;		/* in */
	__u32 param;		/* in, ETNAVIV_PARAM_x */
	__u64 value;		/* out (get_param) or in (set_param) */
};

/*
 * GEM buffers:
 */

#define ETNA_BO_CACHE_MASK	0x000f0000
/* cache modes */
#define ETNA_BO_CACHED		0x00010000
#define ETNA_BO_WC		0x00020000
#define ETNA_BO_UNCACHED	0x00040000
/* map flags */
#define ETNA_BO_FORCE_MMU	0x00100000

struct drm_etnaviv_gem_new {
	__u64 size;		/* in */
	__u32 flags;		/* in, mask of ETNA_BO_x */
	__u32 handle;		/* out */
};

struct drm_etnaviv_gem_info {
	__u32 handle;		/* in */
	__u32 pad;
	__u64 offset;		/* out, offset to pass to mmap() */
};

#define ETNA_PREP_READ		0x01
#define ETNA_PREP_WRITE		0x02
#define ETNA_PREP_NOSYNC	0x04

struct drm_etnaviv_gem_cpu_prep {
	__u32 handle;		/* in */
	__u32 op;		/* in, mask of ETNA_PREP_x */
	struct drm_etnaviv_timespec timeout;	/* in */
};

struct drm_etnaviv_gem_cpu_fini {
	__u32 handle;		/* in */
	__u32 flags;		/* in, placeholder for now, no defined values */
};
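/* Illustrative userspace sketch (not part of the UAPI): allocating a BO,
 * looking up its mmap() offset, and bracketing CPU access with CPU_PREP /
 * CPU_FINI. It assumes a file descriptor 'fd' opened on the etnaviv DRM
 * node and the drmIoctl() wrapper from libdrm; 'bo_new', 'info', 'map',
 * 'prep' and 'fini' are hypothetical local names. Note that the timeout
 * is an *absolute* CLOCK_MONOTONIC time, as described above.
 *
 *	struct drm_etnaviv_gem_new bo_new = {
 *		.size = 4096,
 *		.flags = ETNA_BO_WC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &bo_new);
 *
 *	struct drm_etnaviv_gem_info info = { .handle = bo_new.handle };
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &info);
 *	void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, info.offset);
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	struct drm_etnaviv_gem_cpu_prep prep = {
 *		.handle = bo_new.handle,
 *		.op = ETNA_PREP_WRITE,
 *		.timeout = {
 *			.tv_sec = ts.tv_sec + 1,	// wait up to ~1s
 *			.tv_nsec = ts.tv_nsec,
 *		},
 *	};
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep);
 *	// ... CPU access through 'map' ...
 *	struct drm_etnaviv_gem_cpu_fini fini = { .handle = bo_new.handle };
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);
 */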
/*
 * Cmdstream Submission:
 */

/* The value written into the cmdstream is logically:
 * relocbuf->gpuaddr + reloc_offset
 *
 * NOTE that relocs must be sorted by order of increasing submit_offset,
 * otherwise EINVAL.
 */
struct drm_etnaviv_gem_submit_reloc {
	__u32 submit_offset;	/* in, offset from submit_bo */
	__u32 reloc_idx;	/* in, index of reloc_bo buffer */
	__u64 reloc_offset;	/* in, offset from start of reloc_bo */
	__u32 flags;		/* in, placeholder for now, no defined values */
};

/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
 * cmdstream buffer(s) themselves or reloc entries) has one (and only
 * one) entry in the submit->bos[] table.
 *
 * As an optimization, the current buffer (gpu virtual address) can be
 * passed back through the 'presumed' field. If on a subsequent reloc,
 * userspace passes back a 'presumed' address that is still valid,
 * then patching the cmdstream for this entry is skipped. This can
 * avoid the kernel needing to map/access the cmdstream bo in the
 * common case.
 */
#define ETNA_SUBMIT_BO_READ	0x0001
#define ETNA_SUBMIT_BO_WRITE	0x0002
struct drm_etnaviv_gem_submit_bo {
	__u32 flags;		/* in, mask of ETNA_SUBMIT_BO_x */
	__u32 handle;		/* in, GEM handle */
	__u64 presumed;		/* in/out, presumed buffer address */
};

/* Each cmdstream submit consists of a table of buffers involved, and
 * one or more cmdstream buffers. This allows for conditional execution
 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
 */
#define ETNA_PIPE_3D	0x00
#define ETNA_PIPE_2D	0x01
#define ETNA_PIPE_VG	0x02
struct drm_etnaviv_gem_submit {
	__u32 fence;		/* out */
	__u32 pipe;		/* in */
	__u32 exec_state;	/* in, initial execution state (ETNA_PIPE_x) */
	__u32 nr_bos;		/* in, number of submit_bo's */
	__u32 nr_relocs;	/* in, number of submit_reloc's */
	__u32 stream_size;	/* in, cmdstream size */
	__u64 bos;		/* in, ptr to array of submit_bo's */
	__u64 relocs;		/* in, ptr to array of submit_reloc's */
	__u64 stream;		/* in, ptr to cmdstream */
};
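/* Illustrative userspace sketch (not part of the UAPI): a minimal submit
 * with one BO and one reloc patching that BO's GPU address into the
 * cmdstream. It assumes 'fd' is an etnaviv DRM fd, 'target_handle' is a
 * GEM handle the stream writes to, 'cmd' / 'cmd_size' describe the
 * cmdstream in user memory, 'target_ofs' is the stream offset to patch,
 * and drmIoctl() comes from libdrm; all of these names are hypothetical.
 * Pointers are passed as __u64 to stay 32/64-bit safe, per the rules at
 * the top of this file.
 *
 *	struct drm_etnaviv_gem_submit_bo bo = {
 *		.flags = ETNA_SUBMIT_BO_WRITE,
 *		.handle = target_handle,
 *	};
 *	struct drm_etnaviv_gem_submit_reloc reloc = {
 *		.submit_offset = target_ofs,	// where in the stream to patch
 *		.reloc_idx = 0,			// index into the bos[] table
 *		.reloc_offset = 0,
 *	};
 *	struct drm_etnaviv_gem_submit submit = {
 *		.pipe = 0,
 *		.exec_state = ETNA_PIPE_3D,
 *		.nr_bos = 1,
 *		.nr_relocs = 1,
 *		.stream_size = cmd_size,
 *		.bos = (__u64)(uintptr_t)&bo,
 *		.relocs = (__u64)(uintptr_t)&reloc,
 *		.stream = (__u64)(uintptr_t)cmd,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &submit);
 *	// submit.fence can now be passed to DRM_IOCTL_ETNAVIV_WAIT_FENCE
 */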
/* The normal way to synchronize with the GPU is just to CPU_PREP on
 * a buffer if you need to access it from the CPU (other cmdstream
 * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
 * handle the required synchronization under the hood). This ioctl
 * mainly just exists as a way to implement the gallium pipe_fence
 * APIs without requiring a dummy bo to synchronize on.
 */
#define ETNA_WAIT_NONBLOCK	0x01
struct drm_etnaviv_wait_fence {
	__u32 pipe;		/* in */
	__u32 fence;		/* in */
	__u32 flags;		/* in, mask of ETNA_WAIT_x */
	__u32 pad;
	struct drm_etnaviv_timespec timeout;	/* in */
};

#define ETNA_USERPTR_READ	0x01
#define ETNA_USERPTR_WRITE	0x02
struct drm_etnaviv_gem_userptr {
	__u64 user_ptr;		/* in, page aligned user pointer */
	__u64 user_size;	/* in, page aligned user size */
	__u32 flags;		/* in, flags */
	__u32 handle;		/* out, non-zero handle */
};

struct drm_etnaviv_gem_wait {
	__u32 pipe;		/* in */
	__u32 handle;		/* in, bo to be waited for */
	__u32 flags;		/* in, mask of ETNA_WAIT_x */
	__u32 pad;
	struct drm_etnaviv_timespec timeout;	/* in */
};

#define DRM_ETNAVIV_GET_PARAM		0x00
/* placeholder:
#define DRM_ETNAVIV_SET_PARAM		0x01
 */
#define DRM_ETNAVIV_GEM_NEW		0x02
#define DRM_ETNAVIV_GEM_INFO		0x03
#define DRM_ETNAVIV_GEM_CPU_PREP	0x04
#define DRM_ETNAVIV_GEM_CPU_FINI	0x05
#define DRM_ETNAVIV_GEM_SUBMIT		0x06
#define DRM_ETNAVIV_WAIT_FENCE		0x07
#define DRM_ETNAVIV_GEM_USERPTR		0x08
#define DRM_ETNAVIV_GEM_WAIT		0x09
#define DRM_ETNAVIV_NUM_IOCTLS		0x0a

#define DRM_IOCTL_ETNAVIV_GET_PARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
#define DRM_IOCTL_ETNAVIV_GEM_NEW	DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
#define DRM_IOCTL_ETNAVIV_GEM_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP	DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI	DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
#define DRM_IOCTL_ETNAVIV_WAIT_FENCE	DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
#define DRM_IOCTL_ETNAVIV_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
#define DRM_IOCTL_ETNAVIV_GEM_WAIT	DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)

#endif /* __ETNAVIV_DRM_H__ */
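/* Illustrative userspace sketch (not part of the UAPI): waiting on the
 * fence returned by a previous GEM_SUBMIT. It assumes 'fd' and 'fence'
 * are as in the submit sketch above and drmIoctl() comes from libdrm.
 * As with CPU_PREP, the timeout is an absolute CLOCK_MONOTONIC time;
 * ETNA_WAIT_NONBLOCK instead checks the fence without sleeping.
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	struct drm_etnaviv_wait_fence wait = {
 *		.pipe = 0,
 *		.fence = fence,
 *		.flags = 0,			// or ETNA_WAIT_NONBLOCK
 *		.timeout = {
 *			.tv_sec = ts.tv_sec + 1,	// wait up to ~1s
 *			.tv_nsec = ts.tv_nsec,
 *		},
 *	};
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_WAIT_FENCE, &wait);
 */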