/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21 * 22 * Author: Monk.liu@amd.com 23 */ 24 #ifndef AMDGPU_VIRT_H 25 #define AMDGPU_VIRT_H 26 27 #define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */ 28 #define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */ 29 #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ 30 #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ 31 #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ 32 33 /* all asic after AI use this offset */ 34 #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 35 /* tonga/fiji use this offset */ 36 #define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 37 38 enum amdgpu_sriov_vf_mode { 39 SRIOV_VF_MODE_BARE_METAL = 0, 40 SRIOV_VF_MODE_ONE_VF, 41 SRIOV_VF_MODE_MULTI_VF, 42 }; 43 44 struct amdgpu_mm_table { 45 struct amdgpu_bo *bo; 46 uint32_t *cpu_addr; 47 uint64_t gpu_addr; 48 }; 49 50 #define AMDGPU_VF_ERROR_ENTRY_SIZE 16 51 52 /* struct error_entry - amdgpu VF error information. 
*/ 53 struct amdgpu_vf_error_buffer { 54 struct mutex lock; 55 int read_count; 56 int write_count; 57 uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE]; 58 uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE]; 59 uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; 60 }; 61 62 /** 63 * struct amdgpu_virt_ops - amdgpu device virt operations 64 */ 65 struct amdgpu_virt_ops { 66 int (*req_full_gpu)(struct amdgpu_device *adev, bool init); 67 int (*rel_full_gpu)(struct amdgpu_device *adev, bool init); 68 int (*req_init_data)(struct amdgpu_device *adev); 69 int (*reset_gpu)(struct amdgpu_device *adev); 70 int (*wait_reset)(struct amdgpu_device *adev); 71 void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); 72 }; 73 74 /* 75 * Firmware Reserve Frame buffer 76 */ 77 struct amdgpu_virt_fw_reserve { 78 struct amd_sriov_msg_pf2vf_info_header *p_pf2vf; 79 struct amd_sriov_msg_vf2pf_info_header *p_vf2pf; 80 unsigned int checksum_key; 81 }; 82 /* 83 * Defination between PF and VF 84 * Structures forcibly aligned to 4 to keep the same style as PF. 85 */ 86 #define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024) 87 88 #define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \ 89 (total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2)) 90 91 enum AMDGIM_FEATURE_FLAG { 92 /* GIM supports feature of Error log collecting */ 93 AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1, 94 /* GIM supports feature of loading uCodes */ 95 AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, 96 /* VRAM LOST by GIM */ 97 AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, 98 /* MM bandwidth */ 99 AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8, 100 /* PP ONE VF MODE in GIM */ 101 AMDGIM_FEATURE_PP_ONE_VF = (1 << 4), 102 }; 103 104 struct amd_sriov_msg_pf2vf_info_header { 105 /* the total structure size in byte. 
*/ 106 uint32_t size; 107 /* version of this structure, written by the GIM */ 108 uint32_t version; 109 /* reserved */ 110 uint32_t reserved[2]; 111 } __aligned(4); 112 struct amdgim_pf2vf_info_v1 { 113 /* header contains size and version */ 114 struct amd_sriov_msg_pf2vf_info_header header; 115 /* max_width * max_height */ 116 unsigned int uvd_enc_max_pixels_count; 117 /* 16x16 pixels/sec, codec independent */ 118 unsigned int uvd_enc_max_bandwidth; 119 /* max_width * max_height */ 120 unsigned int vce_enc_max_pixels_count; 121 /* 16x16 pixels/sec, codec independent */ 122 unsigned int vce_enc_max_bandwidth; 123 /* MEC FW position in kb from the start of visible frame buffer */ 124 unsigned int mecfw_kboffset; 125 /* The features flags of the GIM driver supports. */ 126 unsigned int feature_flags; 127 /* use private key from mailbox 2 to create chueksum */ 128 unsigned int checksum; 129 } __aligned(4); 130 131 struct amdgim_pf2vf_info_v2 { 132 /* header contains size and version */ 133 struct amd_sriov_msg_pf2vf_info_header header; 134 /* use private key from mailbox 2 to create chueksum */ 135 uint32_t checksum; 136 /* The features flags of the GIM driver supports. 
*/ 137 uint32_t feature_flags; 138 /* max_width * max_height */ 139 uint32_t uvd_enc_max_pixels_count; 140 /* 16x16 pixels/sec, codec independent */ 141 uint32_t uvd_enc_max_bandwidth; 142 /* max_width * max_height */ 143 uint32_t vce_enc_max_pixels_count; 144 /* 16x16 pixels/sec, codec independent */ 145 uint32_t vce_enc_max_bandwidth; 146 /* MEC FW position in kb from the start of VF visible frame buffer */ 147 uint64_t mecfw_kboffset; 148 /* MEC FW size in KB */ 149 uint32_t mecfw_ksize; 150 /* UVD FW position in kb from the start of VF visible frame buffer */ 151 uint64_t uvdfw_kboffset; 152 /* UVD FW size in KB */ 153 uint32_t uvdfw_ksize; 154 /* VCE FW position in kb from the start of VF visible frame buffer */ 155 uint64_t vcefw_kboffset; 156 /* VCE FW size in KB */ 157 uint32_t vcefw_ksize; 158 uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)]; 159 } __aligned(4); 160 161 162 struct amd_sriov_msg_vf2pf_info_header { 163 /* the total structure size in byte. */ 164 uint32_t size; 165 /*version of this structure, written by the guest */ 166 uint32_t version; 167 /* reserved */ 168 uint32_t reserved[2]; 169 } __aligned(4); 170 171 struct amdgim_vf2pf_info_v1 { 172 /* header contains size and version */ 173 struct amd_sriov_msg_vf2pf_info_header header; 174 /* driver version */ 175 char driver_version[64]; 176 /* driver certification, 1=WHQL, 0=None */ 177 unsigned int driver_cert; 178 /* guest OS type and version: need a define */ 179 unsigned int os_info; 180 /* in the unit of 1M */ 181 unsigned int fb_usage; 182 /* guest gfx engine usage percentage */ 183 unsigned int gfx_usage; 184 /* guest gfx engine health percentage */ 185 unsigned int gfx_health; 186 /* guest compute engine usage percentage */ 187 unsigned int compute_usage; 188 /* guest compute engine health percentage */ 189 unsigned int compute_health; 190 /* guest vce engine usage percentage. 0xffff means N/A. 
*/ 191 unsigned int vce_enc_usage; 192 /* guest vce engine health percentage. 0xffff means N/A. */ 193 unsigned int vce_enc_health; 194 /* guest uvd engine usage percentage. 0xffff means N/A. */ 195 unsigned int uvd_enc_usage; 196 /* guest uvd engine usage percentage. 0xffff means N/A. */ 197 unsigned int uvd_enc_health; 198 unsigned int checksum; 199 } __aligned(4); 200 201 struct amdgim_vf2pf_info_v2 { 202 /* header contains size and version */ 203 struct amd_sriov_msg_vf2pf_info_header header; 204 uint32_t checksum; 205 /* driver version */ 206 uint8_t driver_version[64]; 207 /* driver certification, 1=WHQL, 0=None */ 208 uint32_t driver_cert; 209 /* guest OS type and version: need a define */ 210 uint32_t os_info; 211 /* in the unit of 1M */ 212 uint32_t fb_usage; 213 /* guest gfx engine usage percentage */ 214 uint32_t gfx_usage; 215 /* guest gfx engine health percentage */ 216 uint32_t gfx_health; 217 /* guest compute engine usage percentage */ 218 uint32_t compute_usage; 219 /* guest compute engine health percentage */ 220 uint32_t compute_health; 221 /* guest vce engine usage percentage. 0xffff means N/A. */ 222 uint32_t vce_enc_usage; 223 /* guest vce engine health percentage. 0xffff means N/A. */ 224 uint32_t vce_enc_health; 225 /* guest uvd engine usage percentage. 0xffff means N/A. */ 226 uint32_t uvd_enc_usage; 227 /* guest uvd engine usage percentage. 0xffff means N/A. 
*/ 228 uint32_t uvd_enc_health; 229 uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)]; 230 } __aligned(4); 231 232 #define AMDGPU_FW_VRAM_VF2PF_VER 2 233 typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ; 234 235 #define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \ 236 do { \ 237 ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \ 238 } while (0) 239 240 #define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \ 241 do { \ 242 (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \ 243 } while (0) 244 245 #define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \ 246 do { \ 247 if (!adev->virt.fw_reserve.p_pf2vf) \ 248 *(val) = 0; \ 249 else { \ 250 if (adev->virt.fw_reserve.p_pf2vf->version == 1) \ 251 *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \ 252 if (adev->virt.fw_reserve.p_pf2vf->version == 2) \ 253 *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \ 254 } \ 255 } while (0) 256 257 /* GPU virtualization */ 258 struct amdgpu_virt { 259 uint32_t caps; 260 struct amdgpu_bo *csa_obj; 261 void *csa_cpu_addr; 262 bool chained_ib_support; 263 uint32_t reg_val_offs; 264 struct amdgpu_irq_src ack_irq; 265 struct amdgpu_irq_src rcv_irq; 266 struct work_struct flr_work; 267 struct amdgpu_mm_table mm_table; 268 const struct amdgpu_virt_ops *ops; 269 struct amdgpu_vf_error_buffer vf_errors; 270 struct amdgpu_virt_fw_reserve fw_reserve; 271 uint32_t gim_feature; 272 uint32_t reg_access_mode; 273 int req_init_data_ver; 274 bool tdr_debug; 275 }; 276 277 #define amdgpu_sriov_enabled(adev) \ 278 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) 279 280 #define amdgpu_sriov_vf(adev) \ 281 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF) 282 283 #define amdgpu_sriov_bios(adev) \ 284 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS) 285 286 #define amdgpu_sriov_runtime(adev) \ 287 
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME) 288 289 #define amdgpu_sriov_fullaccess(adev) \ 290 (amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev))) 291 292 #define amdgpu_passthrough(adev) \ 293 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) 294 295 static inline bool is_virtual_machine(void) 296 { 297 #ifdef CONFIG_X86 298 return boot_cpu_has(X86_FEATURE_HYPERVISOR); 299 #else 300 return false; 301 #endif 302 } 303 304 #define amdgpu_sriov_is_pp_one_vf(adev) \ 305 ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) 306 #define amdgpu_sriov_is_debug(adev) \ 307 ((!adev->in_gpu_reset) && adev->virt.tdr_debug) 308 #define amdgpu_sriov_is_normal(adev) \ 309 ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug)) 310 311 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); 312 void amdgpu_virt_init_setting(struct amdgpu_device *adev); 313 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, 314 uint32_t reg0, uint32_t rreg1, 315 uint32_t ref, uint32_t mask); 316 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); 317 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); 318 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); 319 void amdgpu_virt_request_init_data(struct amdgpu_device *adev); 320 int amdgpu_virt_wait_reset(struct amdgpu_device *adev); 321 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); 322 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); 323 int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, 324 unsigned int key, 325 unsigned int chksum); 326 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); 327 void amdgpu_detect_virtualization(struct amdgpu_device *adev); 328 329 bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev); 330 int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); 331 void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev); 332 333 enum amdgpu_sriov_vf_mode 
amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev); 334 #endif 335