/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Monk.liu@amd.com
 */
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H

#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is SR-IOV ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* SR-IOV is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */

struct amdgpu_mm_table {
	struct amdgpu_bo	*bo;
	uint32_t		*cpu_addr;
	uint64_t		gpu_addr;
};

#define AMDGPU_VF_ERROR_ENTRY_SIZE    16

/* struct amdgpu_vf_error_buffer - ring buffer of amdgpu VF error records */
struct amdgpu_vf_error_buffer {
	struct mutex lock;
	int read_count;
	int write_count;
	uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};

/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 */
struct amdgpu_virt_ops {
	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*reset_gpu)(struct amdgpu_device *adev);
	int (*wait_reset)(struct amdgpu_device *adev);
	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
};

/*
 * Firmware-reserved frame buffer region
 */
struct amdgpu_virt_fw_reserve {
	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
	unsigned int checksum_key;
};

/*
 * Data exchange definitions shared between PF and VF.
 * Structures are forcibly aligned to 4 to keep the same style as the PF side.
 */
#define AMDGIM_DATAEXCHANGE_OFFSET		(64 * 1024)

#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
		(total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
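
/*
 * AMDGIM_GET_STRUCTURE_RESERVED_SIZE() converts the declared field counts
 * into 32-bit words (u8 counts rounded up by 4, u16 counts by 2, each u64
 * counted twice) and subtracts them from the total dword budget, giving the
 * number of reserved dwords needed to pad a structure to "total" dwords.
 * Worked example (a sketch of the intent only) for the v2 PF-to-VF structure
 * below, which has 13 non-reserved u32 fields (9 plus a 4-dword header) and
 * 3 u64 fields:
 *
 *	AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, 13, 3)
 *		= 256 - ((0+3)/4 + (0+1)/2 + 13 + 3*2)
 *		= 256 - 19 = 237 reserved dwords,
 *
 * so 13 + 6 + 237 = 256 dwords, i.e. a nominal 1 KB exchange structure.
 * Note the arithmetic assumes the fields pack back to back; it does not
 * account for any implicit padding the compiler may insert before the
 * 64-bit members.
 */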

enum AMDGIM_FEATURE_FLAG {
	/* GIM supports error log collection */
	AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
	/* GIM supports loading microcode (uCodes) */
	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
	/* VRAM content is lost over a GIM FLR */
	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
};

struct amd_sriov_msg_pf2vf_info_header {
	/* the total structure size in bytes */
	uint32_t size;
	/* version of this structure, written by the GIM */
	uint32_t version;
	/* reserved */
	uint32_t reserved[2];
} __aligned(4);

struct amdgim_pf2vf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_pf2vf_info_header header;
	/* max_width * max_height */
	unsigned int uvd_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	unsigned int uvd_enc_max_bandwidth;
	/* max_width * max_height */
	unsigned int vce_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	unsigned int vce_enc_max_bandwidth;
	/* MEC FW position in KB from the start of visible frame buffer */
	unsigned int mecfw_kboffset;
	/* feature flags supported by the GIM driver */
	unsigned int feature_flags;
	/* checksum created with the private key from mailbox 2 */
	unsigned int checksum;
} __aligned(4);

struct amdgim_pf2vf_info_v2 {
	/* header contains size and version */
	struct amd_sriov_msg_pf2vf_info_header header;
	/* checksum created with the private key from mailbox 2 */
	uint32_t checksum;
	/* feature flags supported by the GIM driver */
	uint32_t feature_flags;
	/* max_width * max_height */
	uint32_t uvd_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	uint32_t uvd_enc_max_bandwidth;
	/* max_width * max_height */
	uint32_t vce_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	uint32_t vce_enc_max_bandwidth;
	/* MEC FW position in KB from the start of VF visible frame buffer */
	uint64_t mecfw_kboffset;
	/* MEC FW size in KB */
	uint32_t mecfw_ksize;
	/* UVD FW position in KB from the start of VF visible frame buffer */
	uint64_t uvdfw_kboffset;
	/* UVD FW size in KB */
	uint32_t uvdfw_ksize;
	/* VCE FW position in KB from the start of VF visible frame buffer */
	uint64_t vcefw_kboffset;
	/* VCE FW size in KB */
	uint32_t vcefw_ksize;
	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
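
/*
 * Illustrative sketch only (not part of this header): a guest driver that
 * has located the reserved area is expected to check the header version
 * before casting to a particular layout, e.g.:
 *
 *	struct amd_sriov_msg_pf2vf_info_header *hdr = adev->virt.fw_reserve.p_pf2vf;
 *	uint32_t features = 0;
 *
 *	if (hdr && hdr->version == 2)
 *		features = ((struct amdgim_pf2vf_info_v2 *)hdr)->feature_flags;
 *
 * The AMDGPU_FW_VRAM_PF2VF_READ() macro defined further down wraps exactly
 * this kind of version dispatch.
 */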

struct amd_sriov_msg_vf2pf_info_header {
	/* the total structure size in bytes */
	uint32_t size;
	/* version of this structure, written by the guest */
	uint32_t version;
	/* reserved */
	uint32_t reserved[2];
} __aligned(4);

struct amdgim_vf2pf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	/* driver version */
	char driver_version[64];
	/* driver certification, 1=WHQL, 0=None */
	unsigned int driver_cert;
	/* guest OS type and version: need a define */
	unsigned int os_info;
	/* in units of 1 MB */
	unsigned int fb_usage;
	/* guest gfx engine usage percentage */
	unsigned int gfx_usage;
	/* guest gfx engine health percentage */
	unsigned int gfx_health;
	/* guest compute engine usage percentage */
	unsigned int compute_usage;
	/* guest compute engine health percentage */
	unsigned int compute_health;
	/* guest vce engine usage percentage. 0xffff means N/A. */
	unsigned int vce_enc_usage;
	/* guest vce engine health percentage. 0xffff means N/A. */
	unsigned int vce_enc_health;
	/* guest uvd engine usage percentage. 0xffff means N/A. */
	unsigned int uvd_enc_usage;
	/* guest uvd engine health percentage. 0xffff means N/A. */
	unsigned int uvd_enc_health;
	unsigned int checksum;
} __aligned(4);

struct amdgim_vf2pf_info_v2 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	uint32_t checksum;
	/* driver version */
	uint8_t driver_version[64];
	/* driver certification, 1=WHQL, 0=None */
	uint32_t driver_cert;
	/* guest OS type and version: need a define */
	uint32_t os_info;
	/* in units of 1 MB */
	uint32_t fb_usage;
	/* guest gfx engine usage percentage */
	uint32_t gfx_usage;
	/* guest gfx engine health percentage */
	uint32_t gfx_health;
	/* guest compute engine usage percentage */
	uint32_t compute_usage;
	/* guest compute engine health percentage */
	uint32_t compute_health;
	/* guest vce engine usage percentage. 0xffff means N/A. */
	uint32_t vce_enc_usage;
	/* guest vce engine health percentage. 0xffff means N/A. */
	uint32_t vce_enc_health;
	/* guest uvd engine usage percentage. 0xffff means N/A. */
	uint32_t uvd_enc_usage;
	/* guest uvd engine health percentage. 0xffff means N/A. */
	uint32_t uvd_enc_health;
	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);

#define AMDGPU_FW_VRAM_VF2PF_VER 2
typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info;

#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
	do { \
		((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
	} while (0)

#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
	do { \
		(*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
	} while (0)

#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
	do { \
		if (!adev->virt.fw_reserve.p_pf2vf) \
			*(val) = 0; \
		else { \
			if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
				*(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
			if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
				*(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
		} \
	} while (0)
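
/*
 * Illustrative sketch only (not part of this header), assuming the reserved
 * VRAM area has already been mapped by amdgpu_virt_init_data_exchange():
 * the guest publishes its own telemetry with AMDGPU_FW_VRAM_VF2PF_WRITE()
 * and pulls host-provided limits with AMDGPU_FW_VRAM_PF2VF_READ(), e.g.:
 *
 *	uint32_t uvd_max_pixels = 0;
 *
 *	if (adev->virt.fw_reserve.p_vf2pf)
 *		AMDGPU_FW_VRAM_VF2PF_WRITE(adev, fb_usage, vram_used_mb);
 *	AMDGPU_FW_VRAM_PF2VF_READ(adev, uvd_enc_max_pixels_count, &uvd_max_pixels);
 *
 * Note that only AMDGPU_FW_VRAM_PF2VF_READ() checks the pointer for NULL;
 * the VF2PF accessors assume p_vf2pf has already been validated.
 * (vram_used_mb is a placeholder for whatever usage value the caller tracks.)
 */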

/* GPU virtualization */
struct amdgpu_virt {
	uint32_t			caps;
	struct amdgpu_bo		*csa_obj;
	bool				chained_ib_support;
	uint32_t			reg_val_offs;
	struct amdgpu_irq_src		ack_irq;
	struct amdgpu_irq_src		rcv_irq;
	struct work_struct		flr_work;
	struct amdgpu_mm_table		mm_table;
	const struct amdgpu_virt_ops	*ops;
	struct amdgpu_vf_error_buffer	vf_errors;
	struct amdgpu_virt_fw_reserve	fw_reserve;
	uint32_t			gim_feature;
};

#define amdgpu_sriov_enabled(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

#define amdgpu_passthrough(adev) \
	((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)

static inline bool is_virtual_machine(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t rreg1,
					uint32_t ref, uint32_t mask);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
					unsigned int key,
					unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);

#endif
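
/*
 * Illustrative sketch only (not part of this header): on an SR-IOV VF the
 * driver must hold exclusive ("full") GPU access while touching registers
 * directly, e.g. during init or reset.  A typical call pattern, assuming
 * adev->virt.ops has been set up for the ASIC, looks like:
 *
 *	if (amdgpu_sriov_vf(adev)) {
 *		int r = amdgpu_virt_request_full_gpu(adev, true);
 *		if (r)
 *			return r;
 *	}
 *
 *	... program registers / load firmware ...
 *
 *	if (amdgpu_sriov_vf(adev))
 *		amdgpu_virt_release_full_gpu(adev, true);
 *
 * Once full access is released, AMDGPU_SRIOV_CAPS_RUNTIME is set and
 * register access is expected to go through the KIQ helpers declared above
 * (amdgpu_virt_kiq_rreg()/amdgpu_virt_kiq_wreg()).
 */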