1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Monk.liu@amd.com
23  */
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H

/* Bits for amdgpu_virt.caps describing the virtualization state of the GPU */
#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is sr-iov ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
32 
/*
 * struct amdgpu_mm_table - a buffer object mapped for both CPU and GPU
 * access (allocated/freed via amdgpu_virt_alloc_mm_table()/
 * amdgpu_virt_free_mm_table() below).
 */
struct amdgpu_mm_table {
	struct amdgpu_bo	*bo;		/* backing buffer object */
	uint32_t		*cpu_addr;	/* CPU-side mapping of @bo */
	uint64_t		gpu_addr;	/* GPU address of @bo */
};
38 
/* number of entries in each of the VF error arrays below */
#define AMDGPU_VF_ERROR_ENTRY_SIZE    16

/*
 * struct amdgpu_vf_error_buffer - amdgpu VF error information.
 * Fixed-size store of VF error records collected by the guest driver.
 */
struct amdgpu_vf_error_buffer {
	struct mutex lock;	/* serializes readers and writers of this buffer */
	int read_count;		/* read cursor — presumably counts consumed entries; confirm in amdgpu_vf_error.c */
	int write_count;	/* write cursor — presumably counts produced entries */
	uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];	/* per-entry error code */
	uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];	/* per-entry flags */
	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];	/* per-entry payload data */
};
50 
/* According to the fw feature, some new reg access modes are supported
 * (stored in amdgpu_virt.reg_access_mode, see the
 * amdgpu_virt_support_*() helpers below).
 */
#define AMDGPU_VIRT_REG_ACCESS_LEGACY          (1 << 0) /* directly via mmio */
#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH      (1 << 1) /* IH registers programmed by PSP */
#define AMDGPU_VIRT_REG_ACCESS_RLC             (1 << 2) /* programmed by RLC */
/* NOTE(review): name misspells "SETTING"; kept as-is for source compatibility */
#define AMDGPU_VIRT_REG_SKIP_SEETING           (1 << 3) /* skip setting the register */
56 
/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 * @req_full_gpu: request full (exclusive) GPU access from the host;
 *                @init marks driver init/teardown paths
 * @rel_full_gpu: release full GPU access back to the host
 * @reset_gpu: ask the host to reset this VF
 * @wait_reset: wait for a host-initiated reset to complete
 * @trans_msg: send message @req with up to three data words to the host
 * @get_pp_clk: read powerplay clock info of @type into @buf
 * @force_dpm_level: force the DPM performance @level
 * @init_reg_access_mode: initialize amdgpu_virt.reg_access_mode for this device
 */
struct amdgpu_virt_ops {
	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*reset_gpu)(struct amdgpu_device *adev);
	int (*wait_reset)(struct amdgpu_device *adev);
	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
	void (*init_reg_access_mode)(struct amdgpu_device *adev);
};
70 
/*
 * Firmware Reserve Frame buffer
 * Pointers into the firmware-reserved VRAM region used for the PF<->VF
 * information exchange, plus the key used when checksumming it.
 */
struct amdgpu_virt_fw_reserve {
	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;	/* host (PF) -> guest (VF) info */
	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;	/* guest (VF) -> host (PF) info */
	unsigned int checksum_key;				/* key for amdgpu_virt_fw_reserve_get_checksum() */
};
/*
 * Definition between PF and VF
 * Structures forcibly aligned to 4 to keep the same style as PF.
 */
#define AMDGIM_DATAEXCHANGE_OFFSET		(64 * 1024)
84 
/*
 * AMDGIM_GET_STRUCTURE_RESERVED_SIZE - number of uint32_t padding slots
 * left in a structure of @total uint32_t units once the given counts of
 * u8/u16/u32/u64 members are accounted for (u8s and u16s rounded up to
 * whole uint32_t units, each u64 taking two).
 *
 * Fix: parenthesize @total in the expansion so an expression argument
 * (e.g. a ternary) cannot change the grouping of the subtraction.
 */
#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
		((total) - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
87 
/* Feature bits advertised by the host GIM driver (amdgpu_virt.gim_feature). */
enum AMDGIM_FEATURE_FLAG {
	/* GIM supports feature of Error log collecting */
	AMDGIM_FEATURE_ERROR_LOG_COLLECT = (1 << 0),
	/* GIM supports feature of loading uCodes */
	AMDGIM_FEATURE_GIM_LOAD_UCODES = (1 << 1),
	/* VRAM LOST by GIM */
	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = (1 << 2),
	/* HW PERF SIM in GIM */
	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};
98 
/* Common header at the start of every PF->VF info structure (v1/v2 below). */
struct amd_sriov_msg_pf2vf_info_header {
	/* the total structure size in byte. */
	uint32_t size;
	/* version of this structure, written by the GIM */
	uint32_t version;
	/* reserved */
	uint32_t reserved[2];
} __aligned(4);
/* Version 1 of the PF->VF info block published by the host GIM driver. */
struct  amdgim_pf2vf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_pf2vf_info_header header;
	/* max_width * max_height */
	unsigned int uvd_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	unsigned int uvd_enc_max_bandwidth;
	/* max_width * max_height */
	unsigned int vce_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	unsigned int vce_enc_max_bandwidth;
	/* MEC FW position in kb from the start of visible frame buffer */
	unsigned int mecfw_kboffset;
	/* feature flags supported by the GIM driver (enum AMDGIM_FEATURE_FLAG) */
	unsigned int feature_flags;
	/* use private key from mailbox 2 to create checksum */
	unsigned int checksum;
} __aligned(4);
125 
/* Version 2 of the PF->VF info block; adds per-firmware offsets/sizes and
 * pads the structure to a fixed 256 uint32_t units via @reserved. */
struct  amdgim_pf2vf_info_v2 {
	/* header contains size and version */
	struct amd_sriov_msg_pf2vf_info_header header;
	/* use private key from mailbox 2 to create checksum */
	uint32_t checksum;
	/* feature flags supported by the GIM driver (enum AMDGIM_FEATURE_FLAG) */
	uint32_t feature_flags;
	/* max_width * max_height */
	uint32_t uvd_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	uint32_t uvd_enc_max_bandwidth;
	/* max_width * max_height */
	uint32_t vce_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	uint32_t vce_enc_max_bandwidth;
	/* MEC FW position in kb from the start of VF visible frame buffer */
	uint64_t mecfw_kboffset;
	/* MEC FW size in KB */
	uint32_t mecfw_ksize;
	/* UVD FW position in kb from the start of VF visible frame buffer */
	uint64_t uvdfw_kboffset;
	/* UVD FW size in KB */
	uint32_t uvdfw_ksize;
	/* VCE FW position in kb from the start of VF visible frame buffer */
	uint64_t vcefw_kboffset;
	/* VCE FW size in KB */
	uint32_t vcefw_ksize;
	/* pad to 256 uint32_t units: 9 u32 members + header, 3 u64 members */
	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
155 
156 
/* Common header at the start of every VF->PF info structure (v1/v2 below). */
struct amd_sriov_msg_vf2pf_info_header {
	/* the total structure size in byte. */
	uint32_t size;
	/* version of this structure, written by the guest */
	uint32_t version;
	/* reserved */
	uint32_t reserved[2];
} __aligned(4);
165 
/* Version 1 of the VF->PF info block written by the guest driver. */
struct amdgim_vf2pf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	/* driver version */
	char driver_version[64];
	/* driver certification, 1=WHQL, 0=None */
	unsigned int driver_cert;
	/* guest OS type and version: need a define */
	unsigned int os_info;
	/* in the unit of 1M */
	unsigned int fb_usage;
	/* guest gfx engine usage percentage */
	unsigned int gfx_usage;
	/* guest gfx engine health percentage */
	unsigned int gfx_health;
	/* guest compute engine usage percentage */
	unsigned int compute_usage;
	/* guest compute engine health percentage */
	unsigned int compute_health;
	/* guest vce engine usage percentage. 0xffff means N/A. */
	unsigned int vce_enc_usage;
	/* guest vce engine health percentage. 0xffff means N/A. */
	unsigned int vce_enc_health;
	/* guest uvd engine usage percentage. 0xffff means N/A. */
	unsigned int uvd_enc_usage;
	/* guest uvd engine health percentage. 0xffff means N/A. */
	unsigned int uvd_enc_health;
	/* use private key from mailbox 2 to create checksum */
	unsigned int checksum;
} __aligned(4);
195 
/* Version 2 of the VF->PF info block; same data as v1 with fixed-width
 * types and padding to 256 uint32_t units via @reserved. */
struct amdgim_vf2pf_info_v2 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	/* use private key from mailbox 2 to create checksum */
	uint32_t checksum;
	/* driver version */
	uint8_t driver_version[64];
	/* driver certification, 1=WHQL, 0=None */
	uint32_t driver_cert;
	/* guest OS type and version: need a define */
	uint32_t os_info;
	/* in the unit of 1M */
	uint32_t fb_usage;
	/* guest gfx engine usage percentage */
	uint32_t gfx_usage;
	/* guest gfx engine health percentage */
	uint32_t gfx_health;
	/* guest compute engine usage percentage */
	uint32_t compute_usage;
	/* guest compute engine health percentage */
	uint32_t compute_health;
	/* guest vce engine usage percentage. 0xffff means N/A. */
	uint32_t vce_enc_usage;
	/* guest vce engine health percentage. 0xffff means N/A. */
	uint32_t vce_enc_health;
	/* guest uvd engine usage percentage. 0xffff means N/A. */
	uint32_t uvd_enc_usage;
	/* guest uvd engine health percentage. 0xffff means N/A. */
	uint32_t uvd_enc_health;
	/* pad to 256 uint32_t units: 64 u8s, 12 u32 members + header */
	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
226 
/* VF->PF structure version currently written by this driver */
#define AMDGPU_FW_VRAM_VF2PF_VER 2
typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info;

/* Write @val into @field of the VF->PF exchange structure in VRAM.
 * Fix: parenthesize the @adev macro argument (an expression argument
 * would otherwise mis-bind against ->). */
#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
	do { \
		((amdgim_vf2pf_info *)(adev)->virt.fw_reserve.p_vf2pf)->field = (val); \
	} while (0)

/* Read @field of the VF->PF exchange structure into *(val). */
#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
	do { \
		*(val) = ((amdgim_vf2pf_info *)(adev)->virt.fw_reserve.p_vf2pf)->field; \
	} while (0)

/* Read @field of the PF->VF structure, dispatching on its version;
 * yields 0 when the exchange region is not mapped.  An unknown version
 * (neither 1 nor 2) leaves *(val) untouched, as before. */
#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
	do { \
		if (!(adev)->virt.fw_reserve.p_pf2vf) \
			*(val) = 0; \
		else { \
			if ((adev)->virt.fw_reserve.p_pf2vf->version == 1) \
				*(val) = ((struct amdgim_pf2vf_info_v1 *)(adev)->virt.fw_reserve.p_pf2vf)->field; \
			else if ((adev)->virt.fw_reserve.p_pf2vf->version == 2) \
				*(val) = ((struct amdgim_pf2vf_info_v2 *)(adev)->virt.fw_reserve.p_pf2vf)->field; \
		} \
	} while (0)
251 
/* GPU virtualization - per-device state for running as an SR-IOV VF */
struct amdgpu_virt {
	uint32_t			caps;		/* AMDGPU_SRIOV_CAPS_* / AMDGPU_PASSTHROUGH_MODE bits */
	struct amdgpu_bo		*csa_obj;	/* CSA buffer object — presumably context save area; confirm */
	void				*csa_cpu_addr;	/* CPU mapping of @csa_obj */
	bool chained_ib_support;
	uint32_t			reg_val_offs;	/* scratch offset used by amdgpu_virt_kiq_rreg() */
	struct amdgpu_irq_src		ack_irq;	/* host ack interrupt source */
	struct amdgpu_irq_src		rcv_irq;	/* host message interrupt source */
	struct work_struct		flr_work;	/* deferred work for host-initiated resets (FLR) */
	struct amdgpu_mm_table		mm_table;	/* see amdgpu_virt_alloc_mm_table() */
	const struct amdgpu_virt_ops	*ops;		/* host mailbox backend */
	struct amdgpu_vf_error_buffer   vf_errors;	/* collected VF error records */
	struct amdgpu_virt_fw_reserve	fw_reserve;	/* PF<->VF data exchange region */
	uint32_t gim_feature;				/* AMDGIM_FEATURE_FLAG bits */
	/* protect DPM events to GIM */
	struct mutex                    dpm_mutex;
	uint32_t reg_access_mode;			/* AMDGPU_VIRT_REG_ACCESS_* bits */
};
271 
/* Helpers testing individual bits of adev->virt.caps */

#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
286 
/*
 * is_virtual_machine() - best-effort check for running under a hypervisor.
 * On x86 this tests the CPUID hypervisor feature bit; all other
 * architectures report false.
 */
static inline bool is_virtual_machine(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

/* true when the host GIM simulates HW perf counters for this VF */
#define amdgim_is_hwperf(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
298 
/* Entry points implemented in amdgpu_virt.c.
 * Fix: KIQ reg_write_reg_wait's second parameter renamed rreg1 -> reg1
 * for consistency with reg0 (prototype-only, no ABI impact). */
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
/* KIQ-assisted register access */
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
/* full-access handshake with the host */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
/* checksum over @obj_size bytes of @obj using @key (PF<->VF exchange) */
int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
					unsigned int key,
					unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);

/* register access mode queries (AMDGPU_VIRT_REG_* bits) */
void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);
323 
324 #endif
325