/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

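/* Run the callback registered for @xcp_state (prepare_suspend, suspend,
 * prepare_resume or resume) on one IP block of a partition.  Returns 0
 * when the IP entry is invalid or has no callback for this state.
 */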
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

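/* Walk every IP block of partition @xcp_id and run the @state transition,
 * stopping at the first failure.
 */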
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

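/* Convenience wrappers that run a single transition state on a partition. */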
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

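/* Record the IP block described by @ip in partition @xcp_id and mark both
 * the entry and the partition as valid.
 */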
static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

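/**
 * amdgpu_xcp_init - build the per-partition IP tables
 * @xcp_mgr: partition manager
 * @num_xcps: number of partitions to set up (1..MAX_XCP)
 * @mode: partition mode being configured
 *
 * Queries the IP details and the memory partition id of each XCP through
 * the manager callbacks, then updates the partition scheduler lists.
 */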
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed to figure out the memory id of an xcp */
	xcp_mgr->num_xcp_per_mem_partition =
		num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;

			xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}

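/**
 * amdgpu_xcp_switch_partition_mode - switch the device to a new partition mode
 * @xcp_mgr: partition manager
 * @mode: target partition mode
 *
 * The cached mode is set to AMDGPU_XCP_MODE_TRANS for the duration of the
 * switch.  On failure it is restored from the hardware when it can be
 * queried, otherwise from the previous value.
 */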
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* Set the state to transient mode while switching */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;
	}

	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

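/**
 * amdgpu_xcp_query_partition_mode - read the current partition mode
 * @xcp_mgr: partition manager
 * @flags: AMDGPU_XCP_FL_LOCKED when the caller already holds xcp_lock
 *
 * Returns the mode reported by the backend and warns if it disagrees with
 * the cached value while no transition is in progress.
 */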
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return xcp_mgr->mode;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);

	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(xcp_mgr->adev->dev,
			 "Cached partition mode %d does not match device mode %d",
			 xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

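/* Allocate a drm device for every partition beyond the first; partition 0
 * shares adev's drm device.  Each new device's original minor, driver and
 * vma offset manager pointers are saved in the xcp (so that
 * amdgpu_xcp_dev_unplug() can restore them) and then redirected to the
 * parent device.
 */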
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares the drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
				 "Skip xcp node #%d when out of drm node resource.", i);
			return 0;
		} else if (ret) {
			return ret;
		}

		/* Redirect all IOCTLs to the primary device */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;
	}

	return 0;
}

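/**
 * amdgpu_xcp_mgr_init - allocate and initialize the partition manager
 * @adev: amdgpu device
 * @init_mode: initial partition mode
 * @init_num_xcps: number of partitions for the initial mode
 * @xcp_funcs: backend callbacks; switch_partition_mode and get_ip_details
 *             are mandatory
 */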
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;

	if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
	    !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}

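/**
 * amdgpu_xcp_get_partition - find the partitions owning an IP instance
 * @xcp_mgr: partition manager
 * @ip: IP block type
 * @instance: instance of the IP block
 *
 * Returns a bitmask of the ids of the valid partitions whose @ip instance
 * mask contains @instance, or -ENXIO when no partition matches.
 */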
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}

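/**
 * amdgpu_xcp_get_inst_details - get the instance mask of an IP block
 * @xcp: partition to query
 * @ip: IP block type
 * @inst_mask: returned mask of the instances owned by @xcp
 */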
int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;

	return 0;
}

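/* Register the drm devices of the secondary partitions.  The ddev array is
 * populated contiguously by amdgpu_xcp_dev_alloc(), so the first hole marks
 * the end.
 */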
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

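/* Unplug the per-partition drm devices and restore the minor, driver and
 * vma offset manager pointers saved in amdgpu_xcp_dev_alloc().
 */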
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
	}
}

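/* Map an opened render node to its partition: record the partition id and
 * the matching memory partition id in the file private data.
 */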
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (!adev->xcp_mgr->xcp[i].valid) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
					file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
				file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

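/* Decrement the ref count of the partition that owns the ring backing the
 * entity's scheduler, provided the scheduler is ready.
 */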
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (sched->ready) {
		ring = to_amdgpu_ring(sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}