/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>

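/*
 * __amdgpu_xcp_run - dispatch a single partition state transition to one
 * IP block. Looks up the callback matching @xcp_state and runs it with the
 * block's instance mask. An invalid block or a missing callback is not an
 * error; the transition is simply skipped.
 */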
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

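/*
 * amdgpu_xcp_run_transition - walk every IP block of partition @xcp_id and
 * run the transition for @state, stopping at the first failure.
 */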
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	/* xcp[] has MAX_XCP entries, so the last valid index is MAX_XCP - 1;
	 * using '>' here would read one element past the end of the array.
	 */
	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

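/* Thin wrappers that run one named transition over all blocks of an XCP. */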
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

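/*
 * __amdgpu_xcp_add_block - copy an IP block description into the
 * partition's ip[] table, keyed by ip_id, and mark both the block and the
 * partition valid.
 */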
static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

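/*
 * amdgpu_xcp_init - (re)build the partition table for @num_xcps partitions
 * in partition mode @mode. For each partition, IP block details are pulled
 * from the mode-specific get_ip_details() callback and an optional memory
 * partition id is queried; the GPU scheduler lists are then updated to
 * match the new layout.
 */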
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (!ret)
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;
	return 0;
}

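/*
 * amdgpu_xcp_switch_partition_mode - switch the device to a new partition
 * mode under xcp_lock. The cached mode is parked at AMDGPU_XCP_MODE_TRANS
 * while the hardware switch runs; on failure the real mode is re-queried
 * from the hardware (or restored from the previous cached value).
 */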
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* State set to transient mode */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;
	}

	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

200 
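/*
 * amdgpu_xcp_query_partition_mode - return the current partition mode,
 * preferring the value reported by hardware over the cached one. Callers
 * that already hold xcp_lock must pass AMDGPU_XCP_FL_LOCKED. A mismatch
 * between the cached and reported mode outside of a transition triggers a
 * warning.
 */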
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return xcp_mgr->mode;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(
			xcp_mgr->adev->dev,
			"Cached partition mode %d not matching with device mode %d",
			xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

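/*
 * amdgpu_xcp_dev_alloc - allocate one lightweight drm_device per possible
 * partition so that each XCP can be exposed as its own render node. The
 * per-partition devices share the primary device's VMA offset manager and
 * redirect all IOCTLs to the primary device.
 */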
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct pci_dev *pdev;
	struct drm_device *ddev;
	int i;

	pdev = adev->pdev;
	ddev = adev_to_drm(adev);

	for (i = 0; i < MAX_XCP; i++) {
		/* Partition devices are parented to the PCIe bridge above the
		 * GPU; this assumes the device always sits behind an upstream
		 * bridge.
		 */
		p_ddev = drm_dev_alloc(&amdgpu_partition_driver,
			&pci_upstream_bridge(pdev)->dev);
		if (IS_ERR(p_ddev))
			return PTR_ERR(p_ddev);

		/* Redirect all IOCTLs to the primary device */
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;
	}

	return 0;
}

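/*
 * amdgpu_xcp_mgr_init - allocate and set up the per-device XCP manager.
 * The switch_partition_mode() and get_ip_details() callbacks are
 * mandatory; if an initial mode is given, the partition table is built
 * right away.
 */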
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;

	if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
	    !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);

	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}

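/*
 * amdgpu_xcp_get_partition - return a bitmask of the partitions that own
 * instance @instance of IP block @ip, or -ENXIO if no partition does.
 */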
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}

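/*
 * amdgpu_xcp_get_inst_details - report the instance mask that partition
 * @xcp holds for IP block @ip.
 */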
int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;

	return 0;
}

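/*
 * amdgpu_xcp_dev_register - register each per-partition drm device with
 * the DRM core, passing the driver data from the PCI match entry as the
 * driver flags.
 */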
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 0; i < MAX_XCP; i++) {
		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

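/*
 * amdgpu_xcp_dev_unplug - mark every per-partition drm device as unplugged
 * on device removal.
 */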
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 0; i < MAX_XCP; i++)
		drm_dev_unplug(adev->xcp_mgr->xcp[i].ddev);
}

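/*
 * amdgpu_xcp_open_device - resolve which partition a newly opened render
 * node belongs to and record its partition id and memory partition id in
 * the file private data. Files that match no partition get xcp_id ~0 and
 * mem_id -1.
 */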
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = ~0;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (!adev->xcp_mgr->xcp[i].valid) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
						file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
					file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

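/*
 * amdgpu_xcp_release_sched - drop the reference a context entity holds on
 * its partition when the entity is released; only counted while the ring's
 * scheduler is ready.
 */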
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
				  struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (sched->ready) {
		ring = to_amdgpu_ring(sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}