/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

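/* Mask of 'num_inst' contiguous instances owned by partition 'xcp_id' */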
#define XCP_INST_MASK(num_inst, xcp_id) \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

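/*
 * VCN instances are shared between partitions when there are more
 * partitions than VCN instances.
 */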
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

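/* Assign a ring to the partition whose IP block owns the ring's hardware instance */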
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		if (aqua_vanjaram_xcp_vcn_shared(adev))
			inst_mask = 1 << (inst_idx * 2);
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			break;
		}
	}
}

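/*
 * Append the ring's scheduler to the selected partition's sched list for the
 * ring's type and hardware priority.
 */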
static void aqua_vanjaram_xcp_gpu_sched_update(
		struct amdgpu_device *adev,
		struct amdgpu_ring *ring,
		unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
		  sel_xcp_id, ring->funcs->type,
		  ring->hw_prio, *num_gpu_sched);
}

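/* Rebuild the per-partition scheduler lists from the rings that are currently ready */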
static int aqua_vanjaram_xcp_sched_list_update(
		struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX MODE in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

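/* Map every ring to a partition, then rebuild the per-partition scheduler lists */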
static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

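/*
 * Select the schedulers for a context. A file that has not been assigned a
 * partition yet is placed on the partition with the fewest references.
 */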
static int aqua_vanjaram_select_scheds(
		struct amdgpu_device *adev,
		u32 hw_ip,
		u32 hw_prio,
		struct amdgpu_fpriv *fpriv,
		unsigned int *num_scheds,
		struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

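/* Translate a logical IP instance index to the physical (device) instance */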
static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
						enum amd_hw_ip_block_type block,
						int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Covers both JPEG and VCN, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs. */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
						  enum amd_hw_ip_block_type block,
						  uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

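/*
 * Fill the logical-to-physical instance table for an IP from its instance
 * mask; unused slots are set to -1.
 */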
static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for smn addressing on different AIDs:
 * bit[34]: indicates cross-AID access
 * bit[33:32]: indicates the target AID id
 * AID id range is 0 ~ 3, as the maximum AID number is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing, bit[34:32] will be zeros */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross-AID traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}


static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	if (adev->nbio.funcs->get_compute_partition_mode)
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);

	return mode;
}

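/* Number of XCCs assigned to each partition in the given compute partition mode */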
static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

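/* Fill the instance mask and IP funcs of one IP block for the given partition */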
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
	int num_sdma, num_vcn;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma;
		num_vcn_xcp = num_vcn;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 2;
		num_vcn_xcp = num_vcn / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 3;
		num_vcn_xcp = num_vcn / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 4;
		num_vcn_xcp = num_vcn / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = 2;
		num_vcn_xcp = num_vcn ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

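/* Pick the compute partition mode that matches the current memory partitioning */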
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_QPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

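/*
 * Check that the requested compute partition mode is compatible with the
 * XCC count and the memory partitioning.
 */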
static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return ((num_xcc > 1) &&
			(adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
			(num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}

	return false;
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

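/*
 * Switch to the requested (or auto-selected) compute partition mode. When KFD
 * is active it is torn down before the switch and re-initialized afterwards.
 */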
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete)
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

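/*
 * Derive the memory partition id for an XCC: XCCs are grouped into
 * partitions, and partitions are grouped into memory partitions.
 */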
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	/* TODO: Default memory node affinity init */

	return ret;
}

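/*
 * Early SOC config: derive the AID mask from the SDMA instance mask, set up
 * per-IP instance counts, then initialize the XCP manager and IP maps.
 */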
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		if ((inst_mask & mask) == mask)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}