xref: /openbmc/linux/drivers/gpu/drm/amd/amdkfd/kfd_crat.c (revision a790cc3a)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2015-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/pci.h>
25 #include <linux/acpi.h>
26 #include "kfd_crat.h"
27 #include "kfd_priv.h"
28 #include "kfd_topology.h"
29 #include "kfd_iommu.h"
30 #include "amdgpu.h"
31 #include "amdgpu_amdkfd.h"
32 
33 /* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
34  * GPU processor IDs are expressed with Bit[31]=1.
35  * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
36  * used in the CRAT.
37  */
38 static uint32_t gpu_processor_id_low = 0x80001000;
39 
40 /* Return the next available gpu_processor_id and increment it for the next GPU
41  *	@total_cu_count - Total CUs present in the GPU, including ones that are
42  *			  masked off
43  */
44 static inline unsigned int get_and_inc_gpu_processor_id(
45 				unsigned int total_cu_count)
46 {
47 	int current_id = gpu_processor_id_low;
48 
49 	gpu_processor_id_low += total_cu_count;
50 	return current_id;
51 }
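
/* Illustration (not part of the original source): a sketch of how the
 * allocator above hands out ID ranges. Assuming the first probed dGPU
 * reports 64 total CUs, it would receive processor IDs
 * 0x80001000..0x8000103F, and a second dGPU with 40 CUs would then start at
 * 0x80001040. The increment is not atomic, so this presumably relies on GPU
 * initialization being serialized.
 */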
52 
53 /* Static table to describe GPU Cache information */
54 struct kfd_gpu_cache_info {
55 	uint32_t	cache_size;
56 	uint32_t	cache_level;
57 	uint32_t	flags;
58 	/* Indicates how many Compute Units share this cache
59 	 * within a SA. Value = 1 indicates the cache is not shared
60 	 */
61 	uint32_t	num_cu_shared;
62 };
63 
64 static struct kfd_gpu_cache_info kaveri_cache_info[] = {
65 	{
66 		/* TCP L1 Cache per CU */
67 		.cache_size = 16,
68 		.cache_level = 1,
69 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
70 				CRAT_CACHE_FLAGS_DATA_CACHE |
71 				CRAT_CACHE_FLAGS_SIMD_CACHE),
72 		.num_cu_shared = 1,
73 	},
74 	{
75 		/* Scalar L1 Instruction Cache (in SQC module) per bank */
76 		.cache_size = 16,
77 		.cache_level = 1,
78 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
79 				CRAT_CACHE_FLAGS_INST_CACHE |
80 				CRAT_CACHE_FLAGS_SIMD_CACHE),
81 		.num_cu_shared = 2,
82 	},
83 	{
84 		/* Scalar L1 Data Cache (in SQC module) per bank */
85 		.cache_size = 8,
86 		.cache_level = 1,
87 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
88 				CRAT_CACHE_FLAGS_DATA_CACHE |
89 				CRAT_CACHE_FLAGS_SIMD_CACHE),
90 		.num_cu_shared = 2,
91 	},
92 
93 	/* TODO: Add L2 Cache information */
94 };
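
/* How to read these tables (illustrative note, not from the original
 * source): cache_size is in KiB and num_cu_shared says how many CUs map to
 * one cache instance. For kaveri above, every CU gets its own 16 KiB TCP L1
 * (num_cu_shared = 1), while each 16 KiB SQC instruction bank and each
 * 8 KiB SQC data bank serve 2 CUs, so the fill code emits one cache entry
 * per pair of CUs for those banks.
 */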
95 
96 
97 static struct kfd_gpu_cache_info carrizo_cache_info[] = {
98 	{
99 		/* TCP L1 Cache per CU */
100 		.cache_size = 16,
101 		.cache_level = 1,
102 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
103 				CRAT_CACHE_FLAGS_DATA_CACHE |
104 				CRAT_CACHE_FLAGS_SIMD_CACHE),
105 		.num_cu_shared = 1,
106 	},
107 	{
108 		/* Scalar L1 Instruction Cache (in SQC module) per bank */
109 		.cache_size = 8,
110 		.cache_level = 1,
111 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
112 				CRAT_CACHE_FLAGS_INST_CACHE |
113 				CRAT_CACHE_FLAGS_SIMD_CACHE),
114 		.num_cu_shared = 4,
115 	},
116 	{
117 		/* Scalar L1 Data Cache (in SQC module) per bank. */
118 		.cache_size = 4,
119 		.cache_level = 1,
120 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
121 				CRAT_CACHE_FLAGS_DATA_CACHE |
122 				CRAT_CACHE_FLAGS_SIMD_CACHE),
123 		.num_cu_shared = 4,
124 	},
125 
126 	/* TODO: Add L2 Cache information */
127 };
128 
129 #define hawaii_cache_info kaveri_cache_info
130 #define tonga_cache_info carrizo_cache_info
131 #define fiji_cache_info  carrizo_cache_info
132 #define polaris10_cache_info carrizo_cache_info
133 #define polaris11_cache_info carrizo_cache_info
134 #define polaris12_cache_info carrizo_cache_info
135 #define vegam_cache_info carrizo_cache_info
136 
137 /* NOTE: L1 cache information has been updated and L2/L3
138  * cache information has been added for Vega10 and
139  * newer ASICs. The unit for cache_size is KiB.
140  * Going forward, the cache details must be checked
141  * and updated for every new ASIC.
142  */
143 
144 static struct kfd_gpu_cache_info vega10_cache_info[] = {
145 	{
146 		/* TCP L1 Cache per CU */
147 		.cache_size = 16,
148 		.cache_level = 1,
149 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
150 				CRAT_CACHE_FLAGS_DATA_CACHE |
151 				CRAT_CACHE_FLAGS_SIMD_CACHE),
152 		.num_cu_shared = 1,
153 	},
154 	{
155 		/* Scalar L1 Instruction Cache per SQC */
156 		.cache_size = 32,
157 		.cache_level = 1,
158 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
159 				CRAT_CACHE_FLAGS_INST_CACHE |
160 				CRAT_CACHE_FLAGS_SIMD_CACHE),
161 		.num_cu_shared = 3,
162 	},
163 	{
164 		/* Scalar L1 Data Cache per SQC */
165 		.cache_size = 16,
166 		.cache_level = 1,
167 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
168 				CRAT_CACHE_FLAGS_DATA_CACHE |
169 				CRAT_CACHE_FLAGS_SIMD_CACHE),
170 		.num_cu_shared = 3,
171 	},
172 	{
173 		/* L2 Data Cache per GPU (Total Tex Cache) */
174 		.cache_size = 4096,
175 		.cache_level = 2,
176 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
177 				CRAT_CACHE_FLAGS_DATA_CACHE |
178 				CRAT_CACHE_FLAGS_SIMD_CACHE),
179 		.num_cu_shared = 16,
180 	},
181 };
182 
183 static struct kfd_gpu_cache_info raven_cache_info[] = {
184 	{
185 		/* TCP L1 Cache per CU */
186 		.cache_size = 16,
187 		.cache_level = 1,
188 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
189 				CRAT_CACHE_FLAGS_DATA_CACHE |
190 				CRAT_CACHE_FLAGS_SIMD_CACHE),
191 		.num_cu_shared = 1,
192 	},
193 	{
194 		/* Scalar L1 Instruction Cache per SQC */
195 		.cache_size = 32,
196 		.cache_level = 1,
197 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
198 				CRAT_CACHE_FLAGS_INST_CACHE |
199 				CRAT_CACHE_FLAGS_SIMD_CACHE),
200 		.num_cu_shared = 3,
201 	},
202 	{
203 		/* Scalar L1 Data Cache per SQC */
204 		.cache_size = 16,
205 		.cache_level = 1,
206 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
207 				CRAT_CACHE_FLAGS_DATA_CACHE |
208 				CRAT_CACHE_FLAGS_SIMD_CACHE),
209 		.num_cu_shared = 3,
210 	},
211 	{
212 		/* L2 Data Cache per GPU (Total Tex Cache) */
213 		.cache_size = 1024,
214 		.cache_level = 2,
215 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
216 				CRAT_CACHE_FLAGS_DATA_CACHE |
217 				CRAT_CACHE_FLAGS_SIMD_CACHE),
218 		.num_cu_shared = 11,
219 	},
220 };
221 
222 static struct kfd_gpu_cache_info renoir_cache_info[] = {
223 	{
224 		/* TCP L1 Cache per CU */
225 		.cache_size = 16,
226 		.cache_level = 1,
227 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
228 				CRAT_CACHE_FLAGS_DATA_CACHE |
229 				CRAT_CACHE_FLAGS_SIMD_CACHE),
230 		.num_cu_shared = 1,
231 	},
232 	{
233 		/* Scalar L1 Instruction Cache per SQC */
234 		.cache_size = 32,
235 		.cache_level = 1,
236 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
237 				CRAT_CACHE_FLAGS_INST_CACHE |
238 				CRAT_CACHE_FLAGS_SIMD_CACHE),
239 		.num_cu_shared = 3,
240 	},
241 	{
242 		/* Scalar L1 Data Cache per SQC */
243 		.cache_size = 16,
244 		.cache_level = 1,
245 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
246 				CRAT_CACHE_FLAGS_DATA_CACHE |
247 				CRAT_CACHE_FLAGS_SIMD_CACHE),
248 		.num_cu_shared = 3,
249 	},
250 	{
251 		/* L2 Data Cache per GPU (Total Tex Cache) */
252 		.cache_size = 1024,
253 		.cache_level = 2,
254 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
255 				CRAT_CACHE_FLAGS_DATA_CACHE |
256 				CRAT_CACHE_FLAGS_SIMD_CACHE),
257 		.num_cu_shared = 8,
258 	},
259 };
260 
261 static struct kfd_gpu_cache_info vega12_cache_info[] = {
262 	{
263 		/* TCP L1 Cache per CU */
264 		.cache_size = 16,
265 		.cache_level = 1,
266 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
267 				CRAT_CACHE_FLAGS_DATA_CACHE |
268 				CRAT_CACHE_FLAGS_SIMD_CACHE),
269 		.num_cu_shared = 1,
270 	},
271 	{
272 		/* Scalar L1 Instruction Cache per SQC */
273 		.cache_size = 32,
274 		.cache_level = 1,
275 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
276 				CRAT_CACHE_FLAGS_INST_CACHE |
277 				CRAT_CACHE_FLAGS_SIMD_CACHE),
278 		.num_cu_shared = 3,
279 	},
280 	{
281 		/* Scalar L1 Data Cache per SQC */
282 		.cache_size = 16,
283 		.cache_level = 1,
284 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
285 				CRAT_CACHE_FLAGS_DATA_CACHE |
286 				CRAT_CACHE_FLAGS_SIMD_CACHE),
287 		.num_cu_shared = 3,
288 	},
289 	{
290 		/* L2 Data Cache per GPU (Total Tex Cache) */
291 		.cache_size = 2048,
292 		.cache_level = 2,
293 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
294 				CRAT_CACHE_FLAGS_DATA_CACHE |
295 				CRAT_CACHE_FLAGS_SIMD_CACHE),
296 		.num_cu_shared = 5,
297 	},
298 };
299 
300 static struct kfd_gpu_cache_info vega20_cache_info[] = {
301 	{
302 		/* TCP L1 Cache per CU */
303 		.cache_size = 16,
304 		.cache_level = 1,
305 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
306 				CRAT_CACHE_FLAGS_DATA_CACHE |
307 				CRAT_CACHE_FLAGS_SIMD_CACHE),
308 		.num_cu_shared = 1,
309 	},
310 	{
311 		/* Scalar L1 Instruction Cache per SQC */
312 		.cache_size = 32,
313 		.cache_level = 1,
314 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
315 				CRAT_CACHE_FLAGS_INST_CACHE |
316 				CRAT_CACHE_FLAGS_SIMD_CACHE),
317 		.num_cu_shared = 3,
318 	},
319 	{
320 		/* Scalar L1 Data Cache per SQC */
321 		.cache_size = 16,
322 		.cache_level = 1,
323 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
324 				CRAT_CACHE_FLAGS_DATA_CACHE |
325 				CRAT_CACHE_FLAGS_SIMD_CACHE),
326 		.num_cu_shared = 3,
327 	},
328 	{
329 		/* L2 Data Cache per GPU (Total Tex Cache) */
330 		.cache_size = 8192,
331 		.cache_level = 2,
332 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
333 				CRAT_CACHE_FLAGS_DATA_CACHE |
334 				CRAT_CACHE_FLAGS_SIMD_CACHE),
335 		.num_cu_shared = 16,
336 	},
337 };
338 
339 static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
340 	{
341 		/* TCP L1 Cache per CU */
342 		.cache_size = 16,
343 		.cache_level = 1,
344 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
345 				CRAT_CACHE_FLAGS_DATA_CACHE |
346 				CRAT_CACHE_FLAGS_SIMD_CACHE),
347 		.num_cu_shared = 1,
348 	},
349 	{
350 		/* Scalar L1 Instruction Cache per SQC */
351 		.cache_size = 32,
352 		.cache_level = 1,
353 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
354 				CRAT_CACHE_FLAGS_INST_CACHE |
355 				CRAT_CACHE_FLAGS_SIMD_CACHE),
356 		.num_cu_shared = 2,
357 	},
358 	{
359 		/* Scalar L1 Data Cache per SQC */
360 		.cache_size = 16,
361 		.cache_level = 1,
362 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
363 				CRAT_CACHE_FLAGS_DATA_CACHE |
364 				CRAT_CACHE_FLAGS_SIMD_CACHE),
365 		.num_cu_shared = 2,
366 	},
367 	{
368 		/* L2 Data Cache per GPU (Total Tex Cache) */
369 		.cache_size = 8192,
370 		.cache_level = 2,
371 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
372 				CRAT_CACHE_FLAGS_DATA_CACHE |
373 				CRAT_CACHE_FLAGS_SIMD_CACHE),
374 		.num_cu_shared = 14,
375 	},
376 };
377 
378 static struct kfd_gpu_cache_info navi10_cache_info[] = {
379 	{
380 		/* TCP L1 Cache per CU */
381 		.cache_size = 16,
382 		.cache_level = 1,
383 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
384 				CRAT_CACHE_FLAGS_DATA_CACHE |
385 				CRAT_CACHE_FLAGS_SIMD_CACHE),
386 		.num_cu_shared = 1,
387 	},
388 	{
389 		/* Scalar L1 Instruction Cache per SQC */
390 		.cache_size = 32,
391 		.cache_level = 1,
392 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
393 				CRAT_CACHE_FLAGS_INST_CACHE |
394 				CRAT_CACHE_FLAGS_SIMD_CACHE),
395 		.num_cu_shared = 2,
396 	},
397 	{
398 		/* Scalar L1 Data Cache per SQC */
399 		.cache_size = 16,
400 		.cache_level = 1,
401 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
402 				CRAT_CACHE_FLAGS_DATA_CACHE |
403 				CRAT_CACHE_FLAGS_SIMD_CACHE),
404 		.num_cu_shared = 2,
405 	},
406 	{
407 		/* GL1 Data Cache per SA */
408 		.cache_size = 128,
409 		.cache_level = 1,
410 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
411 				CRAT_CACHE_FLAGS_DATA_CACHE |
412 				CRAT_CACHE_FLAGS_SIMD_CACHE),
413 		.num_cu_shared = 10,
414 	},
415 	{
416 		/* L2 Data Cache per GPU (Total Tex Cache) */
417 		.cache_size = 4096,
418 		.cache_level = 2,
419 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
420 				CRAT_CACHE_FLAGS_DATA_CACHE |
421 				CRAT_CACHE_FLAGS_SIMD_CACHE),
422 		.num_cu_shared = 10,
423 	},
424 };
425 
426 static struct kfd_gpu_cache_info vangogh_cache_info[] = {
427 	{
428 		/* TCP L1 Cache per CU */
429 		.cache_size = 16,
430 		.cache_level = 1,
431 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
432 				CRAT_CACHE_FLAGS_DATA_CACHE |
433 				CRAT_CACHE_FLAGS_SIMD_CACHE),
434 		.num_cu_shared = 1,
435 	},
436 	{
437 		/* Scalar L1 Instruction Cache per SQC */
438 		.cache_size = 32,
439 		.cache_level = 1,
440 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
441 				CRAT_CACHE_FLAGS_INST_CACHE |
442 				CRAT_CACHE_FLAGS_SIMD_CACHE),
443 		.num_cu_shared = 2,
444 	},
445 	{
446 		/* Scalar L1 Data Cache per SQC */
447 		.cache_size = 16,
448 		.cache_level = 1,
449 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
450 				CRAT_CACHE_FLAGS_DATA_CACHE |
451 				CRAT_CACHE_FLAGS_SIMD_CACHE),
452 		.num_cu_shared = 2,
453 	},
454 	{
455 		/* GL1 Data Cache per SA */
456 		.cache_size = 128,
457 		.cache_level = 1,
458 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
459 				CRAT_CACHE_FLAGS_DATA_CACHE |
460 				CRAT_CACHE_FLAGS_SIMD_CACHE),
461 		.num_cu_shared = 8,
462 	},
463 	{
464 		/* L2 Data Cache per GPU (Total Tex Cache) */
465 		.cache_size = 1024,
466 		.cache_level = 2,
467 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
468 				CRAT_CACHE_FLAGS_DATA_CACHE |
469 				CRAT_CACHE_FLAGS_SIMD_CACHE),
470 		.num_cu_shared = 8,
471 	},
472 };
473 
474 static struct kfd_gpu_cache_info navi14_cache_info[] = {
475 	{
476 		/* TCP L1 Cache per CU */
477 		.cache_size = 16,
478 		.cache_level = 1,
479 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
480 				CRAT_CACHE_FLAGS_DATA_CACHE |
481 				CRAT_CACHE_FLAGS_SIMD_CACHE),
482 		.num_cu_shared = 1,
483 	},
484 	{
485 		/* Scalar L1 Instruction Cache per SQC */
486 		.cache_size = 32,
487 		.cache_level = 1,
488 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
489 				CRAT_CACHE_FLAGS_INST_CACHE |
490 				CRAT_CACHE_FLAGS_SIMD_CACHE),
491 		.num_cu_shared = 2,
492 	},
493 	{
494 		/* Scalar L1 Data Cache per SQC */
495 		.cache_size = 16,
496 		.cache_level = 1,
497 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
498 				CRAT_CACHE_FLAGS_DATA_CACHE |
499 				CRAT_CACHE_FLAGS_SIMD_CACHE),
500 		.num_cu_shared = 2,
501 	},
502 	{
503 		/* GL1 Data Cache per SA */
504 		.cache_size = 128,
505 		.cache_level = 1,
506 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
507 				CRAT_CACHE_FLAGS_DATA_CACHE |
508 				CRAT_CACHE_FLAGS_SIMD_CACHE),
509 		.num_cu_shared = 12,
510 	},
511 	{
512 		/* L2 Data Cache per GPU (Total Tex Cache) */
513 		.cache_size = 2048,
514 		.cache_level = 2,
515 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
516 				CRAT_CACHE_FLAGS_DATA_CACHE |
517 				CRAT_CACHE_FLAGS_SIMD_CACHE),
518 		.num_cu_shared = 12,
519 	},
520 };
521 
522 static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
523 	{
524 		/* TCP L1 Cache per CU */
525 		.cache_size = 16,
526 		.cache_level = 1,
527 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
528 				CRAT_CACHE_FLAGS_DATA_CACHE |
529 				CRAT_CACHE_FLAGS_SIMD_CACHE),
530 		.num_cu_shared = 1,
531 	},
532 	{
533 		/* Scalar L1 Instruction Cache per SQC */
534 		.cache_size = 32,
535 		.cache_level = 1,
536 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
537 				CRAT_CACHE_FLAGS_INST_CACHE |
538 				CRAT_CACHE_FLAGS_SIMD_CACHE),
539 		.num_cu_shared = 2,
540 	},
541 	{
542 		/* Scalar L1 Data Cache per SQC */
543 		.cache_size = 16,
544 		.cache_level = 1,
545 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
546 				CRAT_CACHE_FLAGS_DATA_CACHE |
547 				CRAT_CACHE_FLAGS_SIMD_CACHE),
548 		.num_cu_shared = 2,
549 	},
550 	{
551 		/* GL1 Data Cache per SA */
552 		.cache_size = 128,
553 		.cache_level = 1,
554 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
555 				CRAT_CACHE_FLAGS_DATA_CACHE |
556 				CRAT_CACHE_FLAGS_SIMD_CACHE),
557 		.num_cu_shared = 10,
558 	},
559 	{
560 		/* L2 Data Cache per GPU (Total Tex Cache) */
561 		.cache_size = 4096,
562 		.cache_level = 2,
563 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
564 				CRAT_CACHE_FLAGS_DATA_CACHE |
565 				CRAT_CACHE_FLAGS_SIMD_CACHE),
566 		.num_cu_shared = 10,
567 	},
568 	{
569 		/* L3 Data Cache per GPU */
570 		.cache_size = 128*1024,
571 		.cache_level = 3,
572 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
573 				CRAT_CACHE_FLAGS_DATA_CACHE |
574 				CRAT_CACHE_FLAGS_SIMD_CACHE),
575 		.num_cu_shared = 10,
576 	},
577 };
578 
579 static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
580 	{
581 		/* TCP L1 Cache per CU */
582 		.cache_size = 16,
583 		.cache_level = 1,
584 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
585 				CRAT_CACHE_FLAGS_DATA_CACHE |
586 				CRAT_CACHE_FLAGS_SIMD_CACHE),
587 		.num_cu_shared = 1,
588 	},
589 	{
590 		/* Scalar L1 Instruction Cache per SQC */
591 		.cache_size = 32,
592 		.cache_level = 1,
593 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
594 				CRAT_CACHE_FLAGS_INST_CACHE |
595 				CRAT_CACHE_FLAGS_SIMD_CACHE),
596 		.num_cu_shared = 2,
597 	},
598 	{
599 		/* Scalar L1 Data Cache per SQC */
600 		.cache_size = 16,
601 		.cache_level = 1,
602 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
603 				CRAT_CACHE_FLAGS_DATA_CACHE |
604 				CRAT_CACHE_FLAGS_SIMD_CACHE),
605 		.num_cu_shared = 2,
606 	},
607 	{
608 		/* GL1 Data Cache per SA */
609 		.cache_size = 128,
610 		.cache_level = 1,
611 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
612 				CRAT_CACHE_FLAGS_DATA_CACHE |
613 				CRAT_CACHE_FLAGS_SIMD_CACHE),
614 		.num_cu_shared = 10,
615 	},
616 	{
617 		/* L2 Data Cache per GPU (Total Tex Cache) */
618 		.cache_size = 3072,
619 		.cache_level = 2,
620 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
621 				CRAT_CACHE_FLAGS_DATA_CACHE |
622 				CRAT_CACHE_FLAGS_SIMD_CACHE),
623 		.num_cu_shared = 10,
624 	},
625 	{
626 		/* L3 Data Cache per GPU */
627 		.cache_size = 96*1024,
628 		.cache_level = 3,
629 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
630 				CRAT_CACHE_FLAGS_DATA_CACHE |
631 				CRAT_CACHE_FLAGS_SIMD_CACHE),
632 		.num_cu_shared = 10,
633 	},
634 };
635 
636 static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
637 	{
638 		/* TCP L1 Cache per CU */
639 		.cache_size = 16,
640 		.cache_level = 1,
641 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
642 				CRAT_CACHE_FLAGS_DATA_CACHE |
643 				CRAT_CACHE_FLAGS_SIMD_CACHE),
644 		.num_cu_shared = 1,
645 	},
646 	{
647 		/* Scalar L1 Instruction Cache per SQC */
648 		.cache_size = 32,
649 		.cache_level = 1,
650 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
651 				CRAT_CACHE_FLAGS_INST_CACHE |
652 				CRAT_CACHE_FLAGS_SIMD_CACHE),
653 		.num_cu_shared = 2,
654 	},
655 	{
656 		/* Scalar L1 Data Cache per SQC */
657 		.cache_size = 16,
658 		.cache_level = 1,
659 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
660 				CRAT_CACHE_FLAGS_DATA_CACHE |
661 				CRAT_CACHE_FLAGS_SIMD_CACHE),
662 		.num_cu_shared = 2,
663 	},
664 	{
665 		/* GL1 Data Cache per SA */
666 		.cache_size = 128,
667 		.cache_level = 1,
668 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
669 				CRAT_CACHE_FLAGS_DATA_CACHE |
670 				CRAT_CACHE_FLAGS_SIMD_CACHE),
671 		.num_cu_shared = 8,
672 	},
673 	{
674 		/* L2 Data Cache per GPU (Total Tex Cache) */
675 		.cache_size = 2048,
676 		.cache_level = 2,
677 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
678 				CRAT_CACHE_FLAGS_DATA_CACHE |
679 				CRAT_CACHE_FLAGS_SIMD_CACHE),
680 		.num_cu_shared = 8,
681 	},
682 	{
683 		/* L3 Data Cache per GPU */
684 		.cache_size = 32*1024,
685 		.cache_level = 3,
686 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
687 				CRAT_CACHE_FLAGS_DATA_CACHE |
688 				CRAT_CACHE_FLAGS_SIMD_CACHE),
689 		.num_cu_shared = 8,
690 	},
691 };
692 
693 static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
694 	{
695 		/* TCP L1 Cache per CU */
696 		.cache_size = 16,
697 		.cache_level = 1,
698 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
699 				CRAT_CACHE_FLAGS_DATA_CACHE |
700 				CRAT_CACHE_FLAGS_SIMD_CACHE),
701 		.num_cu_shared = 1,
702 	},
703 	{
704 		/* Scalar L1 Instruction Cache per SQC */
705 		.cache_size = 32,
706 		.cache_level = 1,
707 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
708 				CRAT_CACHE_FLAGS_INST_CACHE |
709 				CRAT_CACHE_FLAGS_SIMD_CACHE),
710 		.num_cu_shared = 2,
711 	},
712 	{
713 		/* Scalar L1 Data Cache per SQC */
714 		.cache_size = 16,
715 		.cache_level = 1,
716 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
717 				CRAT_CACHE_FLAGS_DATA_CACHE |
718 				CRAT_CACHE_FLAGS_SIMD_CACHE),
719 		.num_cu_shared = 2,
720 	},
721 	{
722 		/* GL1 Data Cache per SA */
723 		.cache_size = 128,
724 		.cache_level = 1,
725 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
726 				CRAT_CACHE_FLAGS_DATA_CACHE |
727 				CRAT_CACHE_FLAGS_SIMD_CACHE),
728 		.num_cu_shared = 8,
729 	},
730 	{
731 		/* L2 Data Cache per GPU (Total Tex Cache) */
732 		.cache_size = 1024,
733 		.cache_level = 2,
734 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
735 				CRAT_CACHE_FLAGS_DATA_CACHE |
736 				CRAT_CACHE_FLAGS_SIMD_CACHE),
737 		.num_cu_shared = 8,
738 	},
739 	{
740 		/* L3 Data Cache per GPU */
741 		.cache_size = 16*1024,
742 		.cache_level = 3,
743 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
744 				CRAT_CACHE_FLAGS_DATA_CACHE |
745 				CRAT_CACHE_FLAGS_SIMD_CACHE),
746 		.num_cu_shared = 8,
747 	},
748 };
749 
750 static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
751 	{
752 		/* TCP L1 Cache per CU */
753 		.cache_size = 16,
754 		.cache_level = 1,
755 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
756 				CRAT_CACHE_FLAGS_DATA_CACHE |
757 				CRAT_CACHE_FLAGS_SIMD_CACHE),
758 		.num_cu_shared = 1,
759 	},
760 	{
761 		/* Scalar L1 Instruction Cache per SQC */
762 		.cache_size = 32,
763 		.cache_level = 1,
764 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
765 				CRAT_CACHE_FLAGS_INST_CACHE |
766 				CRAT_CACHE_FLAGS_SIMD_CACHE),
767 		.num_cu_shared = 2,
768 	},
769 	{
770 		/* Scalar L1 Data Cache per SQC */
771 		.cache_size = 16,
772 		.cache_level = 1,
773 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
774 				CRAT_CACHE_FLAGS_DATA_CACHE |
775 				CRAT_CACHE_FLAGS_SIMD_CACHE),
776 		.num_cu_shared = 2,
777 	},
778 	{
779 		/* GL1 Data Cache per SA */
780 		.cache_size = 128,
781 		.cache_level = 1,
782 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
783 				CRAT_CACHE_FLAGS_DATA_CACHE |
784 				CRAT_CACHE_FLAGS_SIMD_CACHE),
785 		.num_cu_shared = 6,
786 	},
787 	{
788 		/* L2 Data Cache per GPU (Total Tex Cache) */
789 		.cache_size = 2048,
790 		.cache_level = 2,
791 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
792 				CRAT_CACHE_FLAGS_DATA_CACHE |
793 				CRAT_CACHE_FLAGS_SIMD_CACHE),
794 		.num_cu_shared = 6,
795 	},
796 };
797 
798 static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
799 		struct crat_subtype_computeunit *cu)
800 {
801 	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
802 	dev->node_props.cpu_core_id_base = cu->processor_id_low;
803 	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
804 		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
805 
806 	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
807 			cu->processor_id_low);
808 }
809 
810 static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
811 		struct crat_subtype_computeunit *cu)
812 {
813 	dev->node_props.simd_id_base = cu->processor_id_low;
814 	dev->node_props.simd_count = cu->num_simd_cores;
815 	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
816 	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
817 	dev->node_props.wave_front_size = cu->wave_front_size;
818 	dev->node_props.array_count = cu->array_count;
819 	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
820 	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
821 	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
822 	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
823 		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
824 	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
825 }
826 
827 /* kfd_parse_subtype_cu - parse a compute unit subtype and attach it to the
828  * correct topology device present in the device_list
829  */
830 static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
831 				struct list_head *device_list)
832 {
833 	struct kfd_topology_device *dev;
834 
835 	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
836 			cu->proximity_domain, cu->hsa_capability);
837 	list_for_each_entry(dev, device_list, list) {
838 		if (cu->proximity_domain == dev->proximity_domain) {
839 			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
840 				kfd_populated_cu_info_cpu(dev, cu);
841 
842 			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
843 				kfd_populated_cu_info_gpu(dev, cu);
844 			break;
845 		}
846 	}
847 
848 	return 0;
849 }
850 
851 static struct kfd_mem_properties *
852 find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
853 		struct kfd_topology_device *dev)
854 {
855 	struct kfd_mem_properties *props;
856 
857 	list_for_each_entry(props, &dev->mem_props, list) {
858 		if (props->heap_type == heap_type
859 				&& props->flags == flags
860 				&& props->width == width)
861 			return props;
862 	}
863 
864 	return NULL;
865 }
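
/* Example of the aggregation below (illustrative, with made-up sizes): two
 * CRAT memory subtypes for the same proximity domain, both FB_PRIVATE with
 * identical flags and width, end up as a single kfd_mem_properties entry
 * whose size_in_bytes is the sum of the two banks, because user mode sees
 * one virtual heap per heap type rather than physical segments.
 */
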
866 /* kfd_parse_subtype_mem - parse a memory subtype and attach it to the
867  * correct topology device present in the device_list
868  */
869 static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
870 				struct list_head *device_list)
871 {
872 	struct kfd_mem_properties *props;
873 	struct kfd_topology_device *dev;
874 	uint32_t heap_type;
875 	uint64_t size_in_bytes;
876 	uint32_t flags = 0;
877 	uint32_t width;
878 
879 	pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
880 			mem->proximity_domain);
881 	list_for_each_entry(dev, device_list, list) {
882 		if (mem->proximity_domain == dev->proximity_domain) {
883 			/* We're on GPU node */
884 			if (dev->node_props.cpu_cores_count == 0) {
885 				/* APU */
886 				if (mem->visibility_type == 0)
887 					heap_type =
888 						HSA_MEM_HEAP_TYPE_FB_PRIVATE;
889 				/* dGPU */
890 				else
891 					heap_type = mem->visibility_type;
892 			} else
893 				heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
894 
895 			if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
896 				flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
897 			if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
898 				flags |= HSA_MEM_FLAGS_NON_VOLATILE;
899 
900 			size_in_bytes =
901 				((uint64_t)mem->length_high << 32) +
902 							mem->length_low;
903 			width = mem->width;
904 
905 			/* Multiple banks of the same type are aggregated into
906 			 * one. User mode doesn't care about multiple physical
907 			 * memory segments. It's managed as a single virtual
908 			 * heap for user mode.
909 			 */
910 			props = find_subtype_mem(heap_type, flags, width, dev);
911 			if (props) {
912 				props->size_in_bytes += size_in_bytes;
913 				break;
914 			}
915 
916 			props = kfd_alloc_struct(props);
917 			if (!props)
918 				return -ENOMEM;
919 
920 			props->heap_type = heap_type;
921 			props->flags = flags;
922 			props->size_in_bytes = size_in_bytes;
923 			props->width = width;
924 
925 			dev->node_props.mem_banks_count++;
926 			list_add_tail(&props->list, &dev->mem_props);
927 
928 			break;
929 		}
930 	}
931 
932 	return 0;
933 }
934 
935 /* kfd_parse_subtype_cache - parse a cache subtype and attach it to the
936  * correct topology device present in the device_list
937  */
938 static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
939 			struct list_head *device_list)
940 {
941 	struct kfd_cache_properties *props;
942 	struct kfd_topology_device *dev;
943 	uint32_t id;
944 	uint32_t total_num_of_cu;
945 
946 	id = cache->processor_id_low;
947 
948 	pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
949 	list_for_each_entry(dev, device_list, list) {
950 		total_num_of_cu = (dev->node_props.array_count *
951 					dev->node_props.cu_per_simd_array);
952 
953 		/* Cache information in CRAT doesn't have proximity_domain
954 		 * information as it is associated with a CPU core or GPU
955 		 * Compute Unit. So map the cache using the CPU core ID or
956 		 * SIMD (GPU) ID.
957 		 * TODO: This works because currently we can safely assume that
958 		 *  Compute Units are parsed before caches are parsed. In the
959 		 *  future, remove this dependency.
960 		 */
961 		if ((id >= dev->node_props.cpu_core_id_base &&
962 			id <= dev->node_props.cpu_core_id_base +
963 				dev->node_props.cpu_cores_count) ||
964 			(id >= dev->node_props.simd_id_base &&
965 			id < dev->node_props.simd_id_base +
966 				total_num_of_cu)) {
967 			props = kfd_alloc_struct(props);
968 			if (!props)
969 				return -ENOMEM;
970 
971 			props->processor_id_low = id;
972 			props->cache_level = cache->cache_level;
973 			props->cache_size = cache->cache_size;
974 			props->cacheline_size = cache->cache_line_size;
975 			props->cachelines_per_tag = cache->lines_per_tag;
976 			props->cache_assoc = cache->associativity;
977 			props->cache_latency = cache->cache_latency;
978 			memcpy(props->sibling_map, cache->sibling_map,
979 					sizeof(props->sibling_map));
980 
981 			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
982 				props->cache_type |= HSA_CACHE_TYPE_DATA;
983 			if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
984 				props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
985 			if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
986 				props->cache_type |= HSA_CACHE_TYPE_CPU;
987 			if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
988 				props->cache_type |= HSA_CACHE_TYPE_HSACU;
989 
990 			dev->cache_count++;
991 			dev->node_props.caches_count++;
992 			list_add_tail(&props->list, &dev->cache_props);
993 
994 			break;
995 		}
996 	}
997 
998 	return 0;
999 }
1000 
1001 /* kfd_parse_subtype_iolink - parse an iolink subtype and attach it to the
1002  * correct topology device present in the device_list
1003  */
1004 static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
1005 					struct list_head *device_list)
1006 {
1007 	struct kfd_iolink_properties *props = NULL, *props2;
1008 	struct kfd_topology_device *dev, *to_dev;
1009 	uint32_t id_from;
1010 	uint32_t id_to;
1011 
1012 	id_from = iolink->proximity_domain_from;
1013 	id_to = iolink->proximity_domain_to;
1014 
1015 	pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
1016 			id_from, id_to);
1017 	list_for_each_entry(dev, device_list, list) {
1018 		if (id_from == dev->proximity_domain) {
1019 			props = kfd_alloc_struct(props);
1020 			if (!props)
1021 				return -ENOMEM;
1022 
1023 			props->node_from = id_from;
1024 			props->node_to = id_to;
1025 			props->ver_maj = iolink->version_major;
1026 			props->ver_min = iolink->version_minor;
1027 			props->iolink_type = iolink->io_interface_type;
1028 
1029 			if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
1030 				props->weight = 20;
1031 			else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
1032 				props->weight = 15 * iolink->num_hops_xgmi;
1033 			else
1034 				props->weight = node_distance(id_from, id_to);
1035 
1036 			props->min_latency = iolink->minimum_latency;
1037 			props->max_latency = iolink->maximum_latency;
1038 			props->min_bandwidth = iolink->minimum_bandwidth_mbs;
1039 			props->max_bandwidth = iolink->maximum_bandwidth_mbs;
1040 			props->rec_transfer_size =
1041 					iolink->recommended_transfer_size;
1042 
1043 			dev->node_props.io_links_count++;
1044 			list_add_tail(&props->list, &dev->io_link_props);
1045 			break;
1046 		}
1047 	}
1048 
1049 	/* CPU topology is created before GPUs are detected, so CPU->GPU
1050 	 * links are not built at that time. If a PCIe type is discovered, it
1051 	 * means a GPU is detected and we are adding a GPU->CPU link to the
1052 	 * topology. At this time, also add the corresponding CPU->GPU link
1053 	 * if the GPU is large BAR.
1054 	 * For xGMI, only one direction of the link is present in the CRAT
1055 	 * table, so add the corresponding reversed-direction link now.
1056 	 */
1057 	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
1058 		to_dev = kfd_topology_device_by_proximity_domain_no_lock(id_to);
1059 		if (!to_dev)
1060 			return -ENODEV;
1061 		/* same everything but the other direction */
1062 		props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
1063 		if (!props2)
1064 			return -ENOMEM;
1065 
1066 		props2->node_from = id_to;
1067 		props2->node_to = id_from;
1068 		props2->kobj = NULL;
1069 		to_dev->node_props.io_links_count++;
1070 		list_add_tail(&props2->list, &to_dev->io_link_props);
1071 	}
1072 
1073 	return 0;
1074 }
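
/* Worked example (not from the original source): for a PCIe link from GPU
 * domain 1 to CPU domain 0 with CRAT_IOLINK_FLAGS_BI_DIRECTIONAL set, the
 * code above adds a weight-20 1->0 link to device 1, then duplicates it via
 * kmemdup() with node_from/node_to swapped and attaches the 0->1 copy to
 * device 0. An xGMI link with 2 hops would instead get weight 15 * 2 = 30.
 */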
1075 
1076 /* kfd_parse_subtype - parse a subtype and attach it to the correct topology
1077  * device present in the device_list
1078  *	@sub_type_hdr - subtype section of crat_image
1079  *	@device_list - list of topology devices present in this crat_image
1080  */
1081 static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
1082 				struct list_head *device_list)
1083 {
1084 	struct crat_subtype_computeunit *cu;
1085 	struct crat_subtype_memory *mem;
1086 	struct crat_subtype_cache *cache;
1087 	struct crat_subtype_iolink *iolink;
1088 	int ret = 0;
1089 
1090 	switch (sub_type_hdr->type) {
1091 	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
1092 		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
1093 		ret = kfd_parse_subtype_cu(cu, device_list);
1094 		break;
1095 	case CRAT_SUBTYPE_MEMORY_AFFINITY:
1096 		mem = (struct crat_subtype_memory *)sub_type_hdr;
1097 		ret = kfd_parse_subtype_mem(mem, device_list);
1098 		break;
1099 	case CRAT_SUBTYPE_CACHE_AFFINITY:
1100 		cache = (struct crat_subtype_cache *)sub_type_hdr;
1101 		ret = kfd_parse_subtype_cache(cache, device_list);
1102 		break;
1103 	case CRAT_SUBTYPE_TLB_AFFINITY:
1104 		/*
1105 		 * For now, nothing to do here
1106 		 */
1107 		pr_debug("Found TLB entry in CRAT table (not processing)\n");
1108 		break;
1109 	case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
1110 		/*
1111 		 * For now, nothing to do here
1112 		 */
1113 		pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
1114 		break;
1115 	case CRAT_SUBTYPE_IOLINK_AFFINITY:
1116 		iolink = (struct crat_subtype_iolink *)sub_type_hdr;
1117 		ret = kfd_parse_subtype_iolink(iolink, device_list);
1118 		break;
1119 	default:
1120 		pr_warn("Unknown subtype %d in CRAT\n",
1121 				sub_type_hdr->type);
1122 	}
1123 
1124 	return ret;
1125 }
1126 
1127 /* kfd_parse_crat_table - parse the CRAT table. For each node present in the
1128  * CRAT, create a kfd_topology_device and add it to device_list. Also parse
1129  * the CRAT subtypes and attach them to the appropriate kfd_topology_device
1130  *	@crat_image - input image containing CRAT
1131  *	@device_list - [OUT] list of kfd_topology_device generated after
1132  *		       parsing crat_image
1133  *	@proximity_domain - Proximity domain of the first device in the table
1134  *
1135  *	Return - 0 if successful, else a negative error code
1136  */
1137 int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
1138 			 uint32_t proximity_domain)
1139 {
1140 	struct kfd_topology_device *top_dev = NULL;
1141 	struct crat_subtype_generic *sub_type_hdr;
1142 	uint16_t node_id;
1143 	int ret = 0;
1144 	struct crat_header *crat_table = (struct crat_header *)crat_image;
1145 	uint16_t num_nodes;
1146 	uint32_t image_len;
1147 
1148 	if (!crat_image)
1149 		return -EINVAL;
1150 
1151 	if (!list_empty(device_list)) {
1152 		pr_warn("Error device list should be empty\n");
1153 		return -EINVAL;
1154 	}
1155 
1156 	num_nodes = crat_table->num_domains;
1157 	image_len = crat_table->length;
1158 
1159 	pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
1160 
1161 	for (node_id = 0; node_id < num_nodes; node_id++) {
1162 		top_dev = kfd_create_topology_device(device_list);
1163 		if (!top_dev)
1164 			break;
1165 		top_dev->proximity_domain = proximity_domain++;
1166 	}
1167 
1168 	if (!top_dev) {
1169 		ret = -ENOMEM;
1170 		goto err;
1171 	}
1172 
1173 	memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
1174 	memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
1175 			CRAT_OEMTABLEID_LENGTH);
1176 	top_dev->oem_revision = crat_table->oem_revision;
1177 
1178 	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
1179 	while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
1180 			((char *)crat_image) + image_len) {
1181 		if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
1182 			ret = kfd_parse_subtype(sub_type_hdr, device_list);
1183 			if (ret)
1184 				break;
1185 		}
1186 
1187 		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1188 				sub_type_hdr->length);
1189 	}
1190 
1191 err:
1192 	if (ret)
1193 		kfd_release_topology_device_list(device_list);
1194 
1195 	return ret;
1196 }
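
/* Layout walked by the loop above (sketch, assuming a well-formed image):
 *
 *   +----------------------+  <- crat_table (struct crat_header)
 *   | header: length,      |
 *   |  num_domains, ...    |
 *   +----------------------+  <- sub_type_hdr = (crat_table + 1)
 *   | subtype 0 (length)   |
 *   +----------------------+  <- advance by sub_type_hdr->length
 *   | subtype 1 (length)   |
 *   +----------------------+
 *   | ...                  |  until crat_image + image_len
 *   +----------------------+
 */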
1197 
1198 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1199 static int fill_in_l1_pcache(struct crat_subtype_cache *pcache,
1200 				struct kfd_gpu_cache_info *pcache_info,
1201 				struct kfd_cu_info *cu_info,
1202 				int mem_available,
1203 				int cu_bitmask,
1204 				int cache_type, unsigned int cu_processor_id,
1205 				int cu_block)
1206 {
1207 	unsigned int cu_sibling_map_mask;
1208 	int first_active_cu;
1209 
1210 	/* First check if enough memory is available */
1211 	if (sizeof(struct crat_subtype_cache) > mem_available)
1212 		return -ENOMEM;
1213 
1214 	cu_sibling_map_mask = cu_bitmask;
1215 	cu_sibling_map_mask >>= cu_block;
1216 	cu_sibling_map_mask &=
1217 		((1 << pcache_info[cache_type].num_cu_shared) - 1);
1218 	first_active_cu = ffs(cu_sibling_map_mask);
1219 
1220 	/* A CU could be inactive. In the case of a shared cache, find the
1221 	 * first active CU; in the case of a non-shared cache, check whether
1222 	 * the CU is inactive and, if so, skip it.
1223 	 */
1224 	if (first_active_cu) {
1225 		memset(pcache, 0, sizeof(struct crat_subtype_cache));
1226 		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
1227 		pcache->length = sizeof(struct crat_subtype_cache);
1228 		pcache->flags = pcache_info[cache_type].flags;
1229 		pcache->processor_id_low = cu_processor_id
1230 					 + (first_active_cu - 1);
1231 		pcache->cache_level = pcache_info[cache_type].cache_level;
1232 		pcache->cache_size = pcache_info[cache_type].cache_size;
1233 
1234 		/* Sibling map is w.r.t processor_id_low, so shift out
1235 		 * inactive CU
1236 		 */
1237 		cu_sibling_map_mask =
1238 			cu_sibling_map_mask >> (first_active_cu - 1);
1239 
1240 		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
1241 		pcache->sibling_map[1] =
1242 				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1243 		pcache->sibling_map[2] =
1244 				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1245 		pcache->sibling_map[3] =
1246 				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1247 		return 0;
1248 	}
1249 	return 1;
1250 }
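
/* Worked example for the mask logic above (illustrative values): with
 * cu_bitmask = 0b1101, cu_block = 2 and num_cu_shared = 2, the shifted and
 * masked cu_sibling_map_mask is 0b11, ffs() returns 1, so the entry is
 * reported at cu_processor_id + 0 with sibling_map[0] = 0x03. Had the mask
 * come out as 0b10 instead, ffs() would return 2 and the entry would shift
 * to cu_processor_id + 1 with sibling_map[0] = 0x01.
 */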
1251 
1252 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1253 static int fill_in_l2_l3_pcache(struct crat_subtype_cache *pcache,
1254 				struct kfd_gpu_cache_info *pcache_info,
1255 				struct kfd_cu_info *cu_info,
1256 				int mem_available,
1257 				int cache_type, unsigned int cu_processor_id)
1258 {
1259 	unsigned int cu_sibling_map_mask;
1260 	int first_active_cu;
1261 	int i, j, k;
1262 
1263 	/* First check if enough memory is available */
1264 	if (sizeof(struct crat_subtype_cache) > mem_available)
1265 		return -ENOMEM;
1266 
1267 	cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
1268 	cu_sibling_map_mask &=
1269 		((1 << pcache_info[cache_type].num_cu_shared) - 1);
1270 	first_active_cu = ffs(cu_sibling_map_mask);
1271 
1272 	/* A CU could be inactive. In the case of a shared cache, find the
1273 	 * first active CU; in the case of a non-shared cache, check whether
1274 	 * the CU is inactive and, if so, skip it.
1275 	 */
1276 	if (first_active_cu) {
1277 		memset(pcache, 0, sizeof(struct crat_subtype_cache));
1278 		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
1279 		pcache->length = sizeof(struct crat_subtype_cache);
1280 		pcache->flags = pcache_info[cache_type].flags;
1281 		pcache->processor_id_low = cu_processor_id
1282 					 + (first_active_cu - 1);
1283 		pcache->cache_level = pcache_info[cache_type].cache_level;
1284 		pcache->cache_size = pcache_info[cache_type].cache_size;
1285 
1286 		/* Sibling map is w.r.t processor_id_low, so shift out
1287 		 * inactive CU
1288 		 */
1289 		cu_sibling_map_mask =
1290 			cu_sibling_map_mask >> (first_active_cu - 1);
1291 		k = 0;
1292 		for (i = 0; i < cu_info->num_shader_engines; i++) {
1293 			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
1294 				j++) {
1295 				pcache->sibling_map[k] =
1296 				 (uint8_t)(cu_sibling_map_mask & 0xFF);
1297 				pcache->sibling_map[k+1] =
1298 				 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1299 				pcache->sibling_map[k+2] =
1300 				 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1301 				pcache->sibling_map[k+3] =
1302 				 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1303 				k += 4;
1304 				cu_sibling_map_mask =
1305 					cu_info->cu_bitmap[i % 4][j + i / 4];
1306 				cu_sibling_map_mask &= (
1307 				 (1 << pcache_info[cache_type].num_cu_shared)
1308 				 - 1);
1309 			}
1310 		}
1311 		return 0;
1312 	}
1313 	return 1;
1314 }
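
/* Note on the sibling map above (illustrative): unlike the L1 case, an
 * L2/L3 entry spans the whole GPU, so the map is filled in 4-byte strides,
 * one stride per shader array, each holding that SA's CU bitmap masked to
 * the low num_cu_shared bits. With 2 shader engines and 2 arrays per
 * engine, bytes 0-3 cover SE0/SA0, 4-7 SE0/SA1, 8-11 SE1/SA0 and 12-15
 * SE1/SA1.
 */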
1315 
1316 #define KFD_MAX_CACHE_TYPES 6
1317 
1318 static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
1319 						   struct kfd_gpu_cache_info *pcache_info)
1320 {
1321 	struct amdgpu_device *adev = kdev->adev;
1322 	int i = 0;
1323 
1324 	/* TCP L1 Cache per CU */
1325 	if (adev->gfx.config.gc_tcp_l1_size) {
1326 		pcache_info[i].cache_size = adev->gfx.config.gc_tcp_l1_size;
1327 		pcache_info[i].cache_level = 1;
1328 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
1329 					CRAT_CACHE_FLAGS_DATA_CACHE |
1330 					CRAT_CACHE_FLAGS_SIMD_CACHE);
1331 		pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
1332 		i++;
1333 	}
1334 	/* Scalar L1 Instruction Cache per SQC */
1335 	if (adev->gfx.config.gc_l1_instruction_cache_size_per_sqc) {
1336 		pcache_info[i].cache_size =
1337 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
1338 		pcache_info[i].cache_level = 1;
1339 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
1340 					CRAT_CACHE_FLAGS_INST_CACHE |
1341 					CRAT_CACHE_FLAGS_SIMD_CACHE);
1342 		pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
1343 		i++;
1344 	}
1345 	/* Scalar L1 Data Cache per SQC */
1346 	if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
1347 		pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
1348 		pcache_info[i].cache_level = 1;
1349 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
1350 					CRAT_CACHE_FLAGS_DATA_CACHE |
1351 					CRAT_CACHE_FLAGS_SIMD_CACHE);
1352 		pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
1353 		i++;
1354 	}
1355 	/* GL1 Data Cache per SA */
1356 	if (adev->gfx.config.gc_gl1c_per_sa &&
1357 	    adev->gfx.config.gc_gl1c_size_per_instance) {
1358 		pcache_info[i].cache_size = adev->gfx.config.gc_gl1c_per_sa *
1359 			adev->gfx.config.gc_gl1c_size_per_instance;
1360 		pcache_info[i].cache_level = 1;
1361 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
1362 					CRAT_CACHE_FLAGS_DATA_CACHE |
1363 					CRAT_CACHE_FLAGS_SIMD_CACHE);
1364 		pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
1365 		i++;
1366 	}
1367 	/* L2 Data Cache per GPU (Total Tex Cache) */
1368 	if (adev->gfx.config.gc_gl2c_per_gpu) {
1369 		pcache_info[i].cache_size = adev->gfx.config.gc_gl2c_per_gpu;
1370 		pcache_info[i].cache_level = 2;
1371 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
1372 					CRAT_CACHE_FLAGS_DATA_CACHE |
1373 					CRAT_CACHE_FLAGS_SIMD_CACHE);
1374 		pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
1375 		i++;
1376 	}
1377 	/* L3 Data Cache per GPU */
1378 	if (adev->gmc.mall_size) {
1379 		pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
1380 		pcache_info[i].cache_level = 3;
1381 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
1382 					CRAT_CACHE_FLAGS_DATA_CACHE |
1383 					CRAT_CACHE_FLAGS_SIMD_CACHE);
1384 		pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
1385 		i++;
1386 	}
1387 	return i;
1388 }
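
/* Sketch of what the function above might derive (hypothetical GFX11-style
 * numbers, not taken from any real part): with gc_tcp_l1_size = 32,
 * gc_l1_instruction_cache_size_per_sqc = 32,
 * gc_l1_data_cache_size_per_sqc = 16, 2 GL1C instances of 128 KiB per SA,
 * gc_gl2c_per_gpu = 2048 and a 32 MiB MALL, it returns i = 6 entries: three
 * per-CU/per-SQC L1 caches, a 256 KiB GL1 per SA, a 2048 KiB L2 and a
 * 32768 KiB L3.
 */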
1389 
1390 /* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
1391  * tables
1392  *
1393  *	@kdev - [IN] GPU device
1394  *	@gpu_processor_id - [IN] GPU processor ID with which these caches
1395  *			    are associated
1396  *	@available_size - [IN] Amount of memory available in pcache
1397  *	@cu_info - [IN] Compute Unit info obtained from KGD
1398  *	@pcache - [OUT] memory into which cache data is to be filled in.
1399  *	@size_filled - [OUT] amount of data used up in pcache.
1400  *	@num_of_entries - [OUT] number of caches added
1401  */
1402 static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
1403 			int gpu_processor_id,
1404 			int available_size,
1405 			struct kfd_cu_info *cu_info,
1406 			struct crat_subtype_cache *pcache,
1407 			int *size_filled,
1408 			int *num_of_entries)
1409 {
1410 	struct kfd_gpu_cache_info *pcache_info;
1411 	struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
1412 	int num_of_cache_types = 0;
1413 	int i, j, k;
1414 	int ct = 0;
1415 	int mem_available = available_size;
1416 	unsigned int cu_processor_id;
1417 	int ret;
1418 	unsigned int num_cu_shared;
1419 
1420 	switch (kdev->adev->asic_type) {
1421 	case CHIP_KAVERI:
1422 		pcache_info = kaveri_cache_info;
1423 		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
1424 		break;
1425 	case CHIP_HAWAII:
1426 		pcache_info = hawaii_cache_info;
1427 		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
1428 		break;
1429 	case CHIP_CARRIZO:
1430 		pcache_info = carrizo_cache_info;
1431 		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
1432 		break;
1433 	case CHIP_TONGA:
1434 		pcache_info = tonga_cache_info;
1435 		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
1436 		break;
1437 	case CHIP_FIJI:
1438 		pcache_info = fiji_cache_info;
1439 		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
1440 		break;
1441 	case CHIP_POLARIS10:
1442 		pcache_info = polaris10_cache_info;
1443 		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
1444 		break;
1445 	case CHIP_POLARIS11:
1446 		pcache_info = polaris11_cache_info;
1447 		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
1448 		break;
1449 	case CHIP_POLARIS12:
1450 		pcache_info = polaris12_cache_info;
1451 		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
1452 		break;
1453 	case CHIP_VEGAM:
1454 		pcache_info = vegam_cache_info;
1455 		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
1456 		break;
1457 	default:
1458 		switch (KFD_GC_VERSION(kdev)) {
1459 		case IP_VERSION(9, 0, 1):
1460 			pcache_info = vega10_cache_info;
1461 			num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
1462 			break;
1463 		case IP_VERSION(9, 2, 1):
1464 			pcache_info = vega12_cache_info;
1465 			num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
1466 			break;
1467 		case IP_VERSION(9, 4, 0):
1468 		case IP_VERSION(9, 4, 1):
1469 			pcache_info = vega20_cache_info;
1470 			num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
1471 			break;
1472 		case IP_VERSION(9, 4, 2):
1473 			pcache_info = aldebaran_cache_info;
1474 			num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
1475 			break;
1476 		case IP_VERSION(9, 1, 0):
1477 		case IP_VERSION(9, 2, 2):
1478 			pcache_info = raven_cache_info;
1479 			num_of_cache_types = ARRAY_SIZE(raven_cache_info);
1480 			break;
1481 		case IP_VERSION(9, 3, 0):
1482 			pcache_info = renoir_cache_info;
1483 			num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
1484 			break;
1485 		case IP_VERSION(10, 1, 10):
1486 		case IP_VERSION(10, 1, 2):
1487 		case IP_VERSION(10, 1, 3):
1488 		case IP_VERSION(10, 1, 4):
1489 			pcache_info = navi10_cache_info;
1490 			num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
1491 			break;
1492 		case IP_VERSION(10, 1, 1):
1493 			pcache_info = navi14_cache_info;
1494 			num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
1495 			break;
1496 		case IP_VERSION(10, 3, 0):
1497 			pcache_info = sienna_cichlid_cache_info;
1498 			num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
1499 			break;
1500 		case IP_VERSION(10, 3, 2):
1501 			pcache_info = navy_flounder_cache_info;
1502 			num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
1503 			break;
1504 		case IP_VERSION(10, 3, 4):
1505 			pcache_info = dimgrey_cavefish_cache_info;
1506 			num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
1507 			break;
1508 		case IP_VERSION(10, 3, 1):
1509 			pcache_info = vangogh_cache_info;
1510 			num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
1511 			break;
1512 		case IP_VERSION(10, 3, 5):
1513 			pcache_info = beige_goby_cache_info;
1514 			num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
1515 			break;
1516 		case IP_VERSION(10, 3, 3):
1517 		case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */
1518 		case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */
1519 			pcache_info = yellow_carp_cache_info;
1520 			num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
1521 			break;
1522 		case IP_VERSION(11, 0, 0):
1523 		case IP_VERSION(11, 0, 1):
1524 		case IP_VERSION(11, 0, 2):
1525 			pcache_info = cache_info;
1526 			num_of_cache_types =
1527 				kfd_fill_gpu_cache_info_from_gfx_config(kdev, pcache_info);
1528 			break;
1529 		default:
1530 			return -EINVAL;
1531 		}
1532 	}
1533 
1534 	*size_filled = 0;
1535 	*num_of_entries = 0;
1536 
1537 	/* For each type of cache listed in the kfd_gpu_cache_info table,
1538 	 * go through all available Compute Units.
1539 	 * The [i,j,k] loop:
1540 	 *		if kfd_gpu_cache_info.num_cu_shared == 1,
1541 	 *			it walks through every available CU;
1542 	 *		if kfd_gpu_cache_info.num_cu_shared != 1,
1543 	 *			it emits one entry per group of CUs
1544 	 *			that share the cache unit
1545 	 */
1546 
1547 	for (ct = 0; ct < num_of_cache_types; ct++) {
1548 	  cu_processor_id = gpu_processor_id;
1549 	  if (pcache_info[ct].cache_level == 1) {
1550 	    for (i = 0; i < cu_info->num_shader_engines; i++) {
1551 	      for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
1552 	        for (k = 0; k < cu_info->num_cu_per_sh;
1553 		  k += pcache_info[ct].num_cu_shared) {
1554 		  ret = fill_in_l1_pcache(pcache,
1555 					pcache_info,
1556 					cu_info,
1557 					mem_available,
1558 					cu_info->cu_bitmap[i % 4][j + i / 4],
1559 					ct,
1560 					cu_processor_id,
1561 					k);
1562 
1563 		  if (ret < 0)
1564 			break;
1565 
1566 		  if (!ret) {
1567 				pcache++;
1568 				(*num_of_entries)++;
1569 				mem_available -= sizeof(*pcache);
1570 				(*size_filled) += sizeof(*pcache);
1571 		  }
1572 
1573 		  /* Move to next CU block */
1574 		  num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
1575 					cu_info->num_cu_per_sh) ?
1576 					pcache_info[ct].num_cu_shared :
1577 					(cu_info->num_cu_per_sh - k);
1578 		  cu_processor_id += num_cu_shared;
1579 		}
1580 	      }
1581 	    }
1582 	  } else {
1583 			ret = fill_in_l2_l3_pcache(pcache,
1584 				pcache_info,
1585 				cu_info,
1586 				mem_available,
1587 				ct,
1588 				cu_processor_id);
1589 
1590 			if (ret < 0)
1591 				break;
1592 
1593 			if (!ret) {
1594 				pcache++;
1595 				(*num_of_entries)++;
1596 				mem_available -= sizeof(*pcache);
1597 				(*size_filled) += sizeof(*pcache);
1598 			}
1599 	  }
1600 	}
1601 
1602 	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
1603 
1604 	return 0;
1605 }
1606 
1607 static bool kfd_ignore_crat(void)
1608 {
1609 	bool ret;
1610 
1611 	if (ignore_crat)
1612 		return true;
1613 
1614 #ifndef KFD_SUPPORT_IOMMU_V2
1615 	ret = true;
1616 #else
1617 	ret = false;
1618 #endif
1619 
1620 	return ret;
1621 }
1622 
1623 /*
1624  * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
1625  * copies CRAT from ACPI (if available).
1626  * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
1627  *
1628  *	@crat_image: CRAT read from ACPI. If no CRAT in ACPI then
1629  *		     crat_image will be NULL
1630  *	@size: [OUT] size of crat_image
1631  *
1632  *	Return 0 if successful else return error code
1633  */
1634 int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
1635 {
1636 	struct acpi_table_header *crat_table;
1637 	acpi_status status;
1638 	void *pcrat_image;
1639 	int rc = 0;
1640 
1641 	if (!crat_image)
1642 		return -EINVAL;
1643 
1644 	*crat_image = NULL;
1645 
1646 	if (kfd_ignore_crat()) {
1647 		pr_info("CRAT table disabled by module option\n");
1648 		return -ENODATA;
1649 	}
1650 
1651 	/* Fetch the CRAT table from ACPI */
1652 	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
1653 	if (status == AE_NOT_FOUND) {
1654 		pr_info("CRAT table not found\n");
1655 		return -ENODATA;
1656 	} else if (ACPI_FAILURE(status)) {
1657 		const char *err = acpi_format_exception(status);
1658 
1659 		pr_err("CRAT table error: %s\n", err);
1660 		return -EINVAL;
1661 	}
1662 
1663 	pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
1664 	if (!pcrat_image) {
1665 		rc = -ENOMEM;
1666 		goto out;
1667 	}
1668 
1669 	memcpy(pcrat_image, crat_table, crat_table->length);
1670 	*crat_image = pcrat_image;
1671 	*size = crat_table->length;
1672 out:
1673 	acpi_put_table(crat_table);
1674 	return rc;
1675 }
1676 
1677 /* Memory required to create a Virtual CRAT.
1678  * Since there is no easy way to predict the amount of memory required, the
1679  * following amount is allocated for the GPU Virtual CRAT. This is
1680  * expected to cover all known conditions. But to be safe, an additional
1681  * check is put in the code to ensure we don't overrun the buffer.
1682  */
1683 #define VCRAT_SIZE_FOR_GPU	(4 * PAGE_SIZE)
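
/* Illustrative arithmetic: with the common 4 KiB PAGE_SIZE this reserves
 * 16 KiB, enough for a crat_header plus several hundred subtype entries;
 * the avail_size/mem_available checks in the fill helpers catch the
 * overflow case anyway.
 */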
1684 
1685 /* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
1686  *
1687  *	@numa_node_id: CPU NUMA node id
1688  *	@avail_size: Available size in the memory
1689  *	@sub_type_hdr: Memory into which compute info will be filled in
1690  *
1691  *	Return 0 if successful, else a negative error code
1692  */
1693 static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
1694 				int proximity_domain,
1695 				struct crat_subtype_computeunit *sub_type_hdr)
1696 {
1697 	const struct cpumask *cpumask;
1698 
1699 	*avail_size -= sizeof(struct crat_subtype_computeunit);
1700 	if (*avail_size < 0)
1701 		return -ENOMEM;
1702 
1703 	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
1704 
1705 	/* Fill in subtype header data */
1706 	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
1707 	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
1708 	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1709 
1710 	cpumask = cpumask_of_node(numa_node_id);
1711 
1712 	/* Fill in CU data */
1713 	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
1714 	sub_type_hdr->proximity_domain = proximity_domain;
1715 	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
1716 	if (sub_type_hdr->processor_id_low == -1)
1717 		return -EINVAL;
1718 
1719 	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
1720 
1721 	return 0;
1722 }
1723 
1724 /* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
1725  *
1726  *	@numa_node_id: CPU NUMA node id
1727  *	@avail_size: Available size in the memory
1728  *	@sub_type_hdr: Memory into which memory info will be filled in
1729  *
1730  *	Return 0 if successful else return -ve value
1731  */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
			int proximity_domain,
			struct crat_subtype_memory *sub_type_hdr)
{
	uint64_t mem_in_bytes = 0;
	pg_data_t *pgdat;
	int zone_type;

	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill in Memory Subunit data */

	/* Unlike si_meminfo, si_meminfo_node is not exported, so the
	 * following lines are duplicated from the si_meminfo_node
	 * function.
	 */
	pgdat = NODE_DATA(numa_node_id);
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
	mem_in_bytes <<= PAGE_SHIFT;

	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
	sub_type_hdr->proximity_domain = proximity_domain;

	return 0;
}

#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
				uint32_t *num_entries,
				struct crat_subtype_iolink *sub_type_hdr)
{
	int nid;
	struct cpuinfo_x86 *c = &cpu_data(0);
	uint8_t link_type;

	if (c->x86_vendor == X86_VENDOR_AMD)
		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
	else
		link_type = CRAT_IOLINK_TYPE_QPI_1_1;

	*num_entries = 0;

	/* Create IO links from this node to other CPU nodes */
	for_each_online_node(nid) {
		if (nid == numa_node_id) /* node itself */
			continue;

		*avail_size -= sizeof(struct crat_subtype_iolink);
		if (*avail_size < 0)
			return -ENOMEM;

		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

		/* Fill in subtype header data */
		sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
		sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
		sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

		/* Fill in IO link data */
		sub_type_hdr->proximity_domain_from = numa_node_id;
		sub_type_hdr->proximity_domain_to = nid;
		sub_type_hdr->io_interface_type = link_type;

		(*num_entries)++;
		sub_type_hdr++;
	}

	return 0;
}
#endif

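/* For example, with online nodes {0, 1, 2} the loop above emits the
 * entries 0->1 and 0->2 for numa_node_id == 0, i.e. at most
 * num_online_nodes() - 1 io links per node, which together describe a
 * fully connected CPU topology.
 */
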
/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 *
 *	@pcrat_image: Fill in VCRAT for CPU
 *	@size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct acpi_table_header *acpi_table;
	acpi_status status;
	struct crat_subtype_generic *sub_type_hdr;
	int avail_size = *size;
	int numa_node_id;
#ifdef CONFIG_X86_64
	uint32_t entries = 0;
#endif
	int ret = 0;

	if (!pcrat_image)
		return -EINVAL;

	/* Fill in CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));
	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	crat_table->length = sizeof(struct crat_header);

	status = acpi_get_table("DSDT", 0, &acpi_table);
	if (status != AE_OK) {
		pr_warn("DSDT table not found for OEM information\n");
	} else {
		crat_table->oem_revision = acpi_table->revision;
		memcpy(crat_table->oem_id, acpi_table->oem_id,
				CRAT_OEMID_LENGTH);
		memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
				CRAT_OEMTABLEID_LENGTH);
		acpi_put_table(acpi_table);
	}
	crat_table->total_entries = 0;
	crat_table->num_domains = 0;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

	for_each_online_node(numa_node_id) {
		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
			continue;

		/* Fill in Subtype: Compute Unit */
		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_computeunit *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

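		/* Subtypes are packed back to back; advance the write cursor
		 * by the length of the entry that was just filled in.
		 */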
		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

		/* Fill in Subtype: Memory */
		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_memory *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

		/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
				&entries,
				(struct crat_subtype_iolink *)sub_type_hdr);
		if (ret < 0)
			return ret;

		if (entries) {
			crat_table->length += (sub_type_hdr->length * entries);
			crat_table->total_entries += entries;

			sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
					sub_type_hdr->length * entries);
		}
#else
		pr_info("IO link not available for non-x86 platforms\n");
#endif

		crat_table->num_domains++;
	}

	/* TODO: Add cache Subtype for CPU.
	 * Currently, CPU cache information is available in the function
	 * detect_cache_attributes(cpu) defined in the file
	 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
	 * exported, and to get the same information the code would need to
	 * be duplicated.
	 */

	*size = crat_table->length;
	pr_info("Virtual CRAT table created for CPU\n");

	return 0;
}

static int kfd_fill_gpu_memory_affinity(int *avail_size,
		struct kfd_dev *kdev, uint8_t type, uint64_t size,
		struct crat_subtype_memory *sub_type_hdr,
		uint32_t proximity_domain,
		const struct kfd_local_mem_info *local_mem_info)
{
	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

	sub_type_hdr->proximity_domain = proximity_domain;

	pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
			type, size);

	sub_type_hdr->length_low = lower_32_bits(size);
	sub_type_hdr->length_high = upper_32_bits(size);

	sub_type_hdr->width = local_mem_info->vram_width;
	sub_type_hdr->visibility_type = type;

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
{
	struct acpi_table_header *table_header = NULL;
	struct acpi_subtable_header *sub_header = NULL;
	unsigned long table_end, subtable_len;
	u32 pci_id = pci_domain_nr(kdev->pdev->bus) << 16 |
			pci_dev_id(kdev->pdev);
	u32 bdf;
	acpi_status status;
	struct acpi_srat_cpu_affinity *cpu;
	struct acpi_srat_generic_affinity *gpu;
	int pxm = 0, max_pxm = 0;
	int numa_node = NUMA_NO_NODE;
	bool found = false;

	/* Fetch the SRAT table from ACPI */
	status = acpi_get_table(ACPI_SIG_SRAT, 0, &table_header);
	if (status == AE_NOT_FOUND) {
		pr_warn("SRAT table not found\n");
		return;
	} else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("SRAT table error: %s\n", err);
		return;
	}

	table_end = (unsigned long)table_header + table_header->length;

	/* Parse all entries looking for a match. */
	sub_header = (struct acpi_subtable_header *)
			((unsigned long)table_header +
			sizeof(struct acpi_table_srat));
	subtable_len = sub_header->length;

	while (((unsigned long)sub_header) + subtable_len < table_end) {
		/*
		 * If length is 0, break from this loop to avoid
		 * infinite loop.
		 */
		if (subtable_len == 0) {
			pr_err("SRAT invalid zero length\n");
			break;
		}

		switch (sub_header->type) {
		case ACPI_SRAT_TYPE_CPU_AFFINITY:
			cpu = (struct acpi_srat_cpu_affinity *)sub_header;
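			/* The proximity domain is split across two fields:
			 * proximity_domain_lo carries bits 0-7 and
			 * proximity_domain_hi[] the upper bits.
			 */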
			pxm = *((u32 *)cpu->proximity_domain_hi) << 8 |
					cpu->proximity_domain_lo;
			if (pxm > max_pxm)
				max_pxm = pxm;
			break;
		case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
			gpu = (struct acpi_srat_generic_affinity *)sub_header;
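			/* For PCI devices, the SRAT device handle holds the
			 * segment number in its first two bytes and the BDF
			 * in the following two; reassemble it to compare
			 * against this GPU's pci_id.
			 */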
			bdf = *((u16 *)(&gpu->device_handle[0])) << 16 |
					*((u16 *)(&gpu->device_handle[2]));
			if (bdf == pci_id) {
				found = true;
				numa_node = pxm_to_node(gpu->proximity_domain);
			}
			break;
		default:
			break;
		}

		if (found)
			break;

		sub_header = (struct acpi_subtable_header *)
				((unsigned long)sub_header + subtable_len);
		subtable_len = sub_header->length;
	}

	acpi_put_table(table_header);

	/* Workaround bad cpu-gpu binding case */
	if (found && (numa_node < 0 ||
			numa_node > pxm_to_node(max_pxm)))
		numa_node = 0;

	if (numa_node != NUMA_NO_NODE)
		set_dev_node(&kdev->pdev->dev, numa_node);
}
#endif

/* kfd_fill_gpu_direct_io_link_to_cpu - Fill in the direct io link from the
 * GPU to its NUMA node
 *	@avail_size: Available size in the memory
 *	@kdev: [IN] GPU device
 *	@sub_type_hdr: Memory into which io link info will be filled in
 *	@proximity_domain: Proximity domain of the GPU node
 *
 *	Return 0 if successful else return -ve value
 */
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
			struct kfd_dev *kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain)
{
	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
	if (kfd_dev_is_large_bar(kdev))
		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	/* Fill in IOLINK subtype.
	 * TODO: Fill in other fields of the iolink subtype.
	 */
	if (kdev->adev->gmc.xgmi.connected_to_cpu) {
		/*
		 * With a host-GPU xGMI link, the host can access GPU memory
		 * whether or not the PCIe BAR type is large, so always create
		 * a bidirectional io link.
		 */
		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
		sub_type_hdr->num_hops_xgmi = 1;
		if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) {
			sub_type_hdr->minimum_bandwidth_mbs =
					amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
							kdev->adev, NULL, true);
			sub_type_hdr->maximum_bandwidth_mbs =
					sub_type_hdr->minimum_bandwidth_mbs;
		}
	} else {
		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
		sub_type_hdr->minimum_bandwidth_mbs =
				amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true);
		sub_type_hdr->maximum_bandwidth_mbs =
				amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false);
	}

	sub_type_hdr->proximity_domain_from = proximity_domain;

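	/* Resolve the CPU end of the link: consult the SRAT when the device
	 * has no NUMA node assigned yet; if it still has none, fall back to
	 * proximity domain 0.
	 */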
#ifdef CONFIG_ACPI_NUMA
	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
		kfd_find_numa_node_in_srat(kdev);
#endif
#ifdef CONFIG_NUMA
	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
		sub_type_hdr->proximity_domain_to = 0;
	else
		sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
#else
	sub_type_hdr->proximity_domain_to = 0;
#endif
	return 0;
}

static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
			struct kfd_dev *kdev,
			struct kfd_dev *peer_kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain_from,
			uint32_t proximity_domain_to)
{
	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
			       CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
	sub_type_hdr->proximity_domain_from = proximity_domain_from;
	sub_type_hdr->proximity_domain_to = proximity_domain_to;
	sub_type_hdr->num_hops_xgmi =
		amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
	sub_type_hdr->maximum_bandwidth_mbs =
		amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false);
	sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
		amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;

	return 0;
}

/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
 *
 *	@pcrat_image: Fill in VCRAT for GPU
 *	@size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 *	@kdev: [IN] GPU device
 *	@proximity_domain: Proximity domain of the GPU node
 *
 *	Return 0 if successful else return -ve value
 */
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
				      size_t *size, struct kfd_dev *kdev,
				      uint32_t proximity_domain)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct crat_subtype_generic *sub_type_hdr;
	struct kfd_local_mem_info local_mem_info;
	struct kfd_topology_device *peer_dev;
	struct crat_subtype_computeunit *cu;
	struct kfd_cu_info cu_info;
	int avail_size = *size;
	uint32_t total_num_of_cu;
	int num_of_cache_entries = 0;
	int cache_mem_filled = 0;
	uint32_t nid = 0;
	int ret = 0;

	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
		return -EINVAL;

	/* Fill the CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));

	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	/* Change length as we add more subtypes */
	crat_table->length = sizeof(struct crat_header);
	crat_table->num_domains = 1;
	crat_table->total_entries = 0;

	/* Fill in Subtype: Compute Unit
	 * First fill in the sub type header and then the sub type data.
	 */
	avail_size -= sizeof(struct crat_subtype_computeunit);
	if (avail_size < 0)
		return -ENOMEM;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill CU subtype data */
	cu = (struct crat_subtype_computeunit *)sub_type_hdr;
	cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
	cu->proximity_domain = proximity_domain;

	amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
	cu->num_simd_per_cu = cu_info.simd_per_cu;
	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
	cu->max_waves_simd = cu_info.max_waves_per_simd;

	cu->wave_front_size = cu_info.wave_front_size;
	cu->array_count = cu_info.num_shader_arrays_per_engine *
		cu_info.num_shader_engines;
	total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
	cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
	cu->num_cu_per_array = cu_info.num_cu_per_sh;
	cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
	cu->num_banks = cu_info.num_shader_engines;
	cu->lds_size_in_kb = cu_info.lds_size;

	cu->hsa_capability = 0;

	/* Check if this node supports IOMMU. During parsing this flag will
	 * translate to HSA_CAP_ATS_PRESENT.
	 */
	if (!kfd_iommu_check_device(kdev))
		cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;

	/* Fill in Subtype: Memory. Only on systems with large BAR (no
	 * private FB), report memory as public. On other systems
	 * report the total FB size (public+private) as a single
	 * private heap.
	 */
	local_mem_info = kdev->local_mem_info;
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

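	/* The debug_largebar module parameter simulates a large-BAR system
	 * on hardware without one by pretending there is no private FB, so
	 * the memory below is reported as a public heap.
	 */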
	if (debug_largebar)
		local_mem_info.local_mem_size_private = 0;

	if (local_mem_info.local_mem_size_private == 0)
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
				local_mem_info.local_mem_size_public,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	else
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
				local_mem_info.local_mem_size_public +
				local_mem_info.local_mem_size_private,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	if (ret < 0)
		return ret;

	crat_table->length += sizeof(struct crat_subtype_memory);
	crat_table->total_entries++;

	/* TODO: Fill in cache information. This information is NOT readily
	 * available in KGD
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
		sub_type_hdr->length);
	ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
				avail_size,
				&cu_info,
				(struct crat_subtype_cache *)sub_type_hdr,
				&cache_mem_filled,
				&num_of_cache_entries);

	if (ret < 0)
		return ret;

	crat_table->length += cache_mem_filled;
	crat_table->total_entries += num_of_cache_entries;
	avail_size -= cache_mem_filled;

	/* Fill in Subtype: IO_LINKS
	 * Only direct links are added here, i.e. the link from the GPU
	 * to its NUMA node. Indirect links are added by userspace.
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
		cache_mem_filled);
	ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
		(struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);

	if (ret < 0)
		return ret;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;

	/* Fill in Subtype: IO_LINKS
	 * Direct links from this GPU to other GPUs through xGMI.
	 * We loop over the GPUs that have already been processed (those with
	 * a lower proximity_domain value) and add a link for every GPU in
	 * the same hive (from this GPU to the other GPU). The reverse iolink
	 * (from the other GPU to this GPU) will be added
	 * in kfd_parse_subtype_iolink.
	 */
	if (kdev->hive_id) {
		for (nid = 0; nid < proximity_domain; ++nid) {
			peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid);
			if (!peer_dev->gpu)
				continue;
			if (peer_dev->gpu->hive_id != kdev->hive_id)
				continue;
			sub_type_hdr = (typeof(sub_type_hdr))(
				(char *)sub_type_hdr +
				sizeof(struct crat_subtype_iolink));
			ret = kfd_fill_gpu_xgmi_link_to_gpu(
				&avail_size, kdev, peer_dev->gpu,
				(struct crat_subtype_iolink *)sub_type_hdr,
				proximity_domain, nid);
			if (ret < 0)
				return ret;
			crat_table->length += sub_type_hdr->length;
			crat_table->total_entries++;
		}
	}
	*size = crat_table->length;
	pr_info("Virtual CRAT table created for GPU\n");

	return ret;
}

/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
 *		creates a Virtual CRAT (VCRAT) image
 *
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 *	@crat_image: VCRAT image created because ACPI does not have a
 *		     CRAT for this device
 *	@size: [OUT] size of virtual crat_image
 *	@flags:	COMPUTE_UNIT_CPU - Create VCRAT for CPU device
 *		COMPUTE_UNIT_GPU - Create VCRAT for GPU
 *		(COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
 *			-- this option is not currently implemented.
 *			The assumption is that all AMD APUs will have CRAT
 *	@kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
 *	@proximity_domain: Proximity domain of the GPU node
 *
 *	Return 0 if successful else return -ve value
 */
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
				  int flags, struct kfd_dev *kdev,
				  uint32_t proximity_domain)
{
	void *pcrat_image = NULL;
	int ret = 0, num_nodes;
	size_t dyn_size;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	/* Allocate the CPU Virtual CRAT size based on the number of online
	 * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT image.
	 * This should cover all the current conditions. A check is included
	 * so that we do not write beyond the allocated size for GPUs.
	 */
	switch (flags) {
	case COMPUTE_UNIT_CPU:
		num_nodes = num_online_nodes();
		dyn_size = sizeof(struct crat_header) +
			num_nodes * (sizeof(struct crat_subtype_computeunit) +
			sizeof(struct crat_subtype_memory) +
			(num_nodes - 1) * sizeof(struct crat_subtype_iolink));
		pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = dyn_size;
		pr_debug("CRAT size is %zu\n", dyn_size);
		ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
		break;
	case COMPUTE_UNIT_GPU:
		if (!kdev)
			return -EINVAL;
		pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = VCRAT_SIZE_FOR_GPU;
		ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
						 proximity_domain);
		break;
	case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
		/* TODO: */
		ret = -EINVAL;
		pr_err("VCRAT not implemented for APU\n");
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		*crat_image = pcrat_image;
	else
		kvfree(pcrat_image);

	return ret;
}
/* kfd_destroy_crat_image
 *
 *	@crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
 */
void kfd_destroy_crat_image(void *crat_image)
{
	kvfree(crat_image);
}
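
/* A typical caller pairs creation with destruction. In the sketch below,
 * kdev, pd and device_list stand in for a real kfd_dev pointer, proximity
 * domain and topology device list:
 *
 *	void *image = NULL;
 *	size_t size = 0;
 *
 *	if (!kfd_create_crat_image_virtual(&image, &size,
 *					   COMPUTE_UNIT_GPU, kdev, pd)) {
 *		kfd_parse_crat_table(image, &device_list, pd);
 *		kfd_destroy_crat_image(image);
 *	}
 */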