xref: /openbmc/linux/drivers/gpu/drm/amd/amdkfd/kfd_crat.c (revision 10756dc5)
1 /*
2  * Copyright 2015-2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #include <linux/pci.h>
24 #include <linux/acpi.h>
25 #include "kfd_crat.h"
26 #include "kfd_priv.h"
27 #include "kfd_topology.h"
28 #include "kfd_iommu.h"
29 #include "amdgpu.h"
30 #include "amdgpu_amdkfd.h"
31 
32 /* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
33  * GPU processor IDs are expressed with Bit[31]=1.
34  * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
35  * used in the CRAT.
36  */
37 static uint32_t gpu_processor_id_low = 0x80001000;
38 
39 /* Return the next available gpu_processor_id and increment it for the next GPU
40  *	@total_cu_count - Total CUs present in the GPU including ones
41  *			  masked off
42  */
43 static inline unsigned int get_and_inc_gpu_processor_id(
44 				unsigned int total_cu_count)
45 {
46 	int current_id = gpu_processor_id_low;
47 
48 	gpu_processor_id_low += total_cu_count;
49 	return current_id;
50 }
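
/* Illustrative example (not part of the driver): with the base above, the
 * first dGPU with total_cu_count = 64 is assigned processor ID 0x80001000,
 * and gpu_processor_id_low advances to 0x80001040 so the next GPU's CU IDs
 * start in a non-overlapping range.
 */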
51 
52 /* Static table to describe GPU Cache information */
53 struct kfd_gpu_cache_info {
54 	uint32_t	cache_size;
55 	uint32_t	cache_level;
56 	uint32_t	flags;
57 	/* Indicates how many Compute Units share this cache
58 	 * within a SA. Value = 1 indicates the cache is not shared
59 	 */
60 	uint32_t	num_cu_shared;
61 };
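
/* Reading the tables below: for Kaveri's scalar L1 instruction cache,
 * num_cu_shared = 2 means one 16 KiB cache entry is reported per group of
 * two CUs rather than one per CU.
 */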
62 
63 static struct kfd_gpu_cache_info kaveri_cache_info[] = {
64 	{
65 		/* TCP L1 Cache per CU */
66 		.cache_size = 16,
67 		.cache_level = 1,
68 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
69 				CRAT_CACHE_FLAGS_DATA_CACHE |
70 				CRAT_CACHE_FLAGS_SIMD_CACHE),
71 		.num_cu_shared = 1,
72 	},
73 	{
74 		/* Scalar L1 Instruction Cache (in SQC module) per bank */
75 		.cache_size = 16,
76 		.cache_level = 1,
77 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
78 				CRAT_CACHE_FLAGS_INST_CACHE |
79 				CRAT_CACHE_FLAGS_SIMD_CACHE),
80 		.num_cu_shared = 2,
81 	},
82 	{
83 		/* Scalar L1 Data Cache (in SQC module) per bank */
84 		.cache_size = 8,
85 		.cache_level = 1,
86 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
87 				CRAT_CACHE_FLAGS_DATA_CACHE |
88 				CRAT_CACHE_FLAGS_SIMD_CACHE),
89 		.num_cu_shared = 2,
90 	},
91 
92 	/* TODO: Add L2 Cache information */
93 };
94 
95 
96 static struct kfd_gpu_cache_info carrizo_cache_info[] = {
97 	{
98 		/* TCP L1 Cache per CU */
99 		.cache_size = 16,
100 		.cache_level = 1,
101 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
102 				CRAT_CACHE_FLAGS_DATA_CACHE |
103 				CRAT_CACHE_FLAGS_SIMD_CACHE),
104 		.num_cu_shared = 1,
105 	},
106 	{
107 		/* Scalar L1 Instruction Cache (in SQC module) per bank */
108 		.cache_size = 8,
109 		.cache_level = 1,
110 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
111 				CRAT_CACHE_FLAGS_INST_CACHE |
112 				CRAT_CACHE_FLAGS_SIMD_CACHE),
113 		.num_cu_shared = 4,
114 	},
115 	{
116 		/* Scalar L1 Data Cache (in SQC module) per bank. */
117 		.cache_size = 4,
118 		.cache_level = 1,
119 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
120 				CRAT_CACHE_FLAGS_DATA_CACHE |
121 				CRAT_CACHE_FLAGS_SIMD_CACHE),
122 		.num_cu_shared = 4,
123 	},
124 
125 	/* TODO: Add L2 Cache information */
126 };
127 
128 #define hawaii_cache_info kaveri_cache_info
129 #define tonga_cache_info carrizo_cache_info
130 #define fiji_cache_info  carrizo_cache_info
131 #define polaris10_cache_info carrizo_cache_info
132 #define polaris11_cache_info carrizo_cache_info
133 #define polaris12_cache_info carrizo_cache_info
134 #define vegam_cache_info carrizo_cache_info
135 
136 /* NOTE: L1 cache information has been updated and L2/L3
137  * cache information has been added for Vega10 and
138  * newer ASICs. The unit for cache_size is KiB.
139  * In the future, cache details must be checked
140  * and updated for every new ASIC.
141  */
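
/* For example, vega10's L2 entry below encodes cache_size = 4096, i.e. a
 * 4 MiB (4096 KiB) cache shared across the GPU.
 */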
142 
143 static struct kfd_gpu_cache_info vega10_cache_info[] = {
144 	{
145 		/* TCP L1 Cache per CU */
146 		.cache_size = 16,
147 		.cache_level = 1,
148 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
149 				CRAT_CACHE_FLAGS_DATA_CACHE |
150 				CRAT_CACHE_FLAGS_SIMD_CACHE),
151 		.num_cu_shared = 1,
152 	},
153 	{
154 		/* Scalar L1 Instruction Cache per SQC */
155 		.cache_size = 32,
156 		.cache_level = 1,
157 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
158 				CRAT_CACHE_FLAGS_INST_CACHE |
159 				CRAT_CACHE_FLAGS_SIMD_CACHE),
160 		.num_cu_shared = 3,
161 	},
162 	{
163 		/* Scalar L1 Data Cache per SQC */
164 		.cache_size = 16,
165 		.cache_level = 1,
166 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
167 				CRAT_CACHE_FLAGS_DATA_CACHE |
168 				CRAT_CACHE_FLAGS_SIMD_CACHE),
169 		.num_cu_shared = 3,
170 	},
171 	{
172 		/* L2 Data Cache per GPU (Total Tex Cache) */
173 		.cache_size = 4096,
174 		.cache_level = 2,
175 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
176 				CRAT_CACHE_FLAGS_DATA_CACHE |
177 				CRAT_CACHE_FLAGS_SIMD_CACHE),
178 		.num_cu_shared = 16,
179 	},
180 };
181 
182 static struct kfd_gpu_cache_info raven_cache_info[] = {
183 	{
184 		/* TCP L1 Cache per CU */
185 		.cache_size = 16,
186 		.cache_level = 1,
187 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
188 				CRAT_CACHE_FLAGS_DATA_CACHE |
189 				CRAT_CACHE_FLAGS_SIMD_CACHE),
190 		.num_cu_shared = 1,
191 	},
192 	{
193 		/* Scalar L1 Instruction Cache per SQC */
194 		.cache_size = 32,
195 		.cache_level = 1,
196 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
197 				CRAT_CACHE_FLAGS_INST_CACHE |
198 				CRAT_CACHE_FLAGS_SIMD_CACHE),
199 		.num_cu_shared = 3,
200 	},
201 	{
202 		/* Scalar L1 Data Cache per SQC */
203 		.cache_size = 16,
204 		.cache_level = 1,
205 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
206 				CRAT_CACHE_FLAGS_DATA_CACHE |
207 				CRAT_CACHE_FLAGS_SIMD_CACHE),
208 		.num_cu_shared = 3,
209 	},
210 	{
211 		/* L2 Data Cache per GPU (Total Tex Cache) */
212 		.cache_size = 1024,
213 		.cache_level = 2,
214 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
215 				CRAT_CACHE_FLAGS_DATA_CACHE |
216 				CRAT_CACHE_FLAGS_SIMD_CACHE),
217 		.num_cu_shared = 11,
218 	},
219 };
220 
221 static struct kfd_gpu_cache_info renoir_cache_info[] = {
222 	{
223 		/* TCP L1 Cache per CU */
224 		.cache_size = 16,
225 		.cache_level = 1,
226 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
227 				CRAT_CACHE_FLAGS_DATA_CACHE |
228 				CRAT_CACHE_FLAGS_SIMD_CACHE),
229 		.num_cu_shared = 1,
230 	},
231 	{
232 		/* Scalar L1 Instruction Cache per SQC */
233 		.cache_size = 32,
234 		.cache_level = 1,
235 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
236 				CRAT_CACHE_FLAGS_INST_CACHE |
237 				CRAT_CACHE_FLAGS_SIMD_CACHE),
238 		.num_cu_shared = 3,
239 	},
240 	{
241 		/* Scalar L1 Data Cache per SQC */
242 		.cache_size = 16,
243 		.cache_level = 1,
244 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
245 				CRAT_CACHE_FLAGS_DATA_CACHE |
246 				CRAT_CACHE_FLAGS_SIMD_CACHE),
247 		.num_cu_shared = 3,
248 	},
249 	{
250 		/* L2 Data Cache per GPU (Total Tex Cache) */
251 		.cache_size = 1024,
252 		.cache_level = 2,
253 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
254 				CRAT_CACHE_FLAGS_DATA_CACHE |
255 				CRAT_CACHE_FLAGS_SIMD_CACHE),
256 		.num_cu_shared = 8,
257 	},
258 };
259 
260 static struct kfd_gpu_cache_info vega12_cache_info[] = {
261 	{
262 		/* TCP L1 Cache per CU */
263 		.cache_size = 16,
264 		.cache_level = 1,
265 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
266 				CRAT_CACHE_FLAGS_DATA_CACHE |
267 				CRAT_CACHE_FLAGS_SIMD_CACHE),
268 		.num_cu_shared = 1,
269 	},
270 	{
271 		/* Scalar L1 Instruction Cache per SQC */
272 		.cache_size = 32,
273 		.cache_level = 1,
274 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
275 				CRAT_CACHE_FLAGS_INST_CACHE |
276 				CRAT_CACHE_FLAGS_SIMD_CACHE),
277 		.num_cu_shared = 3,
278 	},
279 	{
280 		/* Scalar L1 Data Cache per SQC */
281 		.cache_size = 16,
282 		.cache_level = 1,
283 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
284 				CRAT_CACHE_FLAGS_DATA_CACHE |
285 				CRAT_CACHE_FLAGS_SIMD_CACHE),
286 		.num_cu_shared = 3,
287 	},
288 	{
289 		/* L2 Data Cache per GPU (Total Tex Cache) */
290 		.cache_size = 2048,
291 		.cache_level = 2,
292 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
293 				CRAT_CACHE_FLAGS_DATA_CACHE |
294 				CRAT_CACHE_FLAGS_SIMD_CACHE),
295 		.num_cu_shared = 5,
296 	},
297 };
298 
299 static struct kfd_gpu_cache_info vega20_cache_info[] = {
300 	{
301 		/* TCP L1 Cache per CU */
302 		.cache_size = 16,
303 		.cache_level = 1,
304 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
305 				CRAT_CACHE_FLAGS_DATA_CACHE |
306 				CRAT_CACHE_FLAGS_SIMD_CACHE),
307 		.num_cu_shared = 1,
308 	},
309 	{
310 		/* Scalar L1 Instruction Cache per SQC */
311 		.cache_size = 32,
312 		.cache_level = 1,
313 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
314 				CRAT_CACHE_FLAGS_INST_CACHE |
315 				CRAT_CACHE_FLAGS_SIMD_CACHE),
316 		.num_cu_shared = 3,
317 	},
318 	{
319 		/* Scalar L1 Data Cache per SQC */
320 		.cache_size = 16,
321 		.cache_level = 1,
322 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
323 				CRAT_CACHE_FLAGS_DATA_CACHE |
324 				CRAT_CACHE_FLAGS_SIMD_CACHE),
325 		.num_cu_shared = 3,
326 	},
327 	{
328 		/* L2 Data Cache per GPU (Total Tex Cache) */
329 		.cache_size = 8192,
330 		.cache_level = 2,
331 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
332 				CRAT_CACHE_FLAGS_DATA_CACHE |
333 				CRAT_CACHE_FLAGS_SIMD_CACHE),
334 		.num_cu_shared = 16,
335 	},
336 };
337 
338 static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
339 	{
340 		/* TCP L1 Cache per CU */
341 		.cache_size = 16,
342 		.cache_level = 1,
343 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
344 				CRAT_CACHE_FLAGS_DATA_CACHE |
345 				CRAT_CACHE_FLAGS_SIMD_CACHE),
346 		.num_cu_shared = 1,
347 	},
348 	{
349 		/* Scalar L1 Instruction Cache per SQC */
350 		.cache_size = 32,
351 		.cache_level = 1,
352 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
353 				CRAT_CACHE_FLAGS_INST_CACHE |
354 				CRAT_CACHE_FLAGS_SIMD_CACHE),
355 		.num_cu_shared = 2,
356 	},
357 	{
358 		/* Scalar L1 Data Cache per SQC */
359 		.cache_size = 16,
360 		.cache_level = 1,
361 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
362 				CRAT_CACHE_FLAGS_DATA_CACHE |
363 				CRAT_CACHE_FLAGS_SIMD_CACHE),
364 		.num_cu_shared = 2,
365 	},
366 	{
367 		/* L2 Data Cache per GPU (Total Tex Cache) */
368 		.cache_size = 8192,
369 		.cache_level = 2,
370 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
371 				CRAT_CACHE_FLAGS_DATA_CACHE |
372 				CRAT_CACHE_FLAGS_SIMD_CACHE),
373 		.num_cu_shared = 14,
374 	},
375 };
376 
377 static struct kfd_gpu_cache_info navi10_cache_info[] = {
378 	{
379 		/* TCP L1 Cache per CU */
380 		.cache_size = 16,
381 		.cache_level = 1,
382 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
383 				CRAT_CACHE_FLAGS_DATA_CACHE |
384 				CRAT_CACHE_FLAGS_SIMD_CACHE),
385 		.num_cu_shared = 1,
386 	},
387 	{
388 		/* Scalar L1 Instruction Cache per SQC */
389 		.cache_size = 32,
390 		.cache_level = 1,
391 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
392 				CRAT_CACHE_FLAGS_INST_CACHE |
393 				CRAT_CACHE_FLAGS_SIMD_CACHE),
394 		.num_cu_shared = 2,
395 	},
396 	{
397 		/* Scalar L1 Data Cache per SQC */
398 		.cache_size = 16,
399 		.cache_level = 1,
400 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
401 				CRAT_CACHE_FLAGS_DATA_CACHE |
402 				CRAT_CACHE_FLAGS_SIMD_CACHE),
403 		.num_cu_shared = 2,
404 	},
405 	{
406 		/* GL1 Data Cache per SA */
407 		.cache_size = 128,
408 		.cache_level = 1,
409 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
410 				CRAT_CACHE_FLAGS_DATA_CACHE |
411 				CRAT_CACHE_FLAGS_SIMD_CACHE),
412 		.num_cu_shared = 10,
413 	},
414 	{
415 		/* L2 Data Cache per GPU (Total Tex Cache) */
416 		.cache_size = 4096,
417 		.cache_level = 2,
418 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
419 				CRAT_CACHE_FLAGS_DATA_CACHE |
420 				CRAT_CACHE_FLAGS_SIMD_CACHE),
421 		.num_cu_shared = 10,
422 	},
423 };
424 
425 static struct kfd_gpu_cache_info vangogh_cache_info[] = {
426 	{
427 		/* TCP L1 Cache per CU */
428 		.cache_size = 16,
429 		.cache_level = 1,
430 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
431 				CRAT_CACHE_FLAGS_DATA_CACHE |
432 				CRAT_CACHE_FLAGS_SIMD_CACHE),
433 		.num_cu_shared = 1,
434 	},
435 	{
436 		/* Scalar L1 Instruction Cache per SQC */
437 		.cache_size = 32,
438 		.cache_level = 1,
439 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
440 				CRAT_CACHE_FLAGS_INST_CACHE |
441 				CRAT_CACHE_FLAGS_SIMD_CACHE),
442 		.num_cu_shared = 2,
443 	},
444 	{
445 		/* Scalar L1 Data Cache per SQC */
446 		.cache_size = 16,
447 		.cache_level = 1,
448 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
449 				CRAT_CACHE_FLAGS_DATA_CACHE |
450 				CRAT_CACHE_FLAGS_SIMD_CACHE),
451 		.num_cu_shared = 2,
452 	},
453 	{
454 		/* GL1 Data Cache per SA */
455 		.cache_size = 128,
456 		.cache_level = 1,
457 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
458 				CRAT_CACHE_FLAGS_DATA_CACHE |
459 				CRAT_CACHE_FLAGS_SIMD_CACHE),
460 		.num_cu_shared = 8,
461 	},
462 	{
463 		/* L2 Data Cache per GPU (Total Tex Cache) */
464 		.cache_size = 1024,
465 		.cache_level = 2,
466 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
467 				CRAT_CACHE_FLAGS_DATA_CACHE |
468 				CRAT_CACHE_FLAGS_SIMD_CACHE),
469 		.num_cu_shared = 8,
470 	},
471 };
472 
473 static struct kfd_gpu_cache_info navi14_cache_info[] = {
474 	{
475 		/* TCP L1 Cache per CU */
476 		.cache_size = 16,
477 		.cache_level = 1,
478 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
479 				CRAT_CACHE_FLAGS_DATA_CACHE |
480 				CRAT_CACHE_FLAGS_SIMD_CACHE),
481 		.num_cu_shared = 1,
482 	},
483 	{
484 		/* Scalar L1 Instruction Cache per SQC */
485 		.cache_size = 32,
486 		.cache_level = 1,
487 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
488 				CRAT_CACHE_FLAGS_INST_CACHE |
489 				CRAT_CACHE_FLAGS_SIMD_CACHE),
490 		.num_cu_shared = 2,
491 	},
492 	{
493 		/* Scalar L1 Data Cache per SQC */
494 		.cache_size = 16,
495 		.cache_level = 1,
496 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
497 				CRAT_CACHE_FLAGS_DATA_CACHE |
498 				CRAT_CACHE_FLAGS_SIMD_CACHE),
499 		.num_cu_shared = 2,
500 	},
501 	{
502 		/* GL1 Data Cache per SA */
503 		.cache_size = 128,
504 		.cache_level = 1,
505 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
506 				CRAT_CACHE_FLAGS_DATA_CACHE |
507 				CRAT_CACHE_FLAGS_SIMD_CACHE),
508 		.num_cu_shared = 12,
509 	},
510 	{
511 		/* L2 Data Cache per GPU (Total Tex Cache) */
512 		.cache_size = 2048,
513 		.cache_level = 2,
514 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
515 				CRAT_CACHE_FLAGS_DATA_CACHE |
516 				CRAT_CACHE_FLAGS_SIMD_CACHE),
517 		.num_cu_shared = 12,
518 	},
519 };
520 
521 static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
522 	{
523 		/* TCP L1 Cache per CU */
524 		.cache_size = 16,
525 		.cache_level = 1,
526 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
527 				CRAT_CACHE_FLAGS_DATA_CACHE |
528 				CRAT_CACHE_FLAGS_SIMD_CACHE),
529 		.num_cu_shared = 1,
530 	},
531 	{
532 		/* Scalar L1 Instruction Cache per SQC */
533 		.cache_size = 32,
534 		.cache_level = 1,
535 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
536 				CRAT_CACHE_FLAGS_INST_CACHE |
537 				CRAT_CACHE_FLAGS_SIMD_CACHE),
538 		.num_cu_shared = 2,
539 	},
540 	{
541 		/* Scalar L1 Data Cache per SQC */
542 		.cache_size = 16,
543 		.cache_level = 1,
544 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
545 				CRAT_CACHE_FLAGS_DATA_CACHE |
546 				CRAT_CACHE_FLAGS_SIMD_CACHE),
547 		.num_cu_shared = 2,
548 	},
549 	{
550 		/* GL1 Data Cache per SA */
551 		.cache_size = 128,
552 		.cache_level = 1,
553 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
554 				CRAT_CACHE_FLAGS_DATA_CACHE |
555 				CRAT_CACHE_FLAGS_SIMD_CACHE),
556 		.num_cu_shared = 10,
557 	},
558 	{
559 		/* L2 Data Cache per GPU (Total Tex Cache) */
560 		.cache_size = 4096,
561 		.cache_level = 2,
562 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
563 				CRAT_CACHE_FLAGS_DATA_CACHE |
564 				CRAT_CACHE_FLAGS_SIMD_CACHE),
565 		.num_cu_shared = 10,
566 	},
567 	{
568 		/* L3 Data Cache per GPU */
569 		.cache_size = 128*1024,
570 		.cache_level = 3,
571 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
572 				CRAT_CACHE_FLAGS_DATA_CACHE |
573 				CRAT_CACHE_FLAGS_SIMD_CACHE),
574 		.num_cu_shared = 10,
575 	},
576 };
577 
578 static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
579 	{
580 		/* TCP L1 Cache per CU */
581 		.cache_size = 16,
582 		.cache_level = 1,
583 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
584 				CRAT_CACHE_FLAGS_DATA_CACHE |
585 				CRAT_CACHE_FLAGS_SIMD_CACHE),
586 		.num_cu_shared = 1,
587 	},
588 	{
589 		/* Scalar L1 Instruction Cache per SQC */
590 		.cache_size = 32,
591 		.cache_level = 1,
592 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
593 				CRAT_CACHE_FLAGS_INST_CACHE |
594 				CRAT_CACHE_FLAGS_SIMD_CACHE),
595 		.num_cu_shared = 2,
596 	},
597 	{
598 		/* Scalar L1 Data Cache per SQC */
599 		.cache_size = 16,
600 		.cache_level = 1,
601 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
602 				CRAT_CACHE_FLAGS_DATA_CACHE |
603 				CRAT_CACHE_FLAGS_SIMD_CACHE),
604 		.num_cu_shared = 2,
605 	},
606 	{
607 		/* GL1 Data Cache per SA */
608 		.cache_size = 128,
609 		.cache_level = 1,
610 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
611 				CRAT_CACHE_FLAGS_DATA_CACHE |
612 				CRAT_CACHE_FLAGS_SIMD_CACHE),
613 		.num_cu_shared = 10,
614 	},
615 	{
616 		/* L2 Data Cache per GPU (Total Tex Cache) */
617 		.cache_size = 3072,
618 		.cache_level = 2,
619 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
620 				CRAT_CACHE_FLAGS_DATA_CACHE |
621 				CRAT_CACHE_FLAGS_SIMD_CACHE),
622 		.num_cu_shared = 10,
623 	},
624 	{
625 		/* L3 Data Cache per GPU */
626 		.cache_size = 96*1024,
627 		.cache_level = 3,
628 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
629 				CRAT_CACHE_FLAGS_DATA_CACHE |
630 				CRAT_CACHE_FLAGS_SIMD_CACHE),
631 		.num_cu_shared = 10,
632 	},
633 };
634 
635 static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
636 	{
637 		/* TCP L1 Cache per CU */
638 		.cache_size = 16,
639 		.cache_level = 1,
640 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
641 				CRAT_CACHE_FLAGS_DATA_CACHE |
642 				CRAT_CACHE_FLAGS_SIMD_CACHE),
643 		.num_cu_shared = 1,
644 	},
645 	{
646 		/* Scalar L1 Instruction Cache per SQC */
647 		.cache_size = 32,
648 		.cache_level = 1,
649 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
650 				CRAT_CACHE_FLAGS_INST_CACHE |
651 				CRAT_CACHE_FLAGS_SIMD_CACHE),
652 		.num_cu_shared = 2,
653 	},
654 	{
655 		/* Scalar L1 Data Cache per SQC */
656 		.cache_size = 16,
657 		.cache_level = 1,
658 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
659 				CRAT_CACHE_FLAGS_DATA_CACHE |
660 				CRAT_CACHE_FLAGS_SIMD_CACHE),
661 		.num_cu_shared = 2,
662 	},
663 	{
664 		/* GL1 Data Cache per SA */
665 		.cache_size = 128,
666 		.cache_level = 1,
667 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
668 				CRAT_CACHE_FLAGS_DATA_CACHE |
669 				CRAT_CACHE_FLAGS_SIMD_CACHE),
670 		.num_cu_shared = 8,
671 	},
672 	{
673 		/* L2 Data Cache per GPU (Total Tex Cache) */
674 		.cache_size = 2048,
675 		.cache_level = 2,
676 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
677 				CRAT_CACHE_FLAGS_DATA_CACHE |
678 				CRAT_CACHE_FLAGS_SIMD_CACHE),
679 		.num_cu_shared = 8,
680 	},
681 	{
682 		/* L3 Data Cache per GPU */
683 		.cache_size = 32*1024,
684 		.cache_level = 3,
685 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
686 				CRAT_CACHE_FLAGS_DATA_CACHE |
687 				CRAT_CACHE_FLAGS_SIMD_CACHE),
688 		.num_cu_shared = 8,
689 	},
690 };
691 
692 static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
693 	{
694 		/* TCP L1 Cache per CU */
695 		.cache_size = 16,
696 		.cache_level = 1,
697 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
698 				CRAT_CACHE_FLAGS_DATA_CACHE |
699 				CRAT_CACHE_FLAGS_SIMD_CACHE),
700 		.num_cu_shared = 1,
701 	},
702 	{
703 		/* Scalar L1 Instruction Cache per SQC */
704 		.cache_size = 32,
705 		.cache_level = 1,
706 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
707 				CRAT_CACHE_FLAGS_INST_CACHE |
708 				CRAT_CACHE_FLAGS_SIMD_CACHE),
709 		.num_cu_shared = 2,
710 	},
711 	{
712 		/* Scalar L1 Data Cache per SQC */
713 		.cache_size = 16,
714 		.cache_level = 1,
715 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
716 				CRAT_CACHE_FLAGS_DATA_CACHE |
717 				CRAT_CACHE_FLAGS_SIMD_CACHE),
718 		.num_cu_shared = 2,
719 	},
720 	{
721 		/* GL1 Data Cache per SA */
722 		.cache_size = 128,
723 		.cache_level = 1,
724 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
725 				CRAT_CACHE_FLAGS_DATA_CACHE |
726 				CRAT_CACHE_FLAGS_SIMD_CACHE),
727 		.num_cu_shared = 8,
728 	},
729 	{
730 		/* L2 Data Cache per GPU (Total Tex Cache) */
731 		.cache_size = 1024,
732 		.cache_level = 2,
733 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
734 				CRAT_CACHE_FLAGS_DATA_CACHE |
735 				CRAT_CACHE_FLAGS_SIMD_CACHE),
736 		.num_cu_shared = 8,
737 	},
738 	{
739 		/* L3 Data Cache per GPU */
740 		.cache_size = 16*1024,
741 		.cache_level = 3,
742 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
743 				CRAT_CACHE_FLAGS_DATA_CACHE |
744 				CRAT_CACHE_FLAGS_SIMD_CACHE),
745 		.num_cu_shared = 8,
746 	},
747 };
748 
749 static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
750 	{
751 		/* TCP L1 Cache per CU */
752 		.cache_size = 16,
753 		.cache_level = 1,
754 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
755 				CRAT_CACHE_FLAGS_DATA_CACHE |
756 				CRAT_CACHE_FLAGS_SIMD_CACHE),
757 		.num_cu_shared = 1,
758 	},
759 	{
760 		/* Scalar L1 Instruction Cache per SQC */
761 		.cache_size = 32,
762 		.cache_level = 1,
763 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
764 				CRAT_CACHE_FLAGS_INST_CACHE |
765 				CRAT_CACHE_FLAGS_SIMD_CACHE),
766 		.num_cu_shared = 2,
767 	},
768 	{
769 		/* Scalar L1 Data Cache per SQC */
770 		.cache_size = 16,
771 		.cache_level = 1,
772 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
773 				CRAT_CACHE_FLAGS_DATA_CACHE |
774 				CRAT_CACHE_FLAGS_SIMD_CACHE),
775 		.num_cu_shared = 2,
776 	},
777 	{
778 		/* GL1 Data Cache per SA */
779 		.cache_size = 128,
780 		.cache_level = 1,
781 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
782 				CRAT_CACHE_FLAGS_DATA_CACHE |
783 				CRAT_CACHE_FLAGS_SIMD_CACHE),
784 		.num_cu_shared = 6,
785 	},
786 	{
787 		/* L2 Data Cache per GPU (Total Tex Cache) */
788 		.cache_size = 2048,
789 		.cache_level = 2,
790 		.flags = (CRAT_CACHE_FLAGS_ENABLED |
791 				CRAT_CACHE_FLAGS_DATA_CACHE |
792 				CRAT_CACHE_FLAGS_SIMD_CACHE),
793 		.num_cu_shared = 6,
794 	},
795 };
796 
797 static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
798 		struct crat_subtype_computeunit *cu)
799 {
800 	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
801 	dev->node_props.cpu_core_id_base = cu->processor_id_low;
802 	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
803 		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
804 
805 	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
806 			cu->processor_id_low);
807 }
808 
809 static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
810 		struct crat_subtype_computeunit *cu)
811 {
812 	dev->node_props.simd_id_base = cu->processor_id_low;
813 	dev->node_props.simd_count = cu->num_simd_cores;
814 	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
815 	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
816 	dev->node_props.wave_front_size = cu->wave_front_size;
817 	dev->node_props.array_count = cu->array_count;
818 	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
819 	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
820 	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
821 	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
822 		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
823 	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
824 }
825 
826 /* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
827  * correct topology device present in the device_list
828  */
829 static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
830 				struct list_head *device_list)
831 {
832 	struct kfd_topology_device *dev;
833 
834 	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
835 			cu->proximity_domain, cu->hsa_capability);
836 	list_for_each_entry(dev, device_list, list) {
837 		if (cu->proximity_domain == dev->proximity_domain) {
838 			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
839 				kfd_populated_cu_info_cpu(dev, cu);
840 
841 			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
842 				kfd_populated_cu_info_gpu(dev, cu);
843 			break;
844 		}
845 	}
846 
847 	return 0;
848 }
849 
850 static struct kfd_mem_properties *
851 find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
852 		struct kfd_topology_device *dev)
853 {
854 	struct kfd_mem_properties *props;
855 
856 	list_for_each_entry(props, &dev->mem_props, list) {
857 		if (props->heap_type == heap_type
858 				&& props->flags == flags
859 				&& props->width == width)
860 			return props;
861 	}
862 
863 	return NULL;
864 }
865 /* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
866  * correct topology device present in the device_list
867  */
868 static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
869 				struct list_head *device_list)
870 {
871 	struct kfd_mem_properties *props;
872 	struct kfd_topology_device *dev;
873 	uint32_t heap_type;
874 	uint64_t size_in_bytes;
875 	uint32_t flags = 0;
876 	uint32_t width;
877 
878 	pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
879 			mem->proximity_domain);
880 	list_for_each_entry(dev, device_list, list) {
881 		if (mem->proximity_domain == dev->proximity_domain) {
882 			/* We're on GPU node */
883 			if (dev->node_props.cpu_cores_count == 0) {
884 				/* APU */
885 				if (mem->visibility_type == 0)
886 					heap_type =
887 						HSA_MEM_HEAP_TYPE_FB_PRIVATE;
888 				/* dGPU */
889 				else
890 					heap_type = mem->visibility_type;
891 			} else
892 				heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
893 
894 			if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
895 				flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
896 			if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
897 				flags |= HSA_MEM_FLAGS_NON_VOLATILE;
898 
899 			size_in_bytes =
900 				((uint64_t)mem->length_high << 32) +
901 							mem->length_low;
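			/* e.g. length_high = 0x1 and length_low = 0x80000000
			 * yield 0x180000000 bytes, i.e. 6 GiB
			 */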
902 			width = mem->width;
903 
904 			/* Multiple banks of the same type are aggregated into
905 			 * one. User mode doesn't care about multiple physical
906 			 * memory segments. It's managed as a single virtual
907 			 * heap for user mode.
908 			 */
909 			props = find_subtype_mem(heap_type, flags, width, dev);
910 			if (props) {
911 				props->size_in_bytes += size_in_bytes;
912 				break;
913 			}
914 
915 			props = kfd_alloc_struct(props);
916 			if (!props)
917 				return -ENOMEM;
918 
919 			props->heap_type = heap_type;
920 			props->flags = flags;
921 			props->size_in_bytes = size_in_bytes;
922 			props->width = width;
923 
924 			dev->node_props.mem_banks_count++;
925 			list_add_tail(&props->list, &dev->mem_props);
926 
927 			break;
928 		}
929 	}
930 
931 	return 0;
932 }
933 
934 /* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
935  * correct topology device present in the device_list
936  */
937 static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
938 			struct list_head *device_list)
939 {
940 	struct kfd_cache_properties *props;
941 	struct kfd_topology_device *dev;
942 	uint32_t id;
943 	uint32_t total_num_of_cu;
944 
945 	id = cache->processor_id_low;
946 
947 	pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
948 	list_for_each_entry(dev, device_list, list) {
949 		total_num_of_cu = (dev->node_props.array_count *
950 					dev->node_props.cu_per_simd_array);
951 
952 		/* Cache information in CRAT doesn't have proximity_domain
953 		 * information as it is associated with a CPU core or GPU
954 		 * Compute Unit. So map the cache using the CPU core ID or
955 		 * SIMD (GPU) ID.
956 		 * TODO: This works because currently we can safely assume that
957 		 *  Compute Units are parsed before caches are parsed. In
958 		 *  future, remove this dependency
959 		 */
960 		if ((id >= dev->node_props.cpu_core_id_base &&
961 			id <= dev->node_props.cpu_core_id_base +
962 				dev->node_props.cpu_cores_count) ||
963 			(id >= dev->node_props.simd_id_base &&
964 			id < dev->node_props.simd_id_base +
965 				total_num_of_cu)) {
966 			props = kfd_alloc_struct(props);
967 			if (!props)
968 				return -ENOMEM;
969 
970 			props->processor_id_low = id;
971 			props->cache_level = cache->cache_level;
972 			props->cache_size = cache->cache_size;
973 			props->cacheline_size = cache->cache_line_size;
974 			props->cachelines_per_tag = cache->lines_per_tag;
975 			props->cache_assoc = cache->associativity;
976 			props->cache_latency = cache->cache_latency;
977 			memcpy(props->sibling_map, cache->sibling_map,
978 					sizeof(props->sibling_map));
979 
980 			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
981 				props->cache_type |= HSA_CACHE_TYPE_DATA;
982 			if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
983 				props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
984 			if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
985 				props->cache_type |= HSA_CACHE_TYPE_CPU;
986 			if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
987 				props->cache_type |= HSA_CACHE_TYPE_HSACU;
988 
989 			dev->cache_count++;
990 			dev->node_props.caches_count++;
991 			list_add_tail(&props->list, &dev->cache_props);
992 
993 			break;
994 		}
995 	}
996 
997 	return 0;
998 }
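
/* Worked example (hypothetical values): a GPU node with
 * simd_id_base = 0x80001000, array_count = 4 and cu_per_simd_array = 16
 * owns processor IDs 0x80001000..0x8000103F, so a CRAT cache entry whose
 * processor_id_low falls in that range is attached to this node.
 */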
999 
1000 /* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
1001  * correct topology device present in the device_list
1002  */
1003 static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
1004 					struct list_head *device_list)
1005 {
1006 	struct kfd_iolink_properties *props = NULL, *props2;
1007 	struct kfd_topology_device *dev, *to_dev;
1008 	uint32_t id_from;
1009 	uint32_t id_to;
1010 
1011 	id_from = iolink->proximity_domain_from;
1012 	id_to = iolink->proximity_domain_to;
1013 
1014 	pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
1015 			id_from, id_to);
1016 	list_for_each_entry(dev, device_list, list) {
1017 		if (id_from == dev->proximity_domain) {
1018 			props = kfd_alloc_struct(props);
1019 			if (!props)
1020 				return -ENOMEM;
1021 
1022 			props->node_from = id_from;
1023 			props->node_to = id_to;
1024 			props->ver_maj = iolink->version_major;
1025 			props->ver_min = iolink->version_minor;
1026 			props->iolink_type = iolink->io_interface_type;
1027 
1028 			if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
1029 				props->weight = 20;
1030 			else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
1031 				props->weight = 15 * iolink->num_hops_xgmi;
1032 			else
1033 				props->weight = node_distance(id_from, id_to);
1034 
1035 			props->min_latency = iolink->minimum_latency;
1036 			props->max_latency = iolink->maximum_latency;
1037 			props->min_bandwidth = iolink->minimum_bandwidth_mbs;
1038 			props->max_bandwidth = iolink->maximum_bandwidth_mbs;
1039 			props->rec_transfer_size =
1040 					iolink->recommended_transfer_size;
1041 
1042 			dev->io_link_count++;
1043 			dev->node_props.io_links_count++;
1044 			list_add_tail(&props->list, &dev->io_link_props);
1045 			break;
1046 		}
1047 	}
1048 
1049 	/* CPU topology is created before GPUs are detected, so CPU->GPU
1050 	 * links are not built at that time. If a PCIe link type is discovered,
1051 	 * a GPU has been detected and a GPU->CPU link is being added to the
1052 	 * topology. At this time, also add the corresponding CPU->GPU link
1053 	 * if the GPU has a large BAR.
1054 	 * For xGMI, only one direction of the link was added to the CRAT
1055 	 * table, so add the corresponding reverse-direction link now.
1056 	 */
1057 	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
1058 		to_dev = kfd_topology_device_by_proximity_domain(id_to);
1059 		if (!to_dev)
1060 			return -ENODEV;
1061 		/* same everything but the other direction */
1062 		props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
1063 		if (!props2)
1064 			return -ENOMEM;
1065 
1066 		props2->node_from = id_to;
1067 		props2->node_to = id_from;
1068 		props2->kobj = NULL;
1069 		to_dev->io_link_count++;
1070 		to_dev->node_props.io_links_count++;
1071 		list_add_tail(&props2->list, &to_dev->io_link_props);
1072 	}
1073 
1074 	return 0;
1075 }
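
/* Note on the weights above (assuming a lower weight means a closer link):
 * PCIe is fixed at 20 while xGMI scales at 15 per hop, so 1-hop xGMI
 * (weight 15) ranks ahead of PCIe but 2-hop xGMI (weight 30) ranks behind.
 */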
1076 
1077 /* kfd_parse_subtype - parse subtypes and attach them to the correct topology
1078  * device present in the device_list
1079  *	@sub_type_hdr - subtype section of crat_image
1080  *	@device_list - list of topology devices present in this crat_image
1081  */
1082 static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
1083 				struct list_head *device_list)
1084 {
1085 	struct crat_subtype_computeunit *cu;
1086 	struct crat_subtype_memory *mem;
1087 	struct crat_subtype_cache *cache;
1088 	struct crat_subtype_iolink *iolink;
1089 	int ret = 0;
1090 
1091 	switch (sub_type_hdr->type) {
1092 	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
1093 		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
1094 		ret = kfd_parse_subtype_cu(cu, device_list);
1095 		break;
1096 	case CRAT_SUBTYPE_MEMORY_AFFINITY:
1097 		mem = (struct crat_subtype_memory *)sub_type_hdr;
1098 		ret = kfd_parse_subtype_mem(mem, device_list);
1099 		break;
1100 	case CRAT_SUBTYPE_CACHE_AFFINITY:
1101 		cache = (struct crat_subtype_cache *)sub_type_hdr;
1102 		ret = kfd_parse_subtype_cache(cache, device_list);
1103 		break;
1104 	case CRAT_SUBTYPE_TLB_AFFINITY:
1105 		/*
1106 		 * For now, nothing to do here
1107 		 */
1108 		pr_debug("Found TLB entry in CRAT table (not processing)\n");
1109 		break;
1110 	case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
1111 		/*
1112 		 * For now, nothing to do here
1113 		 */
1114 		pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
1115 		break;
1116 	case CRAT_SUBTYPE_IOLINK_AFFINITY:
1117 		iolink = (struct crat_subtype_iolink *)sub_type_hdr;
1118 		ret = kfd_parse_subtype_iolink(iolink, device_list);
1119 		break;
1120 	default:
1121 		pr_warn("Unknown subtype %d in CRAT\n",
1122 				sub_type_hdr->type);
1123 	}
1124 
1125 	return ret;
1126 }
1127 
1128 /* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
1129  * create a kfd_topology_device and add it to device_list. Also parse
1130  * CRAT subtypes and attach them to the appropriate kfd_topology_device
1131  *	@crat_image - input image containing CRAT
1132  *	@device_list - [OUT] list of kfd_topology_device generated after
1133  *		       parsing crat_image
1134  *	@proximity_domain - Proximity domain of the first device in the table
1135  *
1136  *	Return - 0 if successful, else a negative errno
1137  */
1138 int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
1139 			 uint32_t proximity_domain)
1140 {
1141 	struct kfd_topology_device *top_dev = NULL;
1142 	struct crat_subtype_generic *sub_type_hdr;
1143 	uint16_t node_id;
1144 	int ret = 0;
1145 	struct crat_header *crat_table = (struct crat_header *)crat_image;
1146 	uint16_t num_nodes;
1147 	uint32_t image_len;
1148 
1149 	if (!crat_image)
1150 		return -EINVAL;
1151 
1152 	if (!list_empty(device_list)) {
1153 		pr_warn("Error: device list should be empty\n");
1154 		return -EINVAL;
1155 	}
1156 
1157 	num_nodes = crat_table->num_domains;
1158 	image_len = crat_table->length;
1159 
1160 	pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
1161 
1162 	for (node_id = 0; node_id < num_nodes; node_id++) {
1163 		top_dev = kfd_create_topology_device(device_list);
1164 		if (!top_dev)
1165 			break;
1166 		top_dev->proximity_domain = proximity_domain++;
1167 	}
1168 
1169 	if (!top_dev) {
1170 		ret = -ENOMEM;
1171 		goto err;
1172 	}
1173 
1174 	memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
1175 	memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
1176 			CRAT_OEMTABLEID_LENGTH);
1177 	top_dev->oem_revision = crat_table->oem_revision;
1178 
1179 	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
1180 	while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
1181 			((char *)crat_image) + image_len) {
1182 		if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
1183 			ret = kfd_parse_subtype(sub_type_hdr, device_list);
1184 			if (ret)
1185 				break;
1186 		}
1187 
1188 		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1189 				sub_type_hdr->length);
1190 	}
1191 
1192 err:
1193 	if (ret)
1194 		kfd_release_topology_device_list(device_list);
1195 
1196 	return ret;
1197 }
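
/* Example of the subtype walk above (illustrative, lengths hypothetical):
 * if the header is followed by a CU subtype whose length field is 40 and a
 * memory subtype whose length field is 40, sub_type_hdr advances by 40
 * bytes on each iteration until it passes crat_image + image_len.
 */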
1198 
1199 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1200 static int fill_in_l1_pcache(struct crat_subtype_cache *pcache,
1201 				struct kfd_gpu_cache_info *pcache_info,
1202 				struct kfd_cu_info *cu_info,
1203 				int mem_available,
1204 				int cu_bitmask,
1205 				int cache_type, unsigned int cu_processor_id,
1206 				int cu_block)
1207 {
1208 	unsigned int cu_sibling_map_mask;
1209 	int first_active_cu;
1210 
1211 	/* First check if enough memory is available */
1212 	if (sizeof(struct crat_subtype_cache) > mem_available)
1213 		return -ENOMEM;
1214 
1215 	cu_sibling_map_mask = cu_bitmask;
1216 	cu_sibling_map_mask >>= cu_block;
1217 	cu_sibling_map_mask &=
1218 		((1 << pcache_info[cache_type].num_cu_shared) - 1);
1219 	first_active_cu = ffs(cu_sibling_map_mask);
1220 
1221 	/* A CU could be inactive. For a shared cache, find the first active
1222 	 * CU; for a non-shared cache, check whether the CU is inactive and,
1223 	 * if so, skip it.
1224 	 */
1225 	if (first_active_cu) {
1226 		memset(pcache, 0, sizeof(struct crat_subtype_cache));
1227 		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
1228 		pcache->length = sizeof(struct crat_subtype_cache);
1229 		pcache->flags = pcache_info[cache_type].flags;
1230 		pcache->processor_id_low = cu_processor_id
1231 					 + (first_active_cu - 1);
1232 		pcache->cache_level = pcache_info[cache_type].cache_level;
1233 		pcache->cache_size = pcache_info[cache_type].cache_size;
1234 
1235 		/* The sibling map is relative to processor_id_low, so shift
1236 		 * out the inactive CUs.
1237 		 */
1238 		cu_sibling_map_mask =
1239 			cu_sibling_map_mask >> (first_active_cu - 1);
1240 
1241 		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
1242 		pcache->sibling_map[1] =
1243 				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1244 		pcache->sibling_map[2] =
1245 				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1246 		pcache->sibling_map[3] =
1247 				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1248 		return 0;
1249 	}
1250 	return 1;
1251 }
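
/* Worked example (hypothetical values): with cu_bitmask = 0b1110,
 * cu_block = 0 and num_cu_shared = 2, the mask reduces to 0b10, so
 * first_active_cu = 2 (CU 0 is inactive); processor_id_low becomes
 * cu_processor_id + 1 and the sibling map is shifted down to 0b1.
 */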
1252 
1253 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1254 static int fill_in_l2_l3_pcache(struct crat_subtype_cache *pcache,
1255 				struct kfd_gpu_cache_info *pcache_info,
1256 				struct kfd_cu_info *cu_info,
1257 				int mem_available,
1258 				int cache_type, unsigned int cu_processor_id)
1259 {
1260 	unsigned int cu_sibling_map_mask;
1261 	int first_active_cu;
1262 	int i, j, k;
1263 
1264 	/* First check if enough memory is available */
1265 	if (sizeof(struct crat_subtype_cache) > mem_available)
1266 		return -ENOMEM;
1267 
1268 	cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
1269 	cu_sibling_map_mask &=
1270 		((1 << pcache_info[cache_type].num_cu_shared) - 1);
1271 	first_active_cu = ffs(cu_sibling_map_mask);
1272 
1273 	/* A CU could be inactive. For a shared cache, find the first active
1274 	 * CU; for a non-shared cache, check whether the CU is inactive and,
1275 	 * if so, skip it.
1276 	 */
1277 	if (first_active_cu) {
1278 		memset(pcache, 0, sizeof(struct crat_subtype_cache));
1279 		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
1280 		pcache->length = sizeof(struct crat_subtype_cache);
1281 		pcache->flags = pcache_info[cache_type].flags;
1282 		pcache->processor_id_low = cu_processor_id
1283 					 + (first_active_cu - 1);
1284 		pcache->cache_level = pcache_info[cache_type].cache_level;
1285 		pcache->cache_size = pcache_info[cache_type].cache_size;
1286 
1287 		/* The sibling map is relative to processor_id_low, so shift
1288 		 * out the inactive CUs.
1289 		 */
1290 		cu_sibling_map_mask =
1291 			cu_sibling_map_mask >> (first_active_cu - 1);
1292 		k = 0;
1293 		for (i = 0; i < cu_info->num_shader_engines; i++) {
1294 			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
1295 				j++) {
1296 				pcache->sibling_map[k] =
1297 				 (uint8_t)(cu_sibling_map_mask & 0xFF);
1298 				pcache->sibling_map[k+1] =
1299 				 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1300 				pcache->sibling_map[k+2] =
1301 				 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1302 				pcache->sibling_map[k+3] =
1303 				 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1304 				k += 4;
1305 				cu_sibling_map_mask =
1306 					cu_info->cu_bitmap[i % 4][j + i / 4];
1307 				cu_sibling_map_mask &= (
1308 				 (1 << pcache_info[cache_type].num_cu_shared)
1309 				 - 1);
1310 			}
1311 		}
1312 		return 0;
1313 	}
1314 	return 1;
1315 }
1316 
1317 /* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
1318  * tables
1319  *
1320  *	@kdev - [IN] GPU device
1321  *	@gpu_processor_id - [IN] GPU processor ID with which these caches
1322  *			    are associated
1323  *	@available_size - [IN] Amount of memory available in pcache
1324  *	@cu_info - [IN] Compute Unit info obtained from KGD
1325  *	@pcache - [OUT] memory into which cache data is to be filled in.
1326  *	@size_filled - [OUT] amount of data used up in pcache.
1327  *	@num_of_entries - [OUT] number of caches added
1328  */
1329 static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
1330 			int gpu_processor_id,
1331 			int available_size,
1332 			struct kfd_cu_info *cu_info,
1333 			struct crat_subtype_cache *pcache,
1334 			int *size_filled,
1335 			int *num_of_entries)
1336 {
1337 	struct kfd_gpu_cache_info *pcache_info;
1338 	int num_of_cache_types = 0;
1339 	int i, j, k;
1340 	int ct = 0;
1341 	int mem_available = available_size;
1342 	unsigned int cu_processor_id;
1343 	int ret;
1344 	unsigned int num_cu_shared;
1345 
1346 	switch (kdev->adev->asic_type) {
1347 	case CHIP_KAVERI:
1348 		pcache_info = kaveri_cache_info;
1349 		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
1350 		break;
1351 	case CHIP_HAWAII:
1352 		pcache_info = hawaii_cache_info;
1353 		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
1354 		break;
1355 	case CHIP_CARRIZO:
1356 		pcache_info = carrizo_cache_info;
1357 		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
1358 		break;
1359 	case CHIP_TONGA:
1360 		pcache_info = tonga_cache_info;
1361 		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
1362 		break;
1363 	case CHIP_FIJI:
1364 		pcache_info = fiji_cache_info;
1365 		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
1366 		break;
1367 	case CHIP_POLARIS10:
1368 		pcache_info = polaris10_cache_info;
1369 		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
1370 		break;
1371 	case CHIP_POLARIS11:
1372 		pcache_info = polaris11_cache_info;
1373 		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
1374 		break;
1375 	case CHIP_POLARIS12:
1376 		pcache_info = polaris12_cache_info;
1377 		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
1378 		break;
1379 	case CHIP_VEGAM:
1380 		pcache_info = vegam_cache_info;
1381 		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
1382 		break;
1383 	default:
1384 		switch (KFD_GC_VERSION(kdev)) {
1385 		case IP_VERSION(9, 0, 1):
1386 			pcache_info = vega10_cache_info;
1387 			num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
1388 			break;
1389 		case IP_VERSION(9, 2, 1):
1390 			pcache_info = vega12_cache_info;
1391 			num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
1392 			break;
1393 		case IP_VERSION(9, 4, 0):
1394 		case IP_VERSION(9, 4, 1):
1395 			pcache_info = vega20_cache_info;
1396 			num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
1397 			break;
1398 		case IP_VERSION(9, 4, 2):
1399 			pcache_info = aldebaran_cache_info;
1400 			num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
1401 			break;
1402 		case IP_VERSION(9, 1, 0):
1403 		case IP_VERSION(9, 2, 2):
1404 			pcache_info = raven_cache_info;
1405 			num_of_cache_types = ARRAY_SIZE(raven_cache_info);
1406 			break;
1407 		case IP_VERSION(9, 3, 0):
1408 			pcache_info = renoir_cache_info;
1409 			num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
1410 			break;
1411 		case IP_VERSION(10, 1, 10):
1412 		case IP_VERSION(10, 1, 2):
1413 		case IP_VERSION(10, 1, 3):
1414 			pcache_info = navi10_cache_info;
1415 			num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
1416 			break;
1417 		case IP_VERSION(10, 1, 1):
1418 			pcache_info = navi14_cache_info;
1419 			num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
1420 			break;
1421 		case IP_VERSION(10, 3, 0):
1422 			pcache_info = sienna_cichlid_cache_info;
1423 			num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
1424 			break;
1425 		case IP_VERSION(10, 3, 2):
1426 			pcache_info = navy_flounder_cache_info;
1427 			num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
1428 			break;
1429 		case IP_VERSION(10, 3, 4):
1430 			pcache_info = dimgrey_cavefish_cache_info;
1431 			num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
1432 			break;
1433 		case IP_VERSION(10, 3, 1):
1434 			pcache_info = vangogh_cache_info;
1435 			num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
1436 			break;
1437 		case IP_VERSION(10, 3, 5):
1438 			pcache_info = beige_goby_cache_info;
1439 			num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
1440 			break;
1441 		case IP_VERSION(10, 3, 3):
1442 			pcache_info = yellow_carp_cache_info;
1443 			num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
1444 			break;
1445 		default:
1446 			return -EINVAL;
1447 		}
1448 	}
1449 
1450 	*size_filled = 0;
1451 	*num_of_entries = 0;
1452 
1453 	/* For each type of cache listed in the kfd_gpu_cache_info table,
1454 	 * go through all available Compute Units.
1455 	 * The [i,j,k] loop:
1456 	 *		if kfd_gpu_cache_info.num_cu_shared == 1,
1457 	 *			parses through every available CU;
1458 	 *		if kfd_gpu_cache_info.num_cu_shared != 1,
1459 	 *			considers only one CU from each
1460 	 *			shared group.
1461 	 */
1462 
1463 	for (ct = 0; ct < num_of_cache_types; ct++) {
1464 	  cu_processor_id = gpu_processor_id;
1465 	  if (pcache_info[ct].cache_level == 1) {
1466 	    for (i = 0; i < cu_info->num_shader_engines; i++) {
1467 	      for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
1468 	        for (k = 0; k < cu_info->num_cu_per_sh;
1469 		  k += pcache_info[ct].num_cu_shared) {
1470 		  ret = fill_in_l1_pcache(pcache,
1471 					pcache_info,
1472 					cu_info,
1473 					mem_available,
1474 					cu_info->cu_bitmap[i % 4][j + i / 4],
1475 					ct,
1476 					cu_processor_id,
1477 					k);
1478 
1479 		  if (ret < 0)
1480 			break;
1481 
1482 		  if (!ret) {
1483 				pcache++;
1484 				(*num_of_entries)++;
1485 				mem_available -= sizeof(*pcache);
1486 				(*size_filled) += sizeof(*pcache);
1487 		  }
1488 
1489 		  /* Move to next CU block */
1490 		  num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
1491 					cu_info->num_cu_per_sh) ?
1492 					pcache_info[ct].num_cu_shared :
1493 					(cu_info->num_cu_per_sh - k);
1494 		  cu_processor_id += num_cu_shared;
1495 		}
1496 	      }
1497 	    }
1498 	  } else {
1499 			ret = fill_in_l2_l3_pcache(pcache,
1500 				pcache_info,
1501 				cu_info,
1502 				mem_available,
1503 				ct,
1504 				cu_processor_id);
1505 
1506 			if (ret < 0)
1507 				break;
1508 
1509 			if (!ret) {
1510 				pcache++;
1511 				(*num_of_entries)++;
1512 				mem_available -= sizeof(*pcache);
1513 				(*size_filled) += sizeof(*pcache);
1514 			}
1515 	  }
1516 	}
1517 
1518 	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
1519 
1520 	return 0;
1521 }
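
/* Usage sketch (assumed caller pattern; not shown in this file): the VCRAT
 * builder points pcache at the next free byte of the CRAT image, passes the
 * remaining buffer space as available_size, and on return advances its
 * write pointer by *size_filled and its entry count by *num_of_entries.
 */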
1522 
1523 static bool kfd_ignore_crat(void)
1524 {
1525 	bool ret;
1526 
1527 	if (ignore_crat)
1528 		return true;
1529 
1530 #ifndef KFD_SUPPORT_IOMMU_V2
1531 	ret = true;
1532 #else
1533 	ret = false;
1534 #endif
1535 
1536 	return ret;
1537 }
1538 
1539 /*
1540  * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
1541  * copies CRAT from ACPI (if available).
1542  * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
1543  *
1544  *	@crat_image: CRAT read from ACPI. If no CRAT in ACPI then
1545  *		     crat_image will be NULL
1546  *	@size: [OUT] size of crat_image
1547  *
1548  *	Return 0 if successful else return error code
1549  */
1550 int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
1551 {
1552 	struct acpi_table_header *crat_table;
1553 	acpi_status status;
1554 	void *pcrat_image;
1555 	int rc = 0;
1556 
1557 	if (!crat_image)
1558 		return -EINVAL;
1559 
1560 	*crat_image = NULL;
1561 
1562 	if (kfd_ignore_crat()) {
1563 		pr_info("CRAT table disabled by module option\n");
1564 		return -ENODATA;
1565 	}
1566 
1567 	/* Fetch the CRAT table from ACPI */
1568 	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
1569 	if (status == AE_NOT_FOUND) {
1570 		pr_warn("CRAT table not found\n");
1571 		return -ENODATA;
1572 	} else if (ACPI_FAILURE(status)) {
1573 		const char *err = acpi_format_exception(status);
1574 
1575 		pr_err("CRAT table error: %s\n", err);
1576 		return -EINVAL;
1577 	}
1578 
1579 	pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
1580 	if (!pcrat_image) {
1581 		rc = -ENOMEM;
1582 		goto out;
1583 	}
1584 
1585 	memcpy(pcrat_image, crat_table, crat_table->length);
1586 	*crat_image = pcrat_image;
1587 	*size = crat_table->length;
1588 out:
1589 	acpi_put_table(crat_table);
1590 	return rc;
1591 }
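
/* Usage sketch (illustrative):
 *
 *	void *crat_image = NULL;
 *	size_t size = 0;
 *
 *	if (!kfd_create_crat_image_acpi(&crat_image, &size))
 *		kfd_parse_crat_table(crat_image, &device_list, 0);
 *	kfd_destroy_crat_image(crat_image);
 *
 * kfd_destroy_crat_image() frees the kvmalloc'd copy, as the NOTE in the
 * function comment above requires.
 */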
1592 
1593 /* Memory required to create Virtual CRAT.
1594  * Since there is no easy way to predict the amount of memory required, the
1595  * following amount is allocated for GPU Virtual CRAT. This is
1596  * expected to cover all known conditions. But to be safe, additional
1597  * checks are put in the code to ensure we don't overwrite memory.
1598  */
1599 #define VCRAT_SIZE_FOR_GPU	(4 * PAGE_SIZE)
1600 
1601 /* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
1602  *
1603  *	@numa_node_id: CPU NUMA node id
1604  *	@avail_size: Available size in the memory
1605  *	@sub_type_hdr: Memory into which compute info will be filled in
1606  *
1607  *	Return 0 if successful, else a negative errno
1608  */
1609 static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
1610 				int proximity_domain,
1611 				struct crat_subtype_computeunit *sub_type_hdr)
1612 {
1613 	const struct cpumask *cpumask;
1614 
1615 	*avail_size -= sizeof(struct crat_subtype_computeunit);
1616 	if (*avail_size < 0)
1617 		return -ENOMEM;
1618 
1619 	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
1620 
1621 	/* Fill in subtype header data */
1622 	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
1623 	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
1624 	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1625 
1626 	cpumask = cpumask_of_node(numa_node_id);
1627 
1628 	/* Fill in CU data */
1629 	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
1630 	sub_type_hdr->proximity_domain = proximity_domain;
1631 	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
1632 	if (sub_type_hdr->processor_id_low == -1)
1633 		return -EINVAL;
1634 
1635 	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
1636 
1637 	return 0;
1638 }
1639 
1640 /* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
1641  *
1642  *	@numa_node_id: CPU NUMA node id
1643  *	@avail_size: Available size in the memory
1644  *	@sub_type_hdr: Memory into which memory info will be filled in
1645  *
1646  *	Return 0 if successful, else a negative errno
1647  */
1648 static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
1649 			int proximity_domain,
1650 			struct crat_subtype_memory *sub_type_hdr)
1651 {
1652 	uint64_t mem_in_bytes = 0;
1653 	pg_data_t *pgdat;
1654 	int zone_type;
1655 
1656 	*avail_size -= sizeof(struct crat_subtype_memory);
1657 	if (*avail_size < 0)
1658 		return -ENOMEM;
1659 
1660 	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
1661 
1662 	/* Fill in subtype header data */
1663 	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
1664 	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
1665 	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1666 
1667 	/* Fill in Memory Subunit data */
1668 
1669 	/* Unlike si_meminfo, si_meminfo_node is not exported. So
1670 	 * the following lines are duplicated from si_meminfo_node
1671 	 * function
1672 	 */
1673 	pgdat = NODE_DATA(numa_node_id);
1674 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
1675 		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
1676 	mem_in_bytes <<= PAGE_SHIFT;
1677 
1678 	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
1679 	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
1680 	sub_type_hdr->proximity_domain = proximity_domain;
1681 
1682 	return 0;
1683 }
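
/* Example (illustrative): a node whose zones together manage 0x400000
 * pages reports 0x400000 << PAGE_SHIFT bytes (16 GiB with 4 KiB pages)
 * through length_low/length_high.
 */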
1684 
1685 #ifdef CONFIG_X86_64
1686 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
1687 				uint32_t *num_entries,
1688 				struct crat_subtype_iolink *sub_type_hdr)
1689 {
1690 	int nid;
1691 	struct cpuinfo_x86 *c = &cpu_data(0);
1692 	uint8_t link_type;
1693 
1694 	if (c->x86_vendor == X86_VENDOR_AMD)
1695 		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
1696 	else
1697 		link_type = CRAT_IOLINK_TYPE_QPI_1_1;
1698 
1699 	*num_entries = 0;
1700 
1701 	/* Create IO links from this node to other CPU nodes */
1702 	for_each_online_node(nid) {
1703 		if (nid == numa_node_id) /* node itself */
1704 			continue;
1705 
1706 		*avail_size -= sizeof(struct crat_subtype_iolink);
1707 		if (*avail_size < 0)
1708 			return -ENOMEM;
1709 
1710 		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1711 
1712 		/* Fill in subtype header data */
1713 		sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1714 		sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1715 		sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1716 
1717 		/* Fill in IO link data */
1718 		sub_type_hdr->proximity_domain_from = numa_node_id;
1719 		sub_type_hdr->proximity_domain_to = nid;
1720 		sub_type_hdr->io_interface_type = link_type;
1721 
1722 		(*num_entries)++;
1723 		sub_type_hdr++;
1724 	}
1725 
1726 	return 0;
1727 }
1728 #endif
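
/* Example (illustrative): on a two-socket AMD system with online NUMA
 * nodes 0 and 1, calling the helper above for node 0 emits one
 * HyperTransport iolink entry (0 -> 1) and returns *num_entries = 1.
 */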
1729 
1730 /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
1731  *
1732  *	@pcrat_image: Fill in VCRAT for CPU
1733  *	@size:	[IN] allocated size of crat_image.
1734  *		[OUT] actual size of data filled in crat_image
1735  */
1736 static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
1737 {
1738 	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
1739 	struct acpi_table_header *acpi_table;
1740 	acpi_status status;
1741 	struct crat_subtype_generic *sub_type_hdr;
1742 	int avail_size = *size;
1743 	int numa_node_id;
1744 #ifdef CONFIG_X86_64
1745 	uint32_t entries = 0;
1746 #endif
1747 	int ret = 0;
1748 
1749 	if (!pcrat_image)
1750 		return -EINVAL;
1751 
1752 	/* Fill in CRAT Header.
1753 	 * Modify length and total_entries as subunits are added.
1754 	 */
1755 	avail_size -= sizeof(struct crat_header);
1756 	if (avail_size < 0)
1757 		return -ENOMEM;
1758 
1759 	memset(crat_table, 0, sizeof(struct crat_header));
1760 	memcpy(&crat_table->signature, CRAT_SIGNATURE,
1761 			sizeof(crat_table->signature));
1762 	crat_table->length = sizeof(struct crat_header);
1763 
1764 	status = acpi_get_table("DSDT", 0, &acpi_table);
1765 	if (status != AE_OK)
1766 		pr_warn("DSDT table not found for OEM information\n");
1767 	else {
1768 		crat_table->oem_revision = acpi_table->revision;
1769 		memcpy(crat_table->oem_id, acpi_table->oem_id,
1770 				CRAT_OEMID_LENGTH);
1771 		memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
1772 				CRAT_OEMTABLEID_LENGTH);
1773 		acpi_put_table(acpi_table);
1774 	}
1775 	crat_table->total_entries = 0;
1776 	crat_table->num_domains = 0;
1777 
1778 	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
1779 
1780 	for_each_online_node(numa_node_id) {
1781 		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
1782 			continue;
1783 
1784 		/* Fill in Subtype: Compute Unit */
1785 		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
1786 			crat_table->num_domains,
1787 			(struct crat_subtype_computeunit *)sub_type_hdr);
1788 		if (ret < 0)
1789 			return ret;
1790 		crat_table->length += sub_type_hdr->length;
1791 		crat_table->total_entries++;
1792 
1793 		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1794 			sub_type_hdr->length);
1795 
1796 		/* Fill in Subtype: Memory */
1797 		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
1798 			crat_table->num_domains,
1799 			(struct crat_subtype_memory *)sub_type_hdr);
1800 		if (ret < 0)
1801 			return ret;
1802 		crat_table->length += sub_type_hdr->length;
1803 		crat_table->total_entries++;
1804 
1805 		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1806 			sub_type_hdr->length);
1807 
1808 		/* Fill in Subtype: IO Link */
1809 #ifdef CONFIG_X86_64
1810 		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
1811 				&entries,
1812 				(struct crat_subtype_iolink *)sub_type_hdr);
1813 		if (ret < 0)
1814 			return ret;
1815 
1816 		if (entries) {
1817 			crat_table->length += (sub_type_hdr->length * entries);
1818 			crat_table->total_entries += entries;
1819 
1820 			sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1821 					sub_type_hdr->length * entries);
1822 		}
1823 #else
1824 		pr_info("IO link not available for non-x86 platforms\n");
1825 #endif
1826 
1827 		crat_table->num_domains++;
1828 	}
1829 
1830 	/* TODO: Add cache Subtype for CPU.
1831 	 * Currently, CPU cache information is available in function
1832 	 * detect_cache_attributes(cpu) defined in the file
1833 	 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. That function is not
1834 	 * exported, so the code would have to be duplicated here to
1835 	 * obtain the same information.
1836 	 */
1837 
1838 	*size = crat_table->length;
1839 	pr_info("Virtual CRAT table created for CPU\n");
1840 
1841 	return 0;
1842 }
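
/* Illustrative sketch (not driver code): the variable-length subtype walk
 * used when building the image above is the same pattern a consumer would
 * use to parse it back:
 *
 *	struct crat_subtype_generic *hdr =
 *		(struct crat_subtype_generic *)(crat_table + 1);
 *	char *end = (char *)crat_table + crat_table->length;
 *
 *	while ((char *)hdr < end) {
 *		// dispatch on hdr->type (CU, memory, iolink, ...)
 *		hdr = (typeof(hdr))((char *)hdr + hdr->length);
 *	}
 *
 * Every subtype starts with the generic type/length/flags header, so
 * hdr->length is valid before casting to the specific subtype struct.
 */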
1843 
1844 static int kfd_fill_gpu_memory_affinity(int *avail_size,
1845 		struct kfd_dev *kdev, uint8_t type, uint64_t size,
1846 		struct crat_subtype_memory *sub_type_hdr,
1847 		uint32_t proximity_domain,
1848 		const struct kfd_local_mem_info *local_mem_info)
1849 {
1850 	*avail_size -= sizeof(struct crat_subtype_memory);
1851 	if (*avail_size < 0)
1852 		return -ENOMEM;
1853 
1854 	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
1855 	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
1856 	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
1857 	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1858 
1859 	sub_type_hdr->proximity_domain = proximity_domain;
1860 
1861 	pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
1862 			type, size);
1863 
1864 	sub_type_hdr->length_low = lower_32_bits(size);
1865 	sub_type_hdr->length_high = upper_32_bits(size);
1866 
1867 	sub_type_hdr->width = local_mem_info->vram_width;
1868 	sub_type_hdr->visibility_type = type;
1869 
1870 	return 0;
1871 }
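
/* Illustrative record contents (hypothetical values, not driver code):
 * for a dGPU with a 256-bit memory interface reporting a 16 GiB private
 * heap, the subtype built above would carry:
 *
 *	length_low	= 0x00000000	(16 GiB = 0x4_0000_0000 bytes)
 *	length_high	= 0x00000004
 *	width		= 256
 *	visibility_type	= HSA_MEM_HEAP_TYPE_FB_PRIVATE
 */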
1872 
1873 #ifdef CONFIG_ACPI_NUMA
1874 static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
1875 {
1876 	struct acpi_table_header *table_header = NULL;
1877 	struct acpi_subtable_header *sub_header = NULL;
1878 	unsigned long table_end, subtable_len;
1879 	u32 pci_id = pci_domain_nr(kdev->pdev->bus) << 16 |
1880 			pci_dev_id(kdev->pdev);
1881 	u32 bdf;
1882 	acpi_status status;
1883 	struct acpi_srat_cpu_affinity *cpu;
1884 	struct acpi_srat_generic_affinity *gpu;
1885 	int pxm = 0, max_pxm = 0;
1886 	int numa_node = NUMA_NO_NODE;
1887 	bool found = false;
1888 
1889 	/* Fetch the SRAT table from ACPI */
1890 	status = acpi_get_table(ACPI_SIG_SRAT, 0, &table_header);
1891 	if (status == AE_NOT_FOUND) {
1892 		pr_warn("SRAT table not found\n");
1893 		return;
1894 	} else if (ACPI_FAILURE(status)) {
1895 		const char *err = acpi_format_exception(status);
1896 		pr_err("SRAT table error: %s\n", err);
1897 		return;
1898 	}
1899 
1900 	table_end = (unsigned long)table_header + table_header->length;
1901 
1902 	/* Parse all entries looking for a match. */
1903 	sub_header = (struct acpi_subtable_header *)
1904 			((unsigned long)table_header +
1905 			sizeof(struct acpi_table_srat));
1906 	subtable_len = sub_header->length;
1907 
1908 	while (((unsigned long)sub_header) + subtable_len < table_end) {
1909 		/*
1910 		 * If length is 0, break from this loop to avoid
1911 		 * infinite loop.
1912 		 */
1913 		if (subtable_len == 0) {
1914 			pr_err("SRAT invalid zero length\n");
1915 			break;
1916 		}
1917 
1918 		switch (sub_header->type) {
1919 		case ACPI_SRAT_TYPE_CPU_AFFINITY:
1920 			cpu = (struct acpi_srat_cpu_affinity *)sub_header;
1921 			pxm = *((u32 *)cpu->proximity_domain_hi) << 8 |
1922 					cpu->proximity_domain_lo;
1923 			if (pxm > max_pxm)
1924 				max_pxm = pxm;
1925 			break;
1926 		case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
1927 			gpu = (struct acpi_srat_generic_affinity *)sub_header;
1928 			bdf = *((u16 *)(&gpu->device_handle[0])) << 16 |
1929 					*((u16 *)(&gpu->device_handle[2]));
1930 			if (bdf == pci_id) {
1931 				found = true;
1932 				numa_node = pxm_to_node(gpu->proximity_domain);
1933 			}
1934 			break;
1935 		default:
1936 			break;
1937 		}
1938 
1939 		if (found)
1940 			break;
1941 
1942 		sub_header = (struct acpi_subtable_header *)
1943 				((unsigned long)sub_header + subtable_len);
1944 		subtable_len = sub_header->length;
1945 	}
1946 
1947 	acpi_put_table(table_header);
1948 
1949 	/* Workaround bad cpu-gpu binding case */
1950 	if (found && (numa_node < 0 ||
1951 			numa_node > pxm_to_node(max_pxm)))
1952 		numa_node = 0;
1953 
1954 	if (numa_node != NUMA_NO_NODE)
1955 		set_dev_node(&kdev->pdev->dev, numa_node);
1956 }
1957 #endif
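
/* Illustrative sketch (not driver code): the pci_id matched in the SRAT
 * walk above packs the PCI domain into the high 16 bits and pci_dev_id()
 * (bus << 8 | devfn) into the low 16 bits. For a hypothetical device at
 * 0000:43:00.0:
 *
 *	u32 pci_id = 0x0000 << 16 | (0x43 << 8 | 0x00);	// 0x00004300
 *
 * The comparison assumes the generic affinity entry stores the segment in
 * device_handle[0..1] and the bus/devfn in device_handle[2..3].
 */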
1958 
1959 /* kfd_fill_gpu_direct_io_link_to_cpu - Fill in direct io link from GPU
1960  * to its NUMA node
1961  *	@avail_size: Available size in the memory
1962  *	@kdev: [IN] GPU device
1963  *	@sub_type_hdr: Memory into which io link info will be filled in
1964  *	@proximity_domain: proximity domain of the GPU node
1965  *
1966  *	Return 0 if successful else return -ve value
1967  */
1968 static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
1969 			struct kfd_dev *kdev,
1970 			struct crat_subtype_iolink *sub_type_hdr,
1971 			uint32_t proximity_domain)
1972 {
1973 	*avail_size -= sizeof(struct crat_subtype_iolink);
1974 	if (*avail_size < 0)
1975 		return -ENOMEM;
1976 
1977 	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1978 
1979 	/* Fill in subtype header data */
1980 	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1981 	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1982 	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1983 	if (kfd_dev_is_large_bar(kdev))
1984 		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1985 
1986 	/* Fill in IOLINK subtype.
1987 	 * TODO: Fill-in other fields of iolink subtype
1988 	 */
1989 	if (kdev->adev->gmc.xgmi.connected_to_cpu) {
1990 		/*
1991 		 * with host gpu xgmi link, host can access gpu memory whether
1992 		 * or not pcie bar type is large, so always create bidirectional
1993 		 * io link.
1994 		 */
1995 		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1996 		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
1997 		sub_type_hdr->num_hops_xgmi = 1;
1998 		if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) {
1999 			sub_type_hdr->minimum_bandwidth_mbs =
2000 					amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
2001 							kdev->adev, NULL, true);
2002 			sub_type_hdr->maximum_bandwidth_mbs =
2003 					sub_type_hdr->minimum_bandwidth_mbs;
2004 		}
2005 	} else {
2006 		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
2007 		sub_type_hdr->minimum_bandwidth_mbs =
2008 				amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true);
2009 		sub_type_hdr->maximum_bandwidth_mbs =
2010 				amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false);
2011 	}
2012 
2013 	sub_type_hdr->proximity_domain_from = proximity_domain;
2014 
2015 #ifdef CONFIG_ACPI_NUMA
2016 	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
2017 		kfd_find_numa_node_in_srat(kdev);
2018 #endif
2019 #ifdef CONFIG_NUMA
2020 	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
2021 		sub_type_hdr->proximity_domain_to = 0;
2022 	else
2023 		sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
2024 #else
2025 	sub_type_hdr->proximity_domain_to = 0;
2026 #endif
2027 	return 0;
2028 }
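
/* Summary of the classification above (derived from the code, shown for
 * illustration):
 *
 *	connected_to_cpu | large BAR | io_interface_type | BI_DIRECTIONAL
 *	yes              | any       | XGMI              | yes
 *	no               | yes       | PCIEXPRESS        | yes
 *	no               | no        | PCIEXPRESS        | no
 */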
2029 
2030 static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
2031 			struct kfd_dev *kdev,
2032 			struct kfd_dev *peer_kdev,
2033 			struct crat_subtype_iolink *sub_type_hdr,
2034 			uint32_t proximity_domain_from,
2035 			uint32_t proximity_domain_to)
2036 {
2037 	*avail_size -= sizeof(struct crat_subtype_iolink);
2038 	if (*avail_size < 0)
2039 		return -ENOMEM;
2040 
2041 	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
2042 
2043 	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
2044 	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
2045 	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
2046 			       CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
2047 
2048 	sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
2049 	sub_type_hdr->proximity_domain_from = proximity_domain_from;
2050 	sub_type_hdr->proximity_domain_to = proximity_domain_to;
2051 	sub_type_hdr->num_hops_xgmi =
2052 		amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
2053 	sub_type_hdr->maximum_bandwidth_mbs =
2054 		amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false);
2055 	sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
2056 		amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
2057 
2058 	return 0;
2059 }
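
/* Illustrative values (hypothetical, not driver code): for two GPUs one
 * xGMI hop apart where the bandwidth helper reports 25000 MB/s, the
 * record above would carry num_hops_xgmi = 1 and
 * maximum_bandwidth_mbs = 25000. minimum_bandwidth_mbs is only queried
 * when the maximum is non-zero; otherwise it is forced to 0 rather than
 * queried.
 */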
2060 
2061 /* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
2062  *
2063  *	@pcrat_image: Buffer to fill in with the GPU VCRAT
2064  *	@size:	[IN] allocated size of crat_image.
2065  *		[OUT] actual size of data filled in crat_image
2066  */
2067 static int kfd_create_vcrat_image_gpu(void *pcrat_image,
2068 				      size_t *size, struct kfd_dev *kdev,
2069 				      uint32_t proximity_domain)
2070 {
2071 	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
2072 	struct crat_subtype_generic *sub_type_hdr;
2073 	struct kfd_local_mem_info local_mem_info;
2074 	struct kfd_topology_device *peer_dev;
2075 	struct crat_subtype_computeunit *cu;
2076 	struct kfd_cu_info cu_info;
2077 	int avail_size = *size;
2078 	uint32_t total_num_of_cu;
2079 	int num_of_cache_entries = 0;
2080 	int cache_mem_filled = 0;
2081 	uint32_t nid = 0;
2082 	int ret = 0;
2083 
2084 	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
2085 		return -EINVAL;
2086 
2087 	/* Fill the CRAT Header.
2088 	 * Modify length and total_entries as subunits are added.
2089 	 */
2090 	avail_size -= sizeof(struct crat_header);
2091 	if (avail_size < 0)
2092 		return -ENOMEM;
2093 
2094 	memset(crat_table, 0, sizeof(struct crat_header));
2095 
2096 	memcpy(&crat_table->signature, CRAT_SIGNATURE,
2097 			sizeof(crat_table->signature));
2098 	/* Change length as we add more subtypes */
2099 	crat_table->length = sizeof(struct crat_header);
2100 	crat_table->num_domains = 1;
2101 	crat_table->total_entries = 0;
2102 
2103 	/* Fill in Subtype: Compute Unit
2104 	 * First fill in the sub type header and then sub type data
2105 	 */
2106 	avail_size -= sizeof(struct crat_subtype_computeunit);
2107 	if (avail_size < 0)
2108 		return -ENOMEM;
2109 
2110 	sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
2111 	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
2112 
2113 	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
2114 	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
2115 	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
2116 
2117 	/* Fill CU subtype data */
2118 	cu = (struct crat_subtype_computeunit *)sub_type_hdr;
2119 	cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
2120 	cu->proximity_domain = proximity_domain;
2121 
2122 	amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
2123 	cu->num_simd_per_cu = cu_info.simd_per_cu;
2124 	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
2125 	cu->max_waves_simd = cu_info.max_waves_per_simd;
2126 
2127 	cu->wave_front_size = cu_info.wave_front_size;
2128 	cu->array_count = cu_info.num_shader_arrays_per_engine *
2129 		cu_info.num_shader_engines;
2130 	total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
2131 	cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
2132 	cu->num_cu_per_array = cu_info.num_cu_per_sh;
2133 	cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
2134 	cu->num_banks = cu_info.num_shader_engines;
2135 	cu->lds_size_in_kb = cu_info.lds_size;
2136 
2137 	cu->hsa_capability = 0;
2138 
2139 	/* Check if this node supports IOMMU. During parsing this flag will
2140 	 * translate to HSA_CAP_ATS_PRESENT
2141 	 */
2142 	if (!kfd_iommu_check_device(kdev))
2143 		cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
2144 
2145 	crat_table->length += sub_type_hdr->length;
2146 	crat_table->total_entries++;
2147 
2148 	/* Fill in Subtype: Memory. Only on systems with large BAR (no
2149 	 * private FB), report memory as public. On other systems
2150 	 * report the total FB size (public+private) as a single
2151 	 * private heap.
2152 	 */
2153 	amdgpu_amdkfd_get_local_mem_info(kdev->adev, &local_mem_info);
2154 	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2155 			sub_type_hdr->length);
2156 
2157 	if (debug_largebar)
2158 		local_mem_info.local_mem_size_private = 0;
2159 
2160 	if (local_mem_info.local_mem_size_private == 0)
2161 		ret = kfd_fill_gpu_memory_affinity(&avail_size,
2162 				kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
2163 				local_mem_info.local_mem_size_public,
2164 				(struct crat_subtype_memory *)sub_type_hdr,
2165 				proximity_domain,
2166 				&local_mem_info);
2167 	else
2168 		ret = kfd_fill_gpu_memory_affinity(&avail_size,
2169 				kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
2170 				local_mem_info.local_mem_size_public +
2171 				local_mem_info.local_mem_size_private,
2172 				(struct crat_subtype_memory *)sub_type_hdr,
2173 				proximity_domain,
2174 				&local_mem_info);
2175 	if (ret < 0)
2176 		return ret;
2177 
2178 	crat_table->length += sizeof(struct crat_subtype_memory);
2179 	crat_table->total_entries++;
2180 
2181 	/* Fill in Subtype: Cache. This information is NOT readily available
2182 	 * in KGD, so it comes from the static per-ASIC tables in this file.
2183 	 */
2184 	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2185 		sub_type_hdr->length);
2186 	ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
2187 				avail_size,
2188 				&cu_info,
2189 				(struct crat_subtype_cache *)sub_type_hdr,
2190 				&cache_mem_filled,
2191 				&num_of_cache_entries);
2192 
2193 	if (ret < 0)
2194 		return ret;
2195 
2196 	crat_table->length += cache_mem_filled;
2197 	crat_table->total_entries += num_of_cache_entries;
2198 	avail_size -= cache_mem_filled;
2199 
2200 	/* Fill in Subtype: IO_LINKS
2201 	 *  Only the direct link from the GPU to its NUMA node is added
2202 	 *  here. Indirect links are added by userspace.
2203 	 */
2204 	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2205 		cache_mem_filled);
2206 	ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
2207 		(struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
2208 
2209 	if (ret < 0)
2210 		return ret;
2211 
2212 	crat_table->length += sub_type_hdr->length;
2213 	crat_table->total_entries++;
2214 
2216 	/* Fill in Subtype: IO_LINKS
2217 	 * Direct links from GPU to other GPUs through xGMI.
2218 	 * Loop over the GPUs that have already been processed (those with
2219 	 * a lower proximity_domain value) and add a link from this GPU to
2220 	 * each peer that shares the same hive id. The reverse iolink (from
2221 	 * the other GPU back to this one) is added in
2222 	 * kfd_parse_subtype_iolink.
2223 	 */
2224 	if (kdev->hive_id) {
2225 		for (nid = 0; nid < proximity_domain; ++nid) {
2226 			peer_dev = kfd_topology_device_by_proximity_domain(nid);
2227 			if (!peer_dev || !peer_dev->gpu)
2228 				continue;
2229 			if (peer_dev->gpu->hive_id != kdev->hive_id)
2230 				continue;
2231 			sub_type_hdr = (typeof(sub_type_hdr))(
2232 				(char *)sub_type_hdr +
2233 				sizeof(struct crat_subtype_iolink));
2234 			ret = kfd_fill_gpu_xgmi_link_to_gpu(
2235 				&avail_size, kdev, peer_dev->gpu,
2236 				(struct crat_subtype_iolink *)sub_type_hdr,
2237 				proximity_domain, nid);
2238 			if (ret < 0)
2239 				return ret;
2240 			crat_table->length += sub_type_hdr->length;
2241 			crat_table->total_entries++;
2242 		}
2243 	}
2244 	*size = crat_table->length;
2245 	pr_info("Virtual CRAT table created for GPU\n");
2246 
2247 	return ret;
2248 }
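
/* Resulting GPU VCRAT image layout (illustrative; entry counts and sizes
 * depend on the device):
 *
 *	+------------------------------+
 *	| struct crat_header           |
 *	+------------------------------+
 *	| compute unit subtype         |  1 entry
 *	+------------------------------+
 *	| memory subtype               |  1 entry (public or private FB)
 *	+------------------------------+
 *	| cache subtypes               |  num_of_cache_entries entries
 *	+------------------------------+
 *	| iolink to CPU                |  1 entry
 *	+------------------------------+
 *	| xGMI iolinks to hive peers   |  0..proximity_domain entries
 *	+------------------------------+
 */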
2249 
2250 /* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
2251  *		creates a Virtual CRAT (VCRAT) image
2252  *
2253  * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
2254  *
2255  *	@crat_image: VCRAT image created because ACPI does not have a
2256  *		     CRAT for this device
2257  *	@size: [OUT] size of virtual crat_image
2258  *	@flags:	COMPUTE_UNIT_CPU - Create VCRAT for CPU device
2259  *		COMPUTE_UNIT_GPU - Create VCRAT for GPU
2260  *		(COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
2261  *			-- this option is not currently implemented.
2262  *			The assumption is that all AMD APUs provide a CRAT table.
2263  *	@kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
2264  *
2265  *	Return 0 if successful else return -ve value
2266  */
2267 int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
2268 				  int flags, struct kfd_dev *kdev,
2269 				  uint32_t proximity_domain)
2270 {
2271 	void *pcrat_image = NULL;
2272 	int ret = 0, num_nodes;
2273 	size_t dyn_size;
2274 
2275 	if (!crat_image)
2276 		return -EINVAL;
2277 
2278 	*crat_image = NULL;
2279 
2280 	/* Allocate the CPU Virtual CRAT size based on the number of online
2281 	 * nodes. Allocate VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image.
2282 	 * This should cover all the current conditions. For GPUs, a check
2283 	 * prevents writing beyond the allocated size.
2284 	 */
2285 	switch (flags) {
2286 	case COMPUTE_UNIT_CPU:
2287 		num_nodes = num_online_nodes();
2288 		dyn_size = sizeof(struct crat_header) +
2289 			num_nodes * (sizeof(struct crat_subtype_computeunit) +
2290 			sizeof(struct crat_subtype_memory) +
2291 			(num_nodes - 1) * sizeof(struct crat_subtype_iolink));
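		/* Illustrative sizing: with two online nodes this reserves
		 * one header, two compute-unit subtypes, two memory subtypes
		 * and two iolink subtypes (each node links to its one peer).
		 */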
2292 		pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
2293 		if (!pcrat_image)
2294 			return -ENOMEM;
2295 		*size = dyn_size;
2296 		pr_debug("CRAT size is %zu\n", dyn_size);
2297 		ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
2298 		break;
2299 	case COMPUTE_UNIT_GPU:
2300 		if (!kdev)
2301 			return -EINVAL;
2302 		pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
2303 		if (!pcrat_image)
2304 			return -ENOMEM;
2305 		*size = VCRAT_SIZE_FOR_GPU;
2306 		ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
2307 						 proximity_domain);
2308 		break;
2309 	case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
2310 		/* TODO: */
2311 		ret = -EINVAL;
2312 		pr_err("VCRAT not implemented for APU\n");
2313 		break;
2314 	default:
2315 		ret = -EINVAL;
2316 	}
2317 
2318 	if (!ret)
2319 		*crat_image = pcrat_image;
2320 	else
2321 		kvfree(pcrat_image);
2322 
2323 	return ret;
2324 }
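
/* Illustrative usage sketch (assumed caller pattern, mirroring how the
 * topology code is expected to drive this API; not copied from a caller):
 *
 *	void *crat_image;
 *	size_t image_size;
 *	int ret;
 *
 *	ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
 *					    COMPUTE_UNIT_GPU, kdev,
 *					    proximity_domain);
 *	if (!ret) {
 *		// parse crat_image[0..image_size) into topology here
 *		kfd_destroy_crat_image(crat_image);
 *	}
 */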
2325 
2326 
2327 /* kfd_destroy_crat_image
2328  *
2329  *	@crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
2330  *
2331  */
2332 void kfd_destroy_crat_image(void *crat_image)
2333 {
2334 	kvfree(crat_image);
2335 }
2336